From eb2c5f34e6e4bf460e1443670cb7aacff1bb30b2 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 4 Aug 2020 17:00:24 -0700 Subject: [PATCH 001/301] separate build yamls for ci_prod branch (#415) --- ...l.all_tag.all_phase.all_config.ci_prod.yml | 44 +++++++++++++++ ...l.all_tag.all_phase.all_config.ci_prod.yml | 55 +++++++++++++++++++ 2 files changed, 99 insertions(+) create mode 100644 .pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml create mode 100644 .pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml diff --git a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml new file mode 100644 index 000000000..d47a60ffe --- /dev/null +++ b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml @@ -0,0 +1,44 @@ +environment: + host: + os: 'linux' + flavor: 'ubuntu' + version: '16.04' + runtime: + provider: 'appcontainer' + image: 'cdpxlinux.azurecr.io/user/azure-monitor/container-insights:1.0' + +version: + name: 'DockerProvider' + major: 10 + minor: 0 + tag: 'beta' + system: 'custom' + exclude_commit: true + +restore: + commands: + - !!defaultcommand + name: 'get go modules' + command: '.pipelines/restore-linux.sh' + fail_on_stderr: false + +build: + commands: + - !!defaultcommand + name: 'Build Docker Provider Shell Bundle' + command: '.pipelines/build-linux.sh' + fail_on_stderr: false + +package: + commands: + - !!dockerbuildcommand # REQUIRED: This maps the command data to a concrete type in the CDPX orchestrator. + name: 'Build Docker Image' # REQUIRED: All commands have a name field. All console output captured when + # this command runs is tagged with the value of this field. + context_folder: 'kubernetes/linux' # REQUIRED: The repository root relative path of the folder containing the Dockerfile to build. + # In effect, the context folder will be repository_checkout_folder/src/DockerFinal. + dockerfile_name: 'Dockerfile' # OPTIONAL: The name of the dockerfile. Docker client does allow the Dockerfile + # to be named differently. Defaults to Dockerfile. + # In effect, the -f option value passed to docker build will be repository_checkout_folder/src/DockerFinal/Foo.dockerfile. + repository_name: 'cdpxlinux' # only supported ones are cdpx acr repos + tag: 'ciprod' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. + latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. 
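The package step above is essentially a docker build driven by the CDPX orchestrator. A minimal local sketch of what it amounts to (the final repository path and the timestamp/commit-ID suffix that CDPX appends to the tag are assumptions, not taken from this pipeline file):

```
# Sketch only: approximate local equivalent of the "Build Docker Image" package step.
# Run from the repository root; CDPX computes the real tag (e.g. ciprod-<timestamp>-<commitID>)
# and pushes to its own ACR, neither of which is shown in this pipeline file.
docker build \
  -f kubernetes/linux/Dockerfile \
  -t cdpxlinux:ciprod \
  kubernetes/linux
```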
diff --git a/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml new file mode 100644 index 000000000..e0286fbd6 --- /dev/null +++ b/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml @@ -0,0 +1,55 @@ +environment: + host: + os: 'windows' + flavor: 'server' + version: '2019' + runtime: + provider: 'appcontainer' + image: 'cdpxwin1809.azurecr.io/user/azure-monitor/container-insights:6.0' + source_mode: 'map' + +version: + name: 'Certificate Generator and Out OMS plugin' + major: 10 + minor: 0 + tag: 'beta' + system: 'custom' + exclude_commit: true + +signing_options: + profile: 'azure' + codesign_validation_glob_pattern: 'regex|.+(?:dll|exe|sys|ps1|psm1|ps1xml|psc1|psd1|cdxml|vbs|js|wsf)$;-:file|**\linux\**' #CSV does not currently support binaries built for linux, so we exclude this folder + +static_analysis_options: + binskim_options: + files_to_scan: + - from: 'build\windows\installer\certificategenerator\bin\' + exclude: # exclude binaries which are referenced via dotnet packages and not built by us + - '**/**/**/BouncyCastle.Crypto.dll' + - '**/**/**/**/BouncyCastle.Crypto.dll' +restore: + commands: + - !!defaultcommand + name: 'Restore dotnet packages' + command: '.pipelines/restore-windows.cmd' + +build: + commands: + - !!defaultcommand + name: 'Build Certificate Generator Source code and Out OMS Go plugin code' + command: '.pipelines/build-windows.cmd' + fail_on_stderr: false + +package: + commands: + - !!dockerbuildcommand # REQUIRED: This maps the command data to a concrete type in the CDPX orchestrator. + name: 'Build Docker Image' # REQUIRED: All commands have a name field. All console output captured when + # this command runs is tagged with the value of this field. + context_folder: 'kubernetes/windows' # REQUIRED: The repository root relative path of the folder containing the Dockerfile to build. + # In effect, the context folder will be repository_checkout_folder/src/DockerFinal. + dockerfile_name: 'Dockerfile' # OPTIONAL: The name of the dockerfile. Docker client does allow the Dockerfile + # to be named differently. Defaults to Dockerfile. + # In effect, the -f option value passed to docker build will be repository_checkout_folder/src/DockerFinal/Foo.dockerfile. + repository_name: 'cdpxwin1809' # only supported ones are cdpx acr repos + tag: 'win-ciprod' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. + latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. From df29e35c0b5d5a4bf73bb833f9939bda40ee0732 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Wed, 5 Aug 2020 17:45:14 -0700 Subject: [PATCH 002/301] re-enable adx path (#420) --- source/plugins/go/src/oms.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 88c5641f7..63ca6de10 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -1323,9 +1323,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { ContainerLogsRouteV2 = true Log("Routing container logs thru %s route...", ContainerLogsV2Route) fmt.Fprintf(os.Stdout, "Routing container logs thru %s route... 
\n", ContainerLogsV2Route) - //} else if strings.Compare(ContainerLogsRoute, ContainerLogsADXRoute) == 0 { - //making dormant with below comparison for now -- - } else if strings.Compare("willnot", "match") == 0 { + } else if strings.Compare(ContainerLogsRoute, ContainerLogsADXRoute) == 0 { //check if adx clusteruri, clientid & secret are set var err error AdxClusterUri, err = ReadFileContents(PluginConfiguration["adx_cluster_uri_path"]) From bcc8506e4d4a1114307d3d13ad09111ada9c367e Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 5 Aug 2020 18:17:12 -0700 Subject: [PATCH 003/301] Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes --- README.md | 4 ++-- ReleaseNotes.md | 16 ++++++++++++++++ ReleaseProcess.md | 16 +++++++++++----- build/version | 4 ++-- charts/azuremonitor-containers/Chart.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 6 +++--- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 12 ++++++------ kubernetes/windows/Dockerfile | 2 +- 9 files changed, 43 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index 06d3606c0..659fe0161 100644 --- a/README.md +++ b/README.md @@ -213,7 +213,7 @@ powershell -ExecutionPolicy bypass # switch to powershell if you are not on pow # Azure DevOps Build Pipeline -Navigate to https://github-private.visualstudio.com/microsoft/_build?view=pipelines to see Linux and Windows Agent build pipelines. These pipelines are configured with CI triggers for ci_dev and ci_prod (TBD). +Navigate to https://github-private.visualstudio.com/microsoft/_build?view=pipelines to see Linux and Windows Agent build pipelines. These pipelines are configured with CI triggers for ci_dev and ci_prod. Docker Images will be pushed to CDPX ACR repos and these needs to retagged and pushed to corresponding ACR or docker hub. Only onboarded Azure AD AppId has permission to pull the images from CDPx ACRs. @@ -236,7 +236,7 @@ Here are the instructions to onboard the feature branch to Azure Dev Ops pipelin # Azure DevOps Release Pipeline -Integrated to Azure DevOps release pipeline for the ci_dev and ci_prod (TBD).With this, for every commit to ci_dev branch, latest bits automatically deployded to DEV AKS clusters in Build subscription and similarly for for every commit to ci_prod branch, latest bits automatically deployed to PROD AKS clusters in Build subscription. +Integrated to Azure DevOps release pipeline for the ci_dev and ci_prod.With this, for every commit to ci_dev branch, latest bits automatically deployded to DEV AKS clusters in Build subscription and similarly for for every commit to ci_prod branch, latest bits automatically deployed to PROD AKS clusters in Build subscription. For dev, agent image will be in this format mcr.microsoft.com/azuremonitor/containerinsights/cidev:cidev. For prod, agent will be in this format mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod`
`. diff --git a/ReleaseNotes.md b/ReleaseNotes.md index aa57d8388..933900b89 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,22 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 08/05/2020 - +##### Version microsoft/oms:ciprod08052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052020 (linux) +##### Version microsoft/oms:win-ciprod08052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08052020 (windows) +##### Code change log +- Collection of KubeState metrics for deployments and HPA +- Add the Proxy support for Windows agent +- Fix for ContainerState in ContainerInventory to handle Failed state and collection of environment variables for terminated and failed containers +- Change /spec to /metrics/cadvisor endpoint to collect node capacity metrics +- Disable Health Plugin by default and can enabled via configmap +- Pin version of jq to 1.5+dfsg-2 +- Bug fix for showing node as 'not ready' when there is disk pressure +- oneagent integration (disabled by default) +- Add region check before sending alertable metrics to MDM +- Telemetry fix for agent telemetry for sov. clouds + + ### 07/15/2020 - ##### Version microsoft/oms:ciprod07152020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod07152020 (linux) ##### Version microsoft/oms:win-ciprod05262020-2 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod05262020-2 (windows) diff --git a/ReleaseProcess.md b/ReleaseProcess.md index 38ff1ab69..5ec42d496 100644 --- a/ReleaseProcess.md +++ b/ReleaseProcess.md @@ -5,15 +5,21 @@ Here are the high-level instructions to get the CIPROD`
` image for the production release 1. create feature branch from ci_dev and make the following updates > Note: This required since Azure Dev Ops pipeline doesnt support --build-arg yet to automate this. - - Ensure IMAGE_TAG updated with release candiate image tag in the DockerFile under kubernetes/linux and kubernetes/windows directory - - Update omsagent.yaml if there are any changes to the yaml + - Ensure IMAGE_TAG updated with release candiate image tag in the DockerFile under kubernetes/linux and kubernetes/windows directory + - Update the version file under build directory with build version and date + - Update omsagent.yaml for the image tag and dockerProviderVersion, and any other changes + - Update the chart version and image tags in values.yaml under charts/azuremonitor-containers - Release notes 2. Make PR to ci_dev branch and once the PR approved, merge the changes to ci_dev 3. Latest bits of ci_dev automatically deployed to CIDEV cluster in build subscription so just validated E2E to make sure everthing works 4. If everything validated in DEV, make merge PR from ci_dev and ci_prod and merge once this reviewed by dev team -5. Merge ci_dev and ci_prod branch which will trigger automatic deployment of latest bits to CIPROD cluster with CIPROD`
` image (TBD) +6. Update following pipeline variables under ReleaseCandiate with version of chart and image tag + - CIHELMCHARTVERSION # For example, 2.7.4 + - CIImageTagSuffix # ciprod08052020 or ciprod08052020-1 etc. +7. Merge ci_dev and ci_prod branch which will trigger automatic deployment of latest bits to CIPROD cluster with CIPROD`
` image to test and scale cluters, AKS, AKS-Engine > Note: production image automatically pushed to CIPROD Public cloud ACR which will inturn replicated to Public cloud MCR. -6. Validate all the scenarios against CIPROD cluster in Build subscription +8. Validate all the scenarios against clusters in build subscription and scale clusters + # 2. Perf and scale testing @@ -27,7 +33,7 @@ Image automatically synched to MCR CN from Public cloud MCR. ## AKS -Make PR against [AKS-RP](https://msazure.visualstudio.com/CloudNativeCompute/_git/aks-rp?version=GBmaster) repo with chart update(s) +- Refer to internal docs for the release process and instructions. ## ARO v3 diff --git a/build/version b/build/version index b856fc312..f26973116 100644 --- a/build/version +++ b/build/version @@ -5,8 +5,8 @@ CONTAINER_BUILDVERSION_MAJOR=10 CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 -CONTAINER_BUILDVERSION_BUILDNR=1 -CONTAINER_BUILDVERSION_DATE=20200526 +CONTAINER_BUILDVERSION_BUILDNR=4 +CONTAINER_BUILDVERSION_DATE=20200805 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 8a84692e7..202494152 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.7.3 +version: 2.7.4 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 685c767bb..927d24b35 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -7,10 +7,10 @@ omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod07152020" - tagWindows: "win-ciprod05262020-2" + tag: "ciprod08052020" + tagWindows: "win-ciprod08052020" pullPolicy: IfNotPresent - dockerProviderVersion: "10.0.0-3" + dockerProviderVersion: "10.0.0-4" agentVersion: "1.10.0.1" ## To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. 
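Because a release bumps the same date-stamped tag in several files (values.yaml, both Dockerfiles and omsagent.yaml), a throwaway consistency check before raising the PR can help. This is a sketch and not part of the repo tooling; the tag argument (e.g. ciprod08052020) is an assumption:

```
# Sketch: list any image-tag references that do not match the release tag passed as $1.
TAG="$1"   # e.g. ciprod08052020
grep -n "ciprod[0-9]\{8\}" \
  charts/azuremonitor-containers/values.yaml \
  kubernetes/linux/Dockerfile \
  kubernetes/windows/Dockerfile \
  kubernetes/omsagent.yaml \
  | grep -v "$TAG" \
  && echo "stale tag references found above" \
  || echo "all image tags reference $TAG"
```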
diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index c8b61995d..c82532471 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod07152020 +ARG IMAGE_TAG=ciprod08052020 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 038c7e92b..ac712722a 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -337,13 +337,13 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.0.0-3" + dockerProviderVersion: "10.0.0-4" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod07152020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052020" imagePullPolicy: IfNotPresent resources: limits: @@ -480,13 +480,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.0.0-3" + dockerProviderVersion: "10.0.0-4" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod07152020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052020" imagePullPolicy: IfNotPresent resources: limits: @@ -631,13 +631,13 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.0.0-2" + dockerProviderVersion: "10.0.0-4" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod05262020-2" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08052020" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 9a5e22e0d..0b81b9c71 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod05262020-2 +ARG IMAGE_TAG=win-ciprod08052020 SHELL ["powershell"] From 39534d6116ca5df1325768e681646b5d6010ea6b Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Wed, 5 Aug 2020 20:06:46 -0700 Subject: [PATCH 004/301] fix for zero filled metrics (#423) --- source/plugins/ruby/podinventory_to_mdm.rb | 98 +++++++++++----------- 1 file changed, 51 insertions(+), 47 deletions(-) diff --git a/source/plugins/ruby/podinventory_to_mdm.rb b/source/plugins/ruby/podinventory_to_mdm.rb index dd5a15990..834515969 100644 --- a/source/plugins/ruby/podinventory_to_mdm.rb +++ b/source/plugins/ruby/podinventory_to_mdm.rb @@ -93,67 +93,71 @@ def initialize(custom_metrics_azure_regions) end def get_pod_inventory_mdm_records(batch_time) + records = [] begin - # generate all possible values of non_phase_dim_values X pod Phases and zero-fill the ones that are not already present - @no_phase_dim_values_hash.each { |key, value| - @@pod_phase_values.each { |phase| - pod_key = [key, phase].join("~~") - if !@pod_count_hash.key?(pod_key) - @pod_count_hash[pod_key] = 0 - else + if @process_incoming_stream + # generate all possible values of non_phase_dim_values X 
pod Phases and zero-fill the ones that are not already present + @no_phase_dim_values_hash.each { |key, value| + @@pod_phase_values.each { |phase| + pod_key = [key, phase].join("~~") + if !@pod_count_hash.key?(pod_key) + @pod_count_hash[pod_key] = 0 + else + next + end + } + } + @pod_count_hash.each { |key, value| + key_elements = key.split("~~") + if key_elements.length != 4 next end - } - } - records = [] - @pod_count_hash.each { |key, value| - key_elements = key.split("~~") - if key_elements.length != 4 - next - end - # get dimension values by key - podNodeDimValue = key_elements[0] - podNamespaceDimValue = key_elements[1] - podControllerNameDimValue = key_elements[2] - podPhaseDimValue = key_elements[3] + # get dimension values by key + podNodeDimValue = key_elements[0] + podNamespaceDimValue = key_elements[1] + podControllerNameDimValue = key_elements[2] + podPhaseDimValue = key_elements[3] - record = @@pod_inventory_custom_metrics_template % { - timestamp: batch_time, - metricName: @@pod_count_metric_name, - phaseDimValue: podPhaseDimValue, - namespaceDimValue: podNamespaceDimValue, - nodeDimValue: podNodeDimValue, - controllerNameDimValue: podControllerNameDimValue, - podCountMetricValue: value, + record = @@pod_inventory_custom_metrics_template % { + timestamp: batch_time, + metricName: @@pod_count_metric_name, + phaseDimValue: podPhaseDimValue, + namespaceDimValue: podNamespaceDimValue, + nodeDimValue: podNodeDimValue, + controllerNameDimValue: podControllerNameDimValue, + podCountMetricValue: value, + } + records.push(JSON.parse(record)) } - records.push(JSON.parse(record)) - } - #Add pod metric records - records = MdmMetricsGenerator.appendAllPodMetrics(records, batch_time) + #Add pod metric records + records = MdmMetricsGenerator.appendAllPodMetrics(records, batch_time) - #Send telemetry for pod metrics - timeDifference = (DateTime.now.to_time.to_i - @@metricTelemetryTimeTracker).abs - timeDifferenceInMinutes = timeDifference / 60 - if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) - MdmMetricsGenerator.flushPodMdmMetricTelemetry - @@metricTelemetryTimeTracker = DateTime.now.to_time.to_i - end + #Send telemetry for pod metrics + timeDifference = (DateTime.now.to_time.to_i - @@metricTelemetryTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + MdmMetricsGenerator.flushPodMdmMetricTelemetry + @@metricTelemetryTimeTracker = DateTime.now.to_time.to_i + end - # Clearing out all hashes after telemetry is flushed - MdmMetricsGenerator.clearPodHashes + # Clearing out all hashes after telemetry is flushed + MdmMetricsGenerator.clearPodHashes + end rescue Exception => e @log.info "Error processing pod inventory record Exception: #{e.class} Message: #{e.message}" ApplicationInsightsUtility.sendExceptionTelemetry(e.backtrace) return [] end - @log.info "Pod Count To Phase #{@pod_count_by_phase} " - @log.info "resetting convertor state " - @pod_count_hash = {} - @no_phase_dim_values_hash = {} - @pod_count_by_phase = {} - @pod_uids = {} + if @process_incoming_stream + @log.info "Pod Count To Phase #{@pod_count_by_phase} " + @log.info "resetting convertor state " + @pod_count_hash = {} + @no_phase_dim_values_hash = {} + @pod_count_by_phase = {} + @pod_uids = {} + end return records end From 5e0b42909bc63886dbf5433545d921a8237ef1e0 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 7 Aug 2020 13:26:21 -0700 Subject: [PATCH 005/301] consolidate windows agent 
image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update --- ReleaseNotes.md | 6 ++--- ReleaseProcess.md | 2 +- charts/azuremonitor-containers/values.yaml | 4 +-- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 6 ++--- kubernetes/windows/Dockerfile | 31 ++++++++++++++++++++-- kubernetes/windows/baseimage/Dockerfile | 28 ------------------- 7 files changed, 39 insertions(+), 40 deletions(-) delete mode 100644 kubernetes/windows/baseimage/Dockerfile diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 933900b89..0f1d932a8 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,9 +11,9 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) -### 08/05/2020 - -##### Version microsoft/oms:ciprod08052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052020 (linux) -##### Version microsoft/oms:win-ciprod08052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08052020 (windows) +### 08/07/2020 - +##### Version microsoft/oms:ciprod08072020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08072020 (linux) +##### Version microsoft/oms:win-ciprod08072020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08072020 (windows) ##### Code change log - Collection of KubeState metrics for deployments and HPA - Add the Proxy support for Windows agent diff --git a/ReleaseProcess.md b/ReleaseProcess.md index 5ec42d496..19802e22c 100644 --- a/ReleaseProcess.md +++ b/ReleaseProcess.md @@ -15,7 +15,7 @@ Here are the high-level instructions to get the CIPROD`
` image for 4. If everything validated in DEV, make merge PR from ci_dev and ci_prod and merge once this reviewed by dev team 6. Update following pipeline variables under ReleaseCandiate with version of chart and image tag - CIHELMCHARTVERSION # For example, 2.7.4 - - CIImageTagSuffix # ciprod08052020 or ciprod08052020-1 etc. + - CIImageTagSuffix # ciprod08072020 or ciprod08072020-1 etc. 7. Merge ci_dev and ci_prod branch which will trigger automatic deployment of latest bits to CIPROD cluster with CIPROD`
` image to test and scale cluters, AKS, AKS-Engine > Note: production image automatically pushed to CIPROD Public cloud ACR which will inturn replicated to Public cloud MCR. 8. Validate all the scenarios against clusters in build subscription and scale clusters diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 927d24b35..610e109ef 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -7,8 +7,8 @@ omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod08052020" - tagWindows: "win-ciprod08052020" + tag: "ciprod08072020" + tagWindows: "win-ciprod08072020" pullPolicy: IfNotPresent dockerProviderVersion: "10.0.0-4" agentVersion: "1.10.0.1" diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index c82532471..bc27a5384 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod08052020 +ARG IMAGE_TAG=ciprod08072020 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index ac712722a..29533e678 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -343,7 +343,7 @@ spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08072020" imagePullPolicy: IfNotPresent resources: limits: @@ -486,7 +486,7 @@ spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08072020" imagePullPolicy: IfNotPresent resources: limits: @@ -637,7 +637,7 @@ spec: serviceAccountName: omsagent containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08052020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08072020" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 0b81b9c71..a18404772 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -1,9 +1,36 @@ -FROM mcr.microsoft.com/azuremonitor/containerinsights/ciprod:winakslogbase-07022020 +FROM mcr.microsoft.com/windows/servercore:ltsc2019 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod08052020 +ARG IMAGE_TAG=win-ciprod08072020 + +# Do not split this into multiple RUN! 
+# Docker creates a layer for every RUN-Statement +RUN powershell -Command "Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" +# Fluentd depends on cool.io whose fat gem is only available for Ruby < 2.5, so need to specify --platform ruby when install Ruby > 2.5 and install msys2 to get dev tools +RUN choco install -y ruby --version 2.6.5.1 --params "'/InstallDir:C:\ruby26'" \ +&& choco install -y msys2 --version 20190524.0.0.20191030 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ +&& choco install -y vim + +# gangams - optional MSYS2 update via ridk failing in merged docker file so skipping that since we dont need optional update +RUN refreshenv \ +&& ridk install 3 \ +&& echo gem: --no-document >> C:\ProgramData\gemrc \ +&& gem install cool.io -v 1.5.4 --platform ruby \ +&& gem install oj -v 3.3.10 \ +&& gem install json -v 2.2.0 \ +&& gem install fluentd -v 1.10.2 \ +&& gem install win32-service -v 1.0.1 \ +&& gem install win32-ipc -v 0.7.0 \ +&& gem install win32-event -v 0.6.3 \ +&& gem install windows-pr -v 1.2.6 \ +&& gem install tomlrb -v 1.3.0 \ +&& gem install gyoku -v 1.3.1 \ +&& gem sources --clear-all + +# Remove gem cache and chocolatey +RUN powershell -Command "Remove-Item -Force C:\ruby26\lib\ruby\gems\2.6.0\cache\*.gem; Remove-Item -Recurse -Force 'C:\ProgramData\chocolatey'" SHELL ["powershell"] diff --git a/kubernetes/windows/baseimage/Dockerfile b/kubernetes/windows/baseimage/Dockerfile deleted file mode 100644 index 122daa9cc..000000000 --- a/kubernetes/windows/baseimage/Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -FROM mcr.microsoft.com/windows/servercore:ltsc2019 - -# Do not split this into multiple RUN! -# Docker creates a layer for every RUN-Statement -RUN powershell -Command "Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" - -# Fluentd depends on cool.io whose fat gem is only available for Ruby < 2.5, so need to specify --platform ruby when install Ruby > 2.5 and install msys2 to get dev tools -RUN choco install -y ruby --version 2.6.5.1 --params "'/InstallDir:C:\ruby26'" \ -&& choco install -y msys2 --version 20190524.0.0.20191030 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ -&& choco install -y vim -RUN refreshenv \ -&& ridk install 2 3 \ -&& echo gem: --no-document >> C:\ProgramData\gemrc \ -&& gem install cool.io -v 1.5.4 --platform ruby \ -&& gem install oj -v 3.3.10 \ -&& gem install json -v 2.2.0 \ -&& gem install fluentd -v 1.10.2 \ -&& gem install win32-service -v 1.0.1 \ -&& gem install win32-ipc -v 0.7.0 \ -&& gem install win32-event -v 0.6.3 \ -&& gem install windows-pr -v 1.2.6 \ -&& gem install tomlrb -v 1.3.0 \ -&& gem install gyoku -v 1.3.1 \ -&& gem sources --clear-all - -# Remove gem cache and chocolatey -RUN powershell -Command "Remove-Item -Force C:\ruby26\lib\ruby\gems\2.6.0\cache\*.gem; Remove-Item -Recurse -Force 'C:\ProgramData\chocolatey'" - From c5c28f0dc4f89893aea4215c6fd5647b904c4c92 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 13 Aug 2020 11:00:19 -0700 Subject: [PATCH 006/301] Gangams/cluster creation scripts (#414) * onprem k8s script * script updates * scripts for creating non-aks clusters * fix minor text update * updates * script updates * fix * script updates * fix scripts to install docker --- scripts/cluster-creation/README.md | 45 +++++ scripts/cluster-creation/aks-engine.sh | 163 
+++++++++++++++++ scripts/cluster-creation/arc-k8s-cluster.sh | 190 ++++++++++++++++++++ scripts/cluster-creation/aro-v4.sh | 146 +++++++++++++++ scripts/cluster-creation/onprem-k8s.sh | 106 +++++++++++ 5 files changed, 650 insertions(+) create mode 100644 scripts/cluster-creation/README.md create mode 100644 scripts/cluster-creation/aks-engine.sh create mode 100644 scripts/cluster-creation/arc-k8s-cluster.sh create mode 100644 scripts/cluster-creation/aro-v4.sh create mode 100755 scripts/cluster-creation/onprem-k8s.sh diff --git a/scripts/cluster-creation/README.md b/scripts/cluster-creation/README.md new file mode 100644 index 000000000..57d0c5dbf --- /dev/null +++ b/scripts/cluster-creation/README.md @@ -0,0 +1,45 @@ +# Instructions to create k8s clusters + +## On-Prem K8s Cluster + +on-prem k8s cluster can be created on any VM or physical machine using kind. + +``` +bash onprem-k8s.sh --cluster-name +``` + +## AKS-Engine cluster + +aks-engine is unmanaged cluster in azure and you can use below command to create the cluster in azure. + +``` + +# Either you can reuse existing service principal or create one with below instructions +subscriptionId="" +az account set -s ${subscriptionId} +sp=$(az ad sp create-for-rbac --role="Contributor" --scopes="/subscriptions/${subscriptionId}") +# get the appId (i.e. clientid) and password (i.e. clientSecret) +echo $sp + +clientId=$(echo $sp | jq '.appId') +clientSecret=$(echo $sp | jq '.password') + +# create the aks-engine +bash aks-engine.sh --subscription-id "" --client-id "" --client-secret "" --dns-prefix "" --location "" +``` + +## ARO v4 Cluster + +Azure Redhat Openshift v4 cluster can be created with below command. + +> Note: Because of the cleanup policy on internal subscriptions, cluster creation can fail if you dont change cleanup service to none on the subnets of aro vnet before creation. +``` +bash aro-v4.sh --subscription-id "" --resource-group "" --cluster-name "" --location "" +``` +## Azure Arc K8s cluster + +you can connect on-prem k8s cluster or unmanaged k8s cluster such as aks-engine to azure through azure arc. 
+ +``` +bash arc-k8s-cluster.sh --subscription-id "" --resource-group "" --cluster-name "" --location "" --kube-context "" +``` diff --git a/scripts/cluster-creation/aks-engine.sh b/scripts/cluster-creation/aks-engine.sh new file mode 100644 index 000000000..9d287ea07 --- /dev/null +++ b/scripts/cluster-creation/aks-engine.sh @@ -0,0 +1,163 @@ +#!/bin/bash +set -e +TEMP_DIR=temp-$RANDOM +DEFAULT_ONPREM_K8S_CLUSTER="aks-engine-k8s-test" +AKS_ENGINE_VERSION="v0.54.0" + +download-aks-engine() +{ + sudo curl -LO https://github.com/Azure/aks-engine/releases/download/${AKS_ENGINE_VERSION}/aks-engine-v0.54.0-linux-amd64.tar.gz + sudo tar -xvf aks-engine-${AKS_ENGINE_VERSION}-linux-amd64.tar.gz + sudo mv aks-engine-${AKS_ENGINE_VERSION}-linux-amd64 aks-engine + sudo mv -f aks-engine/aks-engine /usr/local/bin +} + + +usage() +{ + local basename=`basename $0` + echo + echo "create aks-engine cluster:" + echo "$basename deploy --subscription-id --client-id --client-secret --dns-prefix --location " +} + +parse_args() +{ + + if [ $# -le 1 ] + then + usage + exit 1 + fi + +# Transform long options to short ones +for arg in "$@"; do + shift + case "$arg" in + "--subscription-id") set -- "$@" "-s" ;; + "--client-id") set -- "$@" "-c" ;; + "--client-secret") set -- "$@" "-w" ;; + "--dns-prefix") set -- "$@" "-d" ;; + "--location") set -- "$@" "-l" ;; + "--"*) usage ;; + *) set -- "$@" "$arg" + esac +done + +local OPTIND opt + +while getopts 'hs:c:w:d:l:' opt; do + case "$opt" in + h) + usage + ;; + + s) + subscriptionId="$OPTARG" + echo "subscriptionId is $OPTARG" + ;; + + c) + clientId="$OPTARG" + echo "clientId is $OPTARG" + ;; + + w) + clientSecret="$OPTARG" + echo "clientSecret is $OPTARG" + ;; + + d) + dnsPrefix="$OPTARG" + echo "dnsPrefix is $OPTARG" + ;; + + l) + location="$OPTARG" + echo "location is $OPTARG" + ;; + + ?) + usage + exit 1 + ;; + esac + done + shift "$(($OPTIND -1))" + + +} +create_cluster() +{ + +sudo touch kubernetes.json +sudo chmod 777 kubernetes.json +# For docker runtime, remove kubernetesConfig block +cat >> kubernetes.json < --resource-group --cluster-name --location --kube-context " +} + +parse_args() +{ + + if [ $# -le 1 ] + then + usage + exit 1 + fi + +# Transform long options to short ones +for arg in "$@"; do + shift + case "$arg" in + "--subscription-id") set -- "$@" "-s" ;; + "--resource-group") set -- "$@" "-r" ;; + "--cluster-name") set -- "$@" "-c" ;; + "--location") set -- "$@" "-l" ;; + "--kube-context") set -- "$@" "-k" ;; + "--"*) usage ;; + *) set -- "$@" "$arg" + esac +done + +local OPTIND opt + +while getopts 'hs:r:c:l:k:' opt; do + case "$opt" in + h) + usage + ;; + + s) + subscriptionId="$OPTARG" + echo "subscriptionId is $OPTARG" + ;; + + r) + resourceGroupName="$OPTARG" + echo "resourceGroupName is $OPTARG" + ;; + + c) + clusterName="$OPTARG" + echo "clusterName is $OPTARG" + ;; + + l) + location="$OPTARG" + echo "location is $OPTARG" + ;; + + k) + kubecontext="$OPTARG" + echo "kubecontext is $OPTARG" + ;; + + ?) 
+ usage + exit 1 + ;; + esac + done + shift "$(($OPTIND -1))" + + +} + +connect_azure_arc_k8s() +{ + + echo "create resource group: ${resourceGroupName} if it doenst exist" + isrgExists=$(az group exists -g ${resourceGroupName}) + if $isrgExists; then + echo "resource group: ${resourceGroupName} already exists" + else + echo "creating resource group ${resourceGroupName} in region since it doesnt exist" + az group create -l ${location} -n ${resourceGroupName} + fi + + echo "connecting k8s cluster with kube-context : ${kubecontext} to azure with clustername: ${clusterName} and resourcegroup: ${resourceGroupName} ..." + az connectedk8s connect --name ${clusterName} --resource-group ${resourceGroupName} + echo "connecting k8s cluster with kube-context : ${kubecontext} to azure with clustername: ${clusterName} and resourcegroup: ${resourceGroupName} completed." +} + + + +echo "connecting k8s cluster to azure arc..." +echo "HELM version: ${HELM_VERSION}" +cd ~ +echo "creating temp directory":$TEMP_DIR +sudo mkdir $TEMP_DIR && cd $TEMP_DIR + +echo "validate args" +parse_args $@ + +echo "set the ${DefaultCloud} for azure cli" +az cloud set -n $DefaultCloud + +echo "login to azure cli" +az login --use-device-code + +echo "set the subscription ${subscriptionId} for cli" +az account set -s $subscriptionId + +echo "installing helm client ..." +install-helm +echo "installing helm client completed." + +echo "installing azure cli ..." +download-and-install-azure-cli +echo "installing azure cli completed." + +echo "installing arc k8s extensions and pre-requisistes ..." +install_arc_k8s_prerequisites +echo "installing arc k8s extensions and pre-requisites completed." + +echo "connecting cluster to azure arc k8s via azure arc " +connect_azure_arc_k8s +echo "connecting cluster to azure arc k8s via azure arc completed." + +echo "connecting k8s cluster to azure arc completed." 
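Once arc-k8s-cluster.sh finishes, the connection can be checked from both the Azure side and the cluster side. A sketch reusing the script's own clusterName and resourceGroupName variables; the commands are standard az/kubectl calls, not part of the patch:

```
# Sketch: confirm the Arc connection created by the script above.
az connectedk8s show --name "$clusterName" --resource-group "$resourceGroupName" \
  --query provisioningState -o tsv        # expect "Succeeded"
kubectl get pods -n azure-arc             # Arc agent pods should be Running
```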
diff --git a/scripts/cluster-creation/aro-v4.sh b/scripts/cluster-creation/aro-v4.sh new file mode 100644 index 000000000..8540ae931 --- /dev/null +++ b/scripts/cluster-creation/aro-v4.sh @@ -0,0 +1,146 @@ +#!/bin/bash +set -e +TEMP_DIR=temp-$RANDOM +DefaultCloud="AzureCloud" +DefaultVnetName="aro-net" +DefaultMasterSubnetName="master-subnet" +DefaultWorkerSubnetName="worker-subnet" + +download-and-install-azure-cli() +{ + # https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-apt?view=azure-cli-latest#install-with-one-command + sudo curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash +} + +register_aro_v4_provider() +{ + echo "register Microsoft.RedHatOpenShift provider" + az provider register -n Microsoft.RedHatOpenShift --wait +} + +usage() +{ + local basename=`basename $0` + echo + echo "create aro v4 cluster:" + echo "$basename --subscription-id --resource-group --cluster-name --location " +} + +parse_args() +{ + + if [ $# -le 1 ] + then + usage + exit 1 + fi + +# Transform long options to short ones +for arg in "$@"; do + shift + case "$arg" in + "--subscription-id") set -- "$@" "-s" ;; + "--resource-group") set -- "$@" "-r" ;; + "--cluster-name") set -- "$@" "-c" ;; + "--location") set -- "$@" "-l" ;; + "--"*) usage ;; + *) set -- "$@" "$arg" + esac +done + +local OPTIND opt + +while getopts 'hs:r:c:l:' opt; do + case "$opt" in + h) + usage + ;; + + s) + subscriptionId="$OPTARG" + echo "subscriptionId is $OPTARG" + ;; + + r) + resourceGroupName="$OPTARG" + echo "resourceGroupName is $OPTARG" + ;; + + c) + clusterName="$OPTARG" + echo "clusterName is $OPTARG" + ;; + + l) + location="$OPTARG" + echo "location is $OPTARG" + ;; + + ?) + usage + exit 1 + ;; + esac + done + shift "$(($OPTIND -1))" +} + +create_aro_v4_cluster() +{ + + echo "create resource group: ${resourceGroupName} if it doenst exist" + isrgExists=$(az group exists -g ${resourceGroupName}) + if $isrgExists; then + echo "resource group: ${resourceGroupName} already exists" + else + echo "creating resource group ${resourceGroupName} in region since it doesnt exist" + az group create -l ${location} -n ${resourceGroupName} + fi + + echo "creating virtual network" + az network vnet create --resource-group ${resourceGroupName} --name ${DefaultVnetName} --address-prefixes 10.0.0.0/22 + + echo "adding empty subnet for master nodes" + az network vnet subnet create --resource-group ${resourceGroupName} --vnet-name ${DefaultVnetName} --name ${DefaultMasterSubnetName} --address-prefixes 10.0.0.0/23 --service-endpoints Microsoft.ContainerRegistry + + echo "adding empty subnet for worker nodes" + az network vnet subnet create --resource-group ${resourceGroupName} --vnet-name ${DefaultVnetName} --name ${DefaultWorkerSubnetName} --address-prefixes 10.0.2.0/23 --service-endpoints Microsoft.ContainerRegistry + + echo "Please make sure disable to diable cleanup service on subnet nsgs of aor vnet for internal subscriptions" + sleep 1m + + echo "Disable subnet private endpoint policies on the master subnet" + az network vnet subnet update --name ${DefaultMasterSubnetName} --resource-group ${resourceGroupName} --vnet-name ${DefaultVnetName} --disable-private-link-service-network-policies true + + echo "creating ARO v4 cluster" + az aro create --resource-group ${resourceGroupName} --name ${clusterName} --vnet ${DefaultVnetName} --master-subnet ${DefaultMasterSubnetName} --worker-subnet ${DefaultWorkerSubnetName} + +} + + +echo "creating aro v4 cluster in specified azure subscription and resource group..." 
+cd ~ +echo "creating temp directory":$TEMP_DIR +sudo mkdir $TEMP_DIR && cd $TEMP_DIR + +echo "validate args" +parse_args $@ + +echo "set the ${DefaultCloud} for azure cli" +az cloud set -n $DefaultCloud + +echo "login to azure cli" +az login --use-device-code + +echo "set the subscription ${subscriptionId} for cli" +az account set -s $subscriptionId + +echo "installing azure cli ..." +download-and-install-azure-cli +echo "installing azure cli completed." + +echo "creating aro v4 cluster ..." +create_aro_v4_cluster +echo "creating aro v4 cluster completed." + +echo "creating aro v4 cluster in specified azure subscription and resource completed." diff --git a/scripts/cluster-creation/onprem-k8s.sh b/scripts/cluster-creation/onprem-k8s.sh new file mode 100755 index 000000000..147681133 --- /dev/null +++ b/scripts/cluster-creation/onprem-k8s.sh @@ -0,0 +1,106 @@ +#!/bin/bash +set -e +TEMP_DIR=temp-$RANDOM +KIND_VERSION="v0.8.1" + +install-kind() +{ +sudo curl -Lo ./kind https://kind.sigs.k8s.io/dl/${KIND_VERSION}/kind-linux-amd64 +sudo chmod +x ./kind +sudo mv ./kind /usr/local/bin/kind +} + +download_install_docker() +{ + echo "download docker script" + sudo curl -L https://get.docker.com/ -o get-docker.sh + echo "installing docker script" + sudo sh get-docker.sh + + echo "add user to docker group" + sudo usermod -aG docker $USER + +} + +create_cluster() +{ +sudo touch kind-config.yaml +sudo chmod 777 kind-config.yaml +cat >> kind-config.yaml < " +} + +parse_args() +{ + + if [ $# -le 1 ] + then + usage + exit 1 + fi + +# Transform long options to short ones +for arg in "$@"; do + shift + case "$arg" in + "--cluster-name") set -- "$@" "-c" ;; + "--"*) usage ;; + *) set -- "$@" "$arg" + esac +done + +local OPTIND opt + +while getopts 'hc:' opt; do + case "$opt" in + h) + usage + ;; + + c) + clusterName="$OPTARG" + echo "clusterName is $OPTARG" + ;; + + ?) + usage + exit 1 + ;; + esac + done + shift "$(($OPTIND -1))" +} + +echo "creating kind k8 cluster ..." +echo "KIND version: ${KIND_VERSION}" +cd ~ +echo "creating temp directory":$TEMP_DIR +sudo mkdir $TEMP_DIR && cd $TEMP_DIR + +echo "parsing args" +parse_args $@ + +echo "download and install docker" +download_install_docker + +echo "download and install kind" +install-kind + +echo "creating cluster: ${clusterName}" +create_cluster + +echo "creating kind k8 cluster completed." 
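After onprem-k8s.sh completes, a short sanity check confirms the kind cluster is reachable. A sketch only; the kind-<cluster-name> context name assumes kind's default context prefix:

```
# Sketch: verify the kind cluster created by onprem-k8s.sh.
kind get clusters
kubectl cluster-info --context "kind-<cluster-name>"
kubectl get nodes -o wide                 # node(s) should report Ready
```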
From d7a3750107e6c8778f13dccb8d20767348a68292 Mon Sep 17 00:00:00 2001 From: bragi92 Date: Fri, 14 Aug 2020 13:00:30 -0700 Subject: [PATCH 007/301] fix: Pin to a particular version of ltsc2019 by SHA (#427) --- kubernetes/windows/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index a18404772..c8162b539 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -1,4 +1,4 @@ -FROM mcr.microsoft.com/windows/servercore:ltsc2019 +FROM mcr.microsoft.com/windows/servercore@sha256:921bed01c2a023310bdbaa288edebd82c4910e536ff206b87e9cbe703ca27505 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" From 5e8de91534c59a9bff4d786f2085195dca67392d Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Fri, 14 Aug 2020 14:17:53 -0700 Subject: [PATCH 008/301] enable collecting npm metrics (optionally) (#425) * enable collecting npm metrics (optionally) * fix default enrichment value * fix adx --- build/linux/installer/conf/telegraf-rs.conf | 42 +++++++ build/linux/installer/conf/telegraf.conf | 41 +++++++ .../installer/datafiles/base_container.data | 1 + .../scripts/tomlparser-npm-config.rb | 113 ++++++++++++++++++ kubernetes/container-azm-ms-agentconfig.yaml | 4 + kubernetes/linux/main.sh | 11 +- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 10 +- 7 files changed, 220 insertions(+), 2 deletions(-) create mode 100644 build/linux/installer/scripts/tomlparser-npm-config.rb diff --git a/build/linux/installer/conf/telegraf-rs.conf b/build/linux/installer/conf/telegraf-rs.conf index f1e9cc282..3f2f65cff 100644 --- a/build/linux/installer/conf/telegraf-rs.conf +++ b/build/linux/installer/conf/telegraf-rs.conf @@ -611,3 +611,45 @@ $AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER # Computer = "placeholder_hostname" # ControllerType = "$CONTROLLER_TYPE" +##npm +[[inputs.prometheus]] + #name_prefix="container.azm.ms/" + ## An array of urls to scrape metrics from. + urls = ["$AZMON_INTEGRATION_NPM_METRICS_URL_LIST_CLUSTER"] + fielddrop = ["$AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER"] + + metric_version = 2 + url_tag = "scrapeUrl" + + ## An array of Kubernetes services to scrape metrics from. + # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] + + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" + + ## Scrape Kubernetes pods for the following prometheus annotations: + ## - prometheus.io/scrape: Enable scraping for this pod + ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + ## set this to `https` & most likely set the tls config. + ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. + ## - prometheus.io/port: If port is not 9102 use this annotation + # monitor_kubernetes_pods = true + + ## Use bearer token for authorization. 
('bearer_token' takes priority) + bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" + ## OR + # bearer_token_string = "abc_123" + + ## Specify timeout duration for slower prometheus clients (default is 3s) + response_timeout = "15s" + + ## Optional TLS Config + tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + #tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + insecure_skip_verify = true + #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] + #[inputs.prometheus.tagpass] + # operation_type = ["create_container", "remove_container", "pull_image"] + diff --git a/build/linux/installer/conf/telegraf.conf b/build/linux/installer/conf/telegraf.conf index b554dd4b3..19b6058be 100644 --- a/build/linux/installer/conf/telegraf.conf +++ b/build/linux/installer/conf/telegraf.conf @@ -703,6 +703,47 @@ insecure_skip_verify = true #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] +##npm +[[inputs.prometheus]] + #name_prefix="container.azm.ms/" + ## An array of urls to scrape metrics from. + urls = ["$AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE"] + + metric_version = 2 + url_tag = "scrapeUrl" + + ## An array of Kubernetes services to scrape metrics from. + # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] + + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" + + ## Scrape Kubernetes pods for the following prometheus annotations: + ## - prometheus.io/scrape: Enable scraping for this pod + ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + ## set this to `https` & most likely set the tls config. + ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. + ## - prometheus.io/port: If port is not 9102 use this annotation + # monitor_kubernetes_pods = true + + ## Use bearer token for authorization. 
('bearer_token' takes priority) + bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" + ## OR + # bearer_token_string = "abc_123" + + ## Specify timeout duration for slower prometheus clients (default is 3s) + response_timeout = "15s" + + ## Optional TLS Config + tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + #tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + insecure_skip_verify = true + #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] + #[inputs.prometheus.tagpass] + # operation_type = ["create_container", "remove_container", "pull_image"] + # [[inputs.exec]] # ## Commands array # interval = "15m" diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index f07e71b2d..fc5a6c8bc 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -125,6 +125,7 @@ MAINTAINER: 'Microsoft Corporation' /opt/tomlparser.rb; build/common/installer/scripts/tomlparser.rb; 755; root; root /opt/td-agent-bit-conf-customizer.rb; build/common/installer/scripts/td-agent-bit-conf-customizer.rb; 755; root; root /opt/ConfigParseErrorLogger.rb; build/common/installer/scripts/ConfigParseErrorLogger.rb; 755; root; root +/opt/tomlparser-npm-config.rb; build/linux/installer/scripts/tomlparser-npm-config.rb; 755; root; root /opt/microsoft/omsagent/plugin/filter_cadvisor_health_container.rb; source/plugins/ruby/filter_cadvisor_health_container.rb; 644; root; root diff --git a/build/linux/installer/scripts/tomlparser-npm-config.rb b/build/linux/installer/scripts/tomlparser-npm-config.rb new file mode 100644 index 000000000..c5953836b --- /dev/null +++ b/build/linux/installer/scripts/tomlparser-npm-config.rb @@ -0,0 +1,113 @@ +#!/usr/local/bin/ruby + +#this should be require relative in Linux and require in windows, since it is a gem install on windows +@os_type = ENV["OS_TYPE"] +if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 + require "tomlrb" +else + require_relative "tomlrb" +end + +require_relative "ConfigParseErrorLogger" + +@configMapMountPath = "/etc/config/settings/integrations" +@configSchemaVersion = "" +@collect_basic_npm_metrics = false +@collect_advanced_npm_metrics = false +@npm_node_url="http://$NODE_IP:10091/node-metrics" +@npm_cluster_url="http://npm-metrics-cluster-service.kube-system:9000/cluster-metrics" +@npm_basic_drop_metrics_cluster = "npm_ipset_counts" + +# Use parser to parse the configmap toml file to a ruby structure +def parseConfigMap + begin + # Check to see if config map is created + if (File.file?(@configMapMountPath)) + puts "config::configmap container-azm-ms-agentconfig for npm metrics found, parsing values" + parsedConfig = Tomlrb.load_file(@configMapMountPath, symbolize_keys: true) + puts "config::Successfully parsed mounted config map for npm metrics" + return parsedConfig + else + puts "config::configmap container-azm-ms-agentconfig for npm metrics not mounted, using defaults" + return nil + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config map for npm metrics: #{errorStr}, using defaults, please check config map for errors") + return nil + end +end + +# Use the ruby structure created after config parsing to set the right values to be used as environment variables +def populateSettingValuesFromConfigMap(parsedConfig) + begin + if !parsedConfig.nil? && !parsedConfig[:integrations].nil? && !parsedConfig[:integrations][:azure_network_policy_manager].nil? && !parsedConfig[:integrations][:azure_network_policy_manager][:collect_advanced_metrics].nil? + advanced_npm_metrics = parsedConfig[:integrations][:azure_network_policy_manager][:collect_advanced_metrics].to_s + puts "got:integrations.azure_network_policy_manager.collect_advanced_metrics='#{advanced_npm_metrics}'" + if !advanced_npm_metrics.nil? && advanced_npm_metrics.strip.casecmp("true") == 0 + @collect_advanced_npm_metrics = true + else + @collect_advanced_npm_metrics = false + end + puts "set:integrations.azure_network_policy_manager.collect_advanced_metrics=#{@collect_advanced_npm_metrics}" + end + rescue => errorStr + puts "config::error:Exception while reading config settings for npm advanced setting - #{errorStr}, using defaults" + @collect_advanced_npm_metrics = false + end + begin + if !parsedConfig.nil? && !parsedConfig[:integrations].nil? && !parsedConfig[:integrations][:azure_network_policy_manager].nil? && !parsedConfig[:integrations][:azure_network_policy_manager][:collect_basic_metrics].nil? + basic_npm_metrics = parsedConfig[:integrations][:azure_network_policy_manager][:collect_basic_metrics].to_s + puts "got:integrations.azure_network_policy_manager.collect_basic_metrics='#{basic_npm_metrics}'" + if !basic_npm_metrics.nil? && basic_npm_metrics.strip.casecmp("true") == 0 + @collect_basic_npm_metrics = true + else + @collect_basic_npm_metrics = false + end + puts "set:integrations.azure_network_policy_manager.collect_basic_metrics=#{@collect_basic_npm_metrics}" + end + rescue => errorStr + puts "config::error:Exception while reading config settings for npm basic setting - #{errorStr}, using defaults" + @collect_basic_npm_metrics = false + end +end + +@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] +puts "****************Start Config Processing********************" +if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? 
&& @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it + configMapSettings = parseConfigMap + if !configMapSettings.nil? + populateSettingValuesFromConfigMap(configMapSettings) + end +else + if (File.file?(@configMapMountPath)) + ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported schema version") + end + @collect_basic_npm_metrics = false + @collect_advanced_npm_metrics = false +end + +# Write the settings to file, so that they can be set as environment variables +file = File.open("integration_npm_config_env_var", "w") + +if !file.nil? + if @collect_advanced_npm_metrics == true + file.write("export TELEMETRY_NPM_INTEGRATION_METRICS_ADVANCED=1\n") + file.write("export AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE=#{@npm_node_url}\n") + file.write("export AZMON_INTEGRATION_NPM_METRICS_URL_LIST_CLUSTER=#{@npm_cluster_url}\n") + file.write("export AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER=\n") + elsif @collect_basic_npm_metrics == true + file.write("export TELEMETRY_NPM_INTEGRATION_METRICS_BASIC=1\n") + file.write("export AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE=#{@npm_node_url}\n") + file.write("export AZMON_INTEGRATION_NPM_METRICS_URL_LIST_CLUSTER=#{@npm_cluster_url}\n") + file.write("export AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER=#{@npm_basic_drop_metrics_cluster}\n") + else + file.write("export AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE=\n") + file.write("export AZMON_INTEGRATION_NPM_METRICS_URL_LIST_CLUSTER=\n") + file.write("export AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER=\n") + end + # Close file after writing all environment variables + file.close +else + puts "Exception while opening file for writing config environment variables" + puts "****************End Config Processing********************" +end \ No newline at end of file diff --git a/kubernetes/container-azm-ms-agentconfig.yaml b/kubernetes/container-azm-ms-agentconfig.yaml index f3f442608..58e09f041 100644 --- a/kubernetes/container-azm-ms-agentconfig.yaml +++ b/kubernetes/container-azm-ms-agentconfig.yaml @@ -100,6 +100,10 @@ data: container_memory_rss_threshold_percentage = 95.0 # Threshold for container memoryWorkingSet, metric will be sent only when memory working set exceeds or becomes equal to the following percentage container_memory_working_set_threshold_percentage = 95.0 + integrations: |- + [integrations.azure_network_policy_manager] + collect_basic_metrics = false + collect_advanced_metrics = false metadata: name: container-azm-ms-agentconfig namespace: kube-system diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 92f4977d6..311470660 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -160,7 +160,7 @@ done source config_env_var -#Parse the configmap to set the right environment variables. +#Parse the configmap to set the right environment variables for health feature. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-health-config.rb cat health_config_env_var | while read line; do @@ -169,6 +169,15 @@ cat health_config_env_var | while read line; do done source health_config_env_var +#Parse the configmap to set the right environment variables for network policy manager (npm) integration. 
+/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-npm-config.rb + +cat integration_npm_config_env_var | while read line; do + #echo $line + echo $line >> ~/.bashrc +done +source integration_npm_config_env_var + #Replace the placeholders in td-agent-bit.conf file for fluentbit with custom/default values in daemonset if [ ! -e "/etc/config/kube.conf" ]; then /opt/microsoft/omsagent/ruby/bin/ruby td-agent-bit-conf-customizer.rb diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 42ecfcaf0..13796cd1e 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -33,6 +33,8 @@ class CAdvisorMetricsAPIClient @cAdvisorMetricsSecurePort = ENV["IS_SECURE_CADVISOR_PORT"] @containerLogsRoute = ENV["AZMON_CONTAINER_LOGS_ROUTE"] @hmEnabled = ENV["AZMON_CLUSTER_ENABLE_HEALTH_MODEL"] + @npmIntegrationBasic = ENV["TELEMETRY_NPM_INTEGRATION_METRICS_BASIC"] + @npmIntegrationAdvanced = ENV["TELEMETRY_NPM_INTEGRATION_METRICS_ADVANCED"] @LogPath = "/var/opt/microsoft/docker-cimprov/log/kubernetes_perf_log.txt" @Log = Logger.new(@LogPath, 2, 10 * 1048576) #keep last 2 files, max log file size = 10M @@ -250,7 +252,13 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met #telemetry about health model if (!@hmEnabled.nil? && !@hmEnabled.empty?) telemetryProps["hmEnabled"] = @hmEnabled - end + end + #telemetry for npm integration + if (!@npmIntegrationAdvanced.nil? && !@npmIntegrationAdvanced.empty?) + telemetryProps["int-npm-a"] = "1" + elsif (!@npmIntegrationBasic.nil? && !@npmIntegrationBasic.empty?) + telemetryProps["int-npm-b"] = "1" + end ApplicationInsightsUtility.sendMetricTelemetry(metricNametoReturn, metricValue, telemetryProps) end end From 17e7ff8bf65c6fd3ab2dc2b47043249055e2dc3d Mon Sep 17 00:00:00 2001 From: saaror <31900410+saaror@users.noreply.github.com> Date: Mon, 17 Aug 2020 00:56:26 -0700 Subject: [PATCH 009/301] Saaror patch 3 (#426) * Create README.MD Creating content for Kubecon lab * Update README.MD * Update README.MD --- Kubecon/README.MD | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 Kubecon/README.MD diff --git a/Kubecon/README.MD b/Kubecon/README.MD new file mode 100644 index 000000000..873cfaf9a --- /dev/null +++ b/Kubecon/README.MD @@ -0,0 +1,36 @@ +# Kubecon Azure Monitor for containers lab + +## Overview + +### This Azure Monitor for containers lab will give you hands on experience to monitor AKS workloads. In this lab you will be working Azure Monitor, Log Analytics and Azure Monitor for Container Insights. + +## Instructions for lab + +1. Set-up environment [Setup Guide](https://github.com/rkuehfus/pre-ready-2019-H1/blob/master/Student/Guides/Deployment%20Setup%20Guide.docx?raw=true) + +2. Tasks for the lab + * From your Visual Studio Server, deploy the eShoponWeb application to AKS using Dev Spaces + * From Azure Monitor, locate the container running the eShoponWeb application + * Generate an exception in the eShoponWeb application(Hint: Try to change your password) + * Optimize the Azure Monitor for contains ingestion cost by fine tuning log-collection parameters like std-out/std-error, namespace. + +## Outcome + +### Understand Azure Monitor capabilities, facilitate an Azure Monitor customer conversation, and demo key features of Azure Monitor. 
+ +## Target Audience + +This content is targeted at DevOps/SRE engineers who want to build their knowledge of Azure Monitor; people with a passion for monitoring are also more than welcome to attend. + +## Prerequisites + 1. Please review the following content before the event + a. [Azure Monitor for containers Overview](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-overview) + b. [Optimize Azure Monitor for containers cost](https://medium.com/microsoftazure/azure-monitor-for-containers-optimizing-data-collection-settings-for-cost-ce6f848aca32) + +2. Attendees have access to an Azure Subscription where they can each deploy the provided ARM template that will build a very detailed infrastructure to monitor. This includes the Vnet, subnets, NSG(s), LB(s), NAT rules, scale sets and a fully functional .NET Core Application (eShopOnWeb) to monitor. +3. Attendees should have a level 200-300 understanding of the Azure platform. Understand concepts like PowerShell, Azure CLI, ARM, resource groups, RBAC, network, storage, compute, scale sets, virtual machines and security. Previous experience working with ARM templates is recommended. +4. Access to a machine with Visual Studio Code and the Azure PowerShell Modules loaded or Azure CLI. VS Code ARM and PowerShell extensions should be configured. + +![alt text](https://raw.githubusercontent.com/rkuehfus/pre-ready-2019-H1/master/monitoringhackdiagram.png) + + From 6c7c6757b8c8cc87eaa89516393788d3d942857b Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 18 Aug 2020 11:53:59 -0700 Subject: [PATCH 010/301] Gangams/add containerd support to windows agent (#428) * wip * wip * wip * wip * bug fix related to uri * wip * wip * fix bug with ignore cert validation * logic to ignore cert validation * minor * fix minor debug log issue * improve log message * debug message * fix bug with nullorempty check * remove debug statements * refactor parsers * add debug message * clean up * chart updates * fix formatting issues --- .../installer/conf/fluent-cri-parser.conf | 6 + .../installer/conf/fluent-docker-parser.conf | 5 + build/windows/installer/conf/fluent.conf | 32 ++- .../templates/omsagent-daemonset-windows.yaml | 7 + kubernetes/omsagent.yaml | 4 + kubernetes/windows/Dockerfile | 3 + kubernetes/windows/main.ps1 | 199 ++++++++++++++---- 7 files changed, 198 insertions(+), 58 deletions(-) create mode 100644 build/windows/installer/conf/fluent-cri-parser.conf create mode 100644 build/windows/installer/conf/fluent-docker-parser.conf diff --git a/build/windows/installer/conf/fluent-cri-parser.conf b/build/windows/installer/conf/fluent-cri-parser.conf new file mode 100644 index 000000000..86f1572ca --- /dev/null +++ b/build/windows/installer/conf/fluent-cri-parser.conf @@ -0,0 +1,6 @@ + + @type regexp + expression ^(?
diff --git a/build/windows/installer/conf/fluent-docker-parser.conf b/build/windows/installer/conf/fluent-docker-parser.conf new file mode 100644 index 000000000..9dc800aeb --- /dev/null +++ b/build/windows/installer/conf/fluent-docker-parser.conf @@ -0,0 +1,5 @@ + + @type json + time_format %Y-%m-%dT%H:%M:%S.%NZ + keep_time_key true + diff --git a/build/windows/installer/conf/fluent.conf b/build/windows/installer/conf/fluent.conf index a4cacbcf6..c96300b1e 100644 --- a/build/windows/installer/conf/fluent.conf +++ b/build/windows/installer/conf/fluent.conf @@ -12,11 +12,8 @@ @log_level trace path_key tailed_path limit_recently_modified 5m - - @type json - time_format %Y-%m-%dT%H:%M:%S.%NZ - keep_time_key true - + # if the container runtime is non docker then this will be updated to fluent-cri-parser.conf during container startup + @include fluent-docker-parser.conf @@ -27,11 +24,8 @@ @log_level trace path_key tailed_path read_from_head true - - @type json - time_format %Y-%m-%dT%H:%M:%S.%NZ - keep_time_key true - + # if the container runtime is non docker then this will be updated to fluent-cri-parser.conf during container startup + @include fluent-docker-parser.conf @@ -59,13 +53,13 @@ - overflow_action throw_exception - chunk_limit_size 32k - queued_chunks_limit_size 256 - flush_interval 1 - flush_thread_interval 0.5 - flush_thread_burst_interval 0.01 - flush_thread_count 4 - retry_forever true - + overflow_action throw_exception + chunk_limit_size 32k + queued_chunks_limit_size 256 + flush_interval 1 + flush_thread_interval 0.5 + flush_thread_burst_interval 0.01 + flush_thread_count 4 + retry_forever true + diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index 0ea7a9af6..b8e667398 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -53,6 +53,13 @@ spec: - name: CONTROLLER_TYPE value: "DaemonSet" - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP volumeMounts: - mountPath: C:\ProgramData\docker\containers name: docker-windows-containers diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 29533e678..db788a37e 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -660,6 +660,10 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP volumeMounts: - mountPath: C:\ProgramData\docker\containers name: docker-windows-containers diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index c8162b539..06e11e73a 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -56,6 +56,9 @@ COPY ./omsagentwindows/out_oms.so /opt/omsagentwindows/out_oms.so # copy fluent, fluent-bit and out_oms conf files COPY ./omsagentwindows/installer/conf/fluent.conf /etc/fluent/ +# copy fluent docker and cri parser conf files +COPY ./omsagentwindows/installer/conf/fluent-cri-parser.conf /etc/fluent/ +COPY ./omsagentwindows/installer/conf/fluent-docker-parser.conf /etc/fluent/ COPY ./omsagentwindows/installer/conf/fluent-bit.conf /etc/fluent-bit COPY ./omsagentwindows/installer/conf/out_oms.conf /etc/omsagentwindows diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index b7ddfa8e7..de82722ad 100644 --- a/kubernetes/windows/main.ps1 +++ 
b/kubernetes/windows/main.ps1 @@ -1,34 +1,51 @@ -function Confirm-WindowsServiceExists($name) -{ - if (Get-Service $name -ErrorAction SilentlyContinue) +Add-Type @" + using System; + using System.Net; + using System.Net.Security; + using System.Security.Cryptography.X509Certificates; + public class ServerCertificateValidationCallback { + public static void Ignore() + { + ServicePointManager.ServerCertificateValidationCallback += + delegate + ( + Object obj, + X509Certificate certificate, + X509Chain chain, + SslPolicyErrors errors + ) + { + return true; + }; + } + } +"@ +function Confirm-WindowsServiceExists($name) { + if (Get-Service $name -ErrorAction SilentlyContinue) { return $true } return $false } -function Remove-WindowsServiceIfItExists($name) -{ +function Remove-WindowsServiceIfItExists($name) { $exists = Confirm-WindowsServiceExists $name - if ($exists) - { + if ($exists) { sc.exe \\server delete $name } } -function Start-FileSystemWatcher -{ +function Start-FileSystemWatcher { Start-Process powershell -NoNewWindow .\filesystemwatcher.ps1 } #register fluentd as a windows service -function Set-EnvironmentVariables -{ +function Set-EnvironmentVariables { $domain = "opinsights.azure.com" if (Test-Path /etc/omsagent-secret/DOMAIN) { # TODO: Change to omsagent-secret before merging - $domain = Get-Content /etc/omsagent-secret/DOMAIN + $domain = Get-Content /etc/omsagent-secret/DOMAIN } # Set DOMAIN @@ -38,7 +55,7 @@ function Set-EnvironmentVariables $wsID = "" if (Test-Path /etc/omsagent-secret/WSID) { # TODO: Change to omsagent-secret before merging - $wsID = Get-Content /etc/omsagent-secret/WSID + $wsID = Get-Content /etc/omsagent-secret/WSID } # Set DOMAIN @@ -48,7 +65,7 @@ function Set-EnvironmentVariables $wsKey = "" if (Test-Path /etc/omsagent-secret/KEY) { # TODO: Change to omsagent-secret before merging - $wsKey = Get-Content /etc/omsagent-secret/KEY + $wsKey = Get-Content /etc/omsagent-secret/KEY } # Set KEY @@ -58,7 +75,7 @@ function Set-EnvironmentVariables $proxy = "" if (Test-Path /etc/omsagent-secret/PROXY) { # TODO: Change to omsagent-secret before merging - $proxy = Get-Content /etc/omsagent-secret/PROXY + $proxy = Get-Content /etc/omsagent-secret/PROXY Write-Host "Validating the proxy configuration since proxy configuration provided" # valide the proxy endpoint configuration if (![string]::IsNullOrEmpty($proxy)) { @@ -66,26 +83,22 @@ function Set-EnvironmentVariables if (![string]::IsNullOrEmpty($proxy)) { $proxy = [string]$proxy.Trim(); $parts = $proxy -split "@" - if ($parts.Length -ne 2) - { + if ($parts.Length -ne 2) { Write-Host "Invalid ProxyConfiguration $($proxy). EXITING....." exit 1 } $subparts1 = $parts[0] -split "//" - if ($subparts1.Length -ne 2) - { + if ($subparts1.Length -ne 2) { Write-Host "Invalid ProxyConfiguration $($proxy). EXITING....." exit 1 } $protocol = $subparts1[0].ToLower().TrimEnd(":") - if (!($protocol -eq "http") -and !($protocol -eq "https")) - { + if (!($protocol -eq "http") -and !($protocol -eq "https")) { Write-Host "Unsupported protocol in ProxyConfiguration $($proxy). EXITING....." exit 1 } $subparts2 = $parts[1] -split ":" - if ($subparts2.Length -ne 2) - { + if ($subparts2.Length -ne 2) { Write-Host "Invalid ProxyConfiguration $($proxy). EXITING....." 
exit 1 } @@ -118,46 +131,154 @@ function Set-EnvironmentVariables .\setenv.ps1 } -function Start-Fluent -{ +function Get-ContainerRuntime { + # default container runtime and make default as containerd when containerd becomes default in AKS + $containerRuntime = "docker" + $response = "" + $NODE_IP = "" + try { + if (![string]::IsNullOrEmpty([System.Environment]::GetEnvironmentVariable("NODE_IP", "PROCESS"))) { + $NODE_IP = [System.Environment]::GetEnvironmentVariable("NODE_IP", "PROCESS") + } + elseif (![string]::IsNullOrEmpty([System.Environment]::GetEnvironmentVariable("NODE_IP", "USER"))) { + $NODE_IP = [System.Environment]::GetEnvironmentVariable("NODE_IP", "USER") + } + elseif (![string]::IsNullOrEmpty([System.Environment]::GetEnvironmentVariable("NODE_IP", "MACHINE"))) { + $NODE_IP = [System.Environment]::GetEnvironmentVariable("NODE_IP", "MACHINE") + } + + if (![string]::IsNullOrEmpty($NODE_IP)) { + $isPodsAPISuccess = $false + Write-Host "Value of NODE_IP environment variable : $($NODE_IP)" + try { + Write-Host "Making API call to http://$($NODE_IP):10255/pods" + $response = Invoke-WebRequest -uri http://$($NODE_IP):10255/pods -UseBasicParsing + Write-Host "Response status code of API call to http://$($NODE_IP):10255/pods : $($response.StatusCode)" + } + catch { + Write-Host "API call to http://$($NODE_IP):10255/pods failed" + } + + if (![string]::IsNullOrEmpty($response) -and $response.StatusCode -eq 200) { + Write-Host "API call to http://$($NODE_IP):10255/pods succeeded" + $isPodsAPISuccess = $true + } + else { + try { + Write-Host "Making API call to https://$($NODE_IP):10250/pods" + # ignore certificate validation since kubelet uses self-signed cert + [ServerCertificateValidationCallback]::Ignore() + $response = Invoke-WebRequest -Uri https://$($NODE_IP):10250/pods -Headers @{'Authorization' = "Bearer $(Get-Content /var/run/secrets/kubernetes.io/serviceaccount/token)" } -UseBasicParsing + Write-Host "Response status code of API call to https://$($NODE_IP):10250/pods : $($response.StatusCode)" + if (![string]::IsNullOrEmpty($response) -and $response.StatusCode -eq 200) { + Write-Host "API call to https://$($NODE_IP):10250/pods succeeded" + $isPodsAPISuccess = $true + } + } + catch { + Write-Host "API call to https://$($NODE_IP):10250/pods failed" + } + } + + if ($isPodsAPISuccess) { + if (![string]::IsNullOrEmpty($response.Content)) { + $podList = $response.Content | ConvertFrom-Json + if (![string]::IsNullOrEmpty($podList)) { + $podItems = $podList.Items + if ($podItems.Length -gt 0) { + Write-Host "found pod items: $($podItems.Length)" + for ($index = 0; $index -le $podItems.Length ; $index++) { + Write-Host "current podItem index : $($index)" + $pod = $podItems[$index] + if (![string]::IsNullOrEmpty($pod) -and + ![string]::IsNullOrEmpty($pod.status) -and + ![string]::IsNullOrEmpty($pod.status.phase) -and + $pod.status.phase -eq "Running" -and + $pod.status.ContainerStatuses.Length -gt 0) { + $containerID = $pod.status.ContainerStatuses[0].containerID + $detectedContainerRuntime = $containerID.split(":")[0].trim() + Write-Host "detected containerRuntime as : $($detectedContainerRuntime)" + if (![string]::IsNullOrEmpty($detectedContainerRuntime) -and [string]$detectedContainerRuntime.StartsWith('docker') -eq $false) { + $containerRuntime = $detectedContainerRuntime + } + Write-Host "using containerRuntime as : $($containerRuntime)" + break + } + } + } + else { + Write-Host "got podItems count is 0 hence using default container runtime: $($containerRuntime)" + } + + + } + else 
{ + Write-Host "got podList null or empty hence using default container runtime: $($containerRuntime)" + } + } + else { + Write-Host "got empty response content for /Pods API call hence using default container runtime: $($containerRuntime)" + } + } + } + else { + Write-Host "got empty NODE_IP environment variable" + } + # set CONTAINER_RUNTIME env for debug and telemetry purpose + [System.Environment]::SetEnvironmentVariable("CONTAINER_RUNTIME", $containerRuntime, "Process") + [System.Environment]::SetEnvironmentVariable("CONTAINER_RUNTIME", $containerRuntime, "Machine") + } + catch { + $e = $_.Exception + Write-Host $e + Write-Host "exception occured on getting container runtime hence using default container runtime: $($containerRuntime)" + } + + return $containerRuntime +} + +function Start-Fluent { + # Run fluent-bit service first so that we do not miss any logs being forwarded by the fluentd service. # Run fluent-bit as a background job. Switch this to a windows service once fluent-bit supports natively running as a windows service Start-Job -ScriptBlock { Start-Process -NoNewWindow -FilePath "C:\opt\fluent-bit\bin\fluent-bit.exe" -ArgumentList @("-c", "C:\etc\fluent-bit\fluent-bit.conf", "-e", "C:\opt\omsagentwindows\out_oms.so") } + $containerRuntime = Get-ContainerRuntime + #register fluentd as a service and start # there is a known issues with win32-service https://github.com/chef/win32-service/issues/70 + if (![string]::IsNullOrEmpty($containerRuntime) -and [string]$containerRuntime.StartsWith('docker') -eq $false) { + # change parser from docker to cri if the container runtime is not docker + Write-Host "changing parser from Docker to CRI since container runtime : $($containerRuntime) and which is non-docker" + (Get-Content -Path C:/etc/fluent/fluent.conf -Raw) -replace 'fluent-docker-parser.conf','fluent-cri-parser.conf' | Set-Content C:/etc/fluent/fluent.conf + } + fluentd --reg-winsvc i --reg-winsvc-auto-start --winsvc-name fluentdwinaks --reg-winsvc-fluentdopt '-c C:/etc/fluent/fluent.conf -o C:/etc/fluent/fluent.log' Notepad.exe | Out-Null } -function Generate-Certificates -{ +function Generate-Certificates { Write-Host "Generating Certificates" C:\\opt\\omsagentwindows\\certgenerator\\certificategenerator.exe } -function Test-CertificatePath -{ +function Test-CertificatePath { $certLocation = $env:CI_CERT_LOCATION - $keyLocation = $env:CI_KEY_LOCATION - if (!(Test-Path $certLocation)) - { + $keyLocation = $env:CI_KEY_LOCATION + if (!(Test-Path $certLocation)) { Write-Host "Certificate file not found at $($certLocation). EXITING....." exit 1 } - else - { + else { Write-Host "Certificate file found at $($certLocation)" } - if (! (Test-Path $keyLocation)) - { + if (! (Test-Path $keyLocation)) { Write-Host "Key file not found at $($keyLocation). EXITING...." exit 1 } - else - { + else { Write-Host "Key file found at $($keyLocation)" } } @@ -172,7 +293,7 @@ Test-CertificatePath Start-Fluent # List all powershell processes running. 
This should have main.ps1 and filesystemwatcher.ps1 -Get-WmiObject Win32_process | Where-Object {$_.Name -match 'powershell'} | Format-Table -Property Name, CommandLine, ProcessId +Get-WmiObject Win32_process | Where-Object { $_.Name -match 'powershell' } | Format-Table -Property Name, CommandLine, ProcessId #check if fluentd service is running Get-Service fluentdwinaks From bac8a32aa72b50a2e1ac1844404d7dbdb9ed4d04 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 19 Aug 2020 19:16:31 -0700 Subject: [PATCH 011/301] Gangams/arc k8s metrics (#413) * cluster identity token * wip * fix exception * fix exceptions * fix exception * fix bug * fix bug * minor update * refactor the code * more refactoring * fix bug * typo fix * fix typo * wait for 1min after token renewal request * add proxy support for arc k8s mdm endpoint * avoid additional get call * minor line ending fix * wip * have separate log for arc k8s cluster identity * fix bug on creating crd resource * remove update permission since not required * fixed some bugs * fix pr feedback * remove list since its not required --- README.md | 6 +- build/linux/Makefile | 2 +- .../installer/datafiles/base_container.data | 6 +- .../templates/omsagent-arc-k8s-crd.yaml | 9 + .../templates/omsagent-rbac.yaml | 8 + .../build-and-publish-docker-image.sh | 2 +- .../build-and-publish-docker-image.ps1 | 2 +- source/plugins/ruby/KubernetesApiClient.rb | 17 +- .../plugins/ruby/arc_k8s_cluster_identity.rb | 216 ++++++++++++++++++ source/plugins/ruby/out_mdm.rb | 61 +++-- 10 files changed, 307 insertions(+), 22 deletions(-) create mode 100644 charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml create mode 100644 source/plugins/ruby/arc_k8s_cluster_identity.rb diff --git a/README.md b/README.md index 659fe0161..d5d874c9c 100644 --- a/README.md +++ b/README.md @@ -200,11 +200,15 @@ docker build -t /: --build-arg IMAGE_TAG= . 
docker push /: ``` -### Build Cert generator, Out OMS Plugun and Docker Image and Publish Docker Image +### Build Cert generator, Out OMS Plugin and Docker Image and Publish Docker Image If you have code cloned on to windows, you can built everything for windows agent on windows machine via below instructions ``` +# install pre-requisites if you havent installed already +cd %userprofile%\Docker-Provider\kubernetes\windows # based on your repo path +.\install-build-pre-requisites.ps1 + cd %userprofile%\Docker-Provider\kubernetes\windows\dockerbuild # based on your repo path docker login # if you want to publish the image to acr then login to acr via `docker login ` powershell -ExecutionPolicy bypass # switch to powershell if you are not on powershell already diff --git a/build/linux/Makefile b/build/linux/Makefile index 0a20ed205..3f35e1204 100644 --- a/build/linux/Makefile +++ b/build/linux/Makefile @@ -118,7 +118,7 @@ distclean : clean PROVIDER_STATUS: @echo "========================= Performing Building provider" @echo "clean up everything under: $(INTERMEDIATE_BASE_DIR) to avoid picking up old binaries" - $(RMDIR) $(INTERMEDIATE_BASE_DIR) + sudo $(RMDIR) $(INTERMEDIATE_BASE_DIR) KIT_STATUS: @echo "========================= Performing Building provider tests" diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index fc5a6c8bc..87b89b14c 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -50,7 +50,7 @@ MAINTAINER: 'Microsoft Corporation' /opt/microsoft/omsagent/plugin/kubernetes_container_inventory.rb; source/plugins/ruby/kubernetes_container_inventory.rb; 644; root; root /opt/microsoft/omsagent/plugin/proxy_utils.rb; source/plugins/ruby/proxy_utils.rb; 644; root; root - +/opt/microsoft/omsagent/plugin/arc_k8s_cluster_identity.rb; source/plugins/ruby/arc_k8s_cluster_identity.rb; 644; root; root /opt/microsoft/omsagent/plugin/out_mdm.rb; source/plugins/ruby/out_mdm.rb; 644; root; root /opt/microsoft/omsagent/plugin/filter_cadvisor2mdm.rb; source/plugins/ruby/filter_cadvisor2mdm.rb; 644; root; root /opt/microsoft/omsagent/plugin/filter_telegraf2mdm.rb; source/plugins/ruby/filter_telegraf2mdm.rb; 644; root; root @@ -276,6 +276,10 @@ touch /var/opt/microsoft/docker-cimprov/log/fluent_forward_failed.log chmod 666 /var/opt/microsoft/docker-cimprov/log/fluent_forward_failed.log chown omsagent:omiusers /var/opt/microsoft/docker-cimprov/log/fluent_forward_failed.log +touch /var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log +chmod 666 /var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log +chown omsagent:omiusers /var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log + mv /etc/opt/microsoft/docker-cimprov/container.conf /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf chown omsagent:omsagent /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf diff --git a/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml b/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml new file mode 100644 index 000000000..f7873de40 --- /dev/null +++ b/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml @@ -0,0 +1,9 @@ +{{- if contains "microsoft.kubernetes/connectedclusters" (.Values.omsagent.env.clusterId | lower) }} +apiVersion: clusterconfig.azure.com/v1beta1 +kind: AzureClusterIdentityRequest +metadata: + name: container-insights-clusteridentityrequest + namespace: azure-arc 
+spec: + audience: https://monitoring.azure.com/ +{{- end }} diff --git a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml index 9903f41ff..4f7408e7c 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml @@ -27,8 +27,16 @@ rules: - apiGroups: ["azmon.container.insights"] resources: ["healthstates"] verbs: ["get", "create", "patch"] +- apiGroups: ["clusterconfig.azure.com"] + resources: ["azureclusteridentityrequests"] + resourceNames: ["container-insights-clusteridentityrequest"] + verbs: ["get", "create", "patch"] - nonResourceURLs: ["/metrics"] verbs: ["get"] +- apiGroups: [""] + resources: ["secrets"] + resourceNames: ["container-insights-clusteridentityrequest-token"] + verbs: ["get"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 diff --git a/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh b/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh index 982c8c491..267f15f32 100644 --- a/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh +++ b/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh @@ -127,7 +127,7 @@ baseDir=$(dirname $kubernetsDir) buildDir=$baseDir/build/linux dockerFileDir=$baseDir/kubernetes/linux -echo "sour code base directory: $baseDir" +echo "source code base directory: $baseDir" echo "build directory for docker provider: $buildDir" echo "docker file directory: $dockerFileDir" diff --git a/kubernetes/windows/dockerbuild/build-and-publish-docker-image.ps1 b/kubernetes/windows/dockerbuild/build-and-publish-docker-image.ps1 index 27be90d48..dbcfa6097 100644 --- a/kubernetes/windows/dockerbuild/build-and-publish-docker-image.ps1 +++ b/kubernetes/windows/dockerbuild/build-and-publish-docker-image.ps1 @@ -35,7 +35,7 @@ $imagerepo = $imageparts[0] if ($imagetag.StartsWith("win-") -eq $false) { Write-Host "adding win- prefix image tag since its not provided" - $imagetag = "win"-$imagetag + $imagetag = "win-$imagetag" } Write-Host "image tag used is :$imagetag" diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 987d290aa..36dcdd8c6 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -99,7 +99,6 @@ def getResourceUri(resource, api_group) elsif api_group == @@ApiGroupHPA return "https://#{ENV["KUBERNETES_SERVICE_HOST"]}:#{ENV["KUBERNETES_PORT_443_TCP_PORT"]}/apis/" + @@ApiGroupHPA + "/" + @@ApiVersionHPA + "/" + resource end - else @Log.warn ("Kubernetes environment variable not set KUBERNETES_SERVICE_HOST: #{ENV["KUBERNETES_SERVICE_HOST"]} KUBERNETES_PORT_443_TCP_PORT: #{ENV["KUBERNETES_PORT_443_TCP_PORT"]}. Unable to form resourceUri") return nil @@ -743,7 +742,7 @@ def getResourcesAndContinuationToken(uri, api_group: nil) resourceInventory = nil begin @Log.info "KubernetesApiClient::getResourcesAndContinuationToken : Getting resources from Kube API using url: #{uri} @ #{Time.now.utc.iso8601}" - resourceInfo = getKubeResourceInfo(uri, api_group:api_group) + resourceInfo = getKubeResourceInfo(uri, api_group: api_group) @Log.info "KubernetesApiClient::getResourcesAndContinuationToken : Done getting resources from Kube API using url: #{uri} @ #{Time.now.utc.iso8601}" if !resourceInfo.nil? 
@Log.info "KubernetesApiClient::getResourcesAndContinuationToken:Start:Parsing data for #{uri} using yajl @ #{Time.now.utc.iso8601}" @@ -761,5 +760,19 @@ def getResourcesAndContinuationToken(uri, api_group: nil) end return continuationToken, resourceInventory end #getResourcesAndContinuationToken + + def getKubeAPIServerUrl + apiServerUrl = nil + begin + if ENV["KUBERNETES_SERVICE_HOST"] && ENV["KUBERNETES_PORT_443_TCP_PORT"] + apiServerUrl = "https://#{ENV["KUBERNETES_SERVICE_HOST"]}:#{ENV["KUBERNETES_PORT_443_TCP_PORT"]}" + else + @Log.warn "Kubernetes environment variable not set KUBERNETES_SERVICE_HOST: #{ENV["KUBERNETES_SERVICE_HOST"]} KUBERNETES_PORT_443_TCP_PORT: #{ENV["KUBERNETES_PORT_443_TCP_PORT"]}. Unable to form resourceUri" + end + rescue => errorStr + @Log.warn "KubernetesApiClient::getKubeAPIServerUrl:Failed #{errorStr}" + end + return apiServerUrl + end end end diff --git a/source/plugins/ruby/arc_k8s_cluster_identity.rb b/source/plugins/ruby/arc_k8s_cluster_identity.rb new file mode 100644 index 000000000..ef55c3257 --- /dev/null +++ b/source/plugins/ruby/arc_k8s_cluster_identity.rb @@ -0,0 +1,216 @@ +# frozen_string_literal: true +require "logger" +require "net/http" +require "net/https" +require "uri" +require "yajl/json_gem" +require "base64" +require "time" +require_relative "KubernetesApiClient" +require_relative "ApplicationInsightsUtility" + +class ArcK8sClusterIdentity + # this arc k8s crd version and arc k8s uses corresponding version v1beta1 vs v1 based on the k8s version for apiextensions.k8s.io + @@cluster_config_crd_api_version = "clusterconfig.azure.com/v1beta1" + @@cluster_identity_resource_name = "container-insights-clusteridentityrequest" + @@cluster_identity_resource_namespace = "azure-arc" + @@cluster_identity_token_secret_namespace = "azure-arc" + @@crd_resource_uri_template = "%{kube_api_server_url}/apis/%{cluster_config_crd_api_version}/namespaces/%{cluster_identity_resource_namespace}/azureclusteridentityrequests/%{cluster_identity_resource_name}" + @@secret_resource_uri_template = "%{kube_api_server_url}/api/v1/namespaces/%{cluster_identity_token_secret_namespace}/secrets/%{token_secret_name}" + @@azure_monitor_custom_metrics_audience = "https://monitoring.azure.com/" + @@cluster_identity_request_kind = "AzureClusterIdentityRequest" + + def initialize + @LogPath = "/var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log" + @log = Logger.new(@LogPath, 1, 5000000) + @log.info "initialize start @ #{Time.now.utc.iso8601}" + @token_expiry_time = Time.now + @cached_access_token = String.new + @token_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/token" + @cert_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + @kube_api_server_url = KubernetesApiClient.getKubeAPIServerUrl + if @kube_api_server_url.nil? + @log.warn "got api server url nil from KubernetesApiClient.getKubeAPIServerUrl @ #{Time.now.utc.iso8601}" + end + @http_client = get_http_client + @service_account_token = get_service_account_token + @log.info "initialize complete @ #{Time.now.utc.iso8601}" + end + + def get_cluster_identity_token() + begin + # get the cluster msi identity token either if its empty or near expirty. Token is valid 24 hrs. + if @cached_access_token.to_s.empty? || (Time.now + 60 * 60 > @token_expiry_time) # Refresh token 1 hr from expiration + # renew the token if its near expiry + if !@cached_access_token.to_s.empty? 
&& (Time.now + 60 * 60 > @token_expiry_time) + @log.info "renewing the token since its near expiry @ #{Time.now.utc.iso8601}" + renew_near_expiry_token + # sleep 60 seconds to get the renewed token available + sleep 60 + end + @log.info "get token reference from crd @ #{Time.now.utc.iso8601}" + tokenReference = get_token_reference_from_crd + if !tokenReference.nil? && !tokenReference.empty? + @token_expiry_time = Time.parse(tokenReference["expirationTime"]) + token_secret_name = tokenReference["secretName"] + token_secret_data_name = tokenReference["dataName"] + # get the token from secret + @log.info "get token from secret @ #{Time.now.utc.iso8601}" + token = get_token_from_secret(token_secret_name, token_secret_data_name) + if !token.nil? + @cached_access_token = token + else + @log.warn "got token nil from secret: #{@token_secret_name}" + end + else + @log.warn "got token reference either nil or empty" + end + end + rescue => err + @log.warn "get_cluster_identity_token failed: #{err}" + ApplicationInsightsUtility.sendExceptionTelemetry(err, { "FeatureArea" => "MDM" }) + end + return @cached_access_token + end + + private + + def get_token_from_secret(token_secret_name, token_secret_data_name) + token = nil + begin + secret_request_uri = @@secret_resource_uri_template % { + kube_api_server_url: @kube_api_server_url, + cluster_identity_token_secret_namespace: @@cluster_identity_token_secret_namespace, + token_secret_name: token_secret_name, + } + get_request = Net::HTTP::Get.new(secret_request_uri) + get_request["Authorization"] = "Bearer #{@service_account_token}" + @log.info "Making GET request to #{secret_request_uri} @ #{Time.now.utc.iso8601}" + get_response = @http_client.request(get_request) + @log.info "Got response of #{get_response.code} for #{secret_request_uri} @ #{Time.now.utc.iso8601}" + if get_response.code.to_i == 200 + token_secret = JSON.parse(get_response.body)["data"] + cluster_identity_token = token_secret[token_secret_data_name] + token = Base64.decode64(cluster_identity_token) + end + rescue => err + @log.warn "get_token_from_secret API call failed: #{err}" + ApplicationInsightsUtility.sendExceptionTelemetry(err, { "FeatureArea" => "MDM" }) + end + return token + end + + private + + def get_token_reference_from_crd() + tokenReference = {} + begin + crd_request_uri = @@crd_resource_uri_template % { + kube_api_server_url: @kube_api_server_url, + cluster_config_crd_api_version: @@cluster_config_crd_api_version, + cluster_identity_resource_namespace: @@cluster_identity_resource_namespace, + cluster_identity_resource_name: @@cluster_identity_resource_name, + } + get_request = Net::HTTP::Get.new(crd_request_uri) + get_request["Authorization"] = "Bearer #{@service_account_token}" + @log.info "Making GET request to #{crd_request_uri} @ #{Time.now.utc.iso8601}" + get_response = @http_client.request(get_request) + @log.info "Got response of #{get_response.code} for #{crd_request_uri} @ #{Time.now.utc.iso8601}" + if get_response.code.to_i == 200 + status = JSON.parse(get_response.body)["status"] + tokenReference["expirationTime"] = status["expirationTime"] + tokenReference["secretName"] = status["tokenReference"]["secretName"] + tokenReference["dataName"] = status["tokenReference"]["dataName"] + end + rescue => err + @log.warn "get_token_reference_from_crd call failed: #{err}" + ApplicationInsightsUtility.sendExceptionTelemetry(err, { "FeatureArea" => "MDM" }) + end + return tokenReference + end + + private + + def renew_near_expiry_token() + begin + crd_request_uri = 
@@crd_resource_uri_template % { + kube_api_server_url: @kube_api_server_url, + cluster_config_crd_api_version: @@cluster_config_crd_api_version, + cluster_identity_resource_namespace: @@cluster_identity_resource_namespace, + cluster_identity_resource_name: @@cluster_identity_resource_name, + } + crd_request_body = get_crd_request_body + crd_request_body_json = crd_request_body.to_json + update_request = Net::HTTP::Patch.new(crd_request_uri) + update_request["Content-Type"] = "application/merge-patch+json" + update_request["Authorization"] = "Bearer #{@service_account_token}" + update_request.body = crd_request_body_json + update_response = @http_client.request(update_request) + @log.info "Got response of #{update_response.code} for PATCH #{crd_request_uri} @ #{Time.now.utc.iso8601}" + if update_response.code.to_i == 404 + @log.info "since crd resource doesnt exist since creating crd resource : #{@@cluster_identity_resource_name} @ #{Time.now.utc.iso8601}" + create_request = Net::HTTP::Post.new(crd_request_uri) + create_request["Content-Type"] = "application/json" + create_request["Authorization"] = "Bearer #{@service_account_token}" + create_request.body = crd_request_body_json + create_response = @http_client.request(create_request) + @log.info "Got response of #{create_response.code} for POST #{crd_request_uri} @ #{Time.now.utc.iso8601}" + end + rescue => err + @log.warn "renew_near_expiry_token call failed: #{err}" + ApplicationInsightsUtility.sendExceptionTelemetry(err, { "FeatureArea" => "MDM" }) + end + end + + private + + def get_service_account_token() + begin + if File.exist?(@token_file_path) && File.readable?(@token_file_path) + token_str = File.read(@token_file_path).strip + return token_str + else + @log.warn "Unable to read token string from #{@token_file_path}" + return nil + end + rescue => err + @log.warn "get_service_account_token call failed: #{err}" + ApplicationInsightsUtility.sendExceptionTelemetry(err, { "FeatureArea" => "MDM" }) + end + end + + private + + def get_http_client() + begin + base_api_server_url = URI.parse(@kube_api_server_url) + http = Net::HTTP.new(base_api_server_url.host, base_api_server_url.port) + http.use_ssl = true + if !File.exist?(@cert_file_path) + raise "#{@cert_file_path} doesnt exist" + else + http.ca_file = @cert_file_path + end + http.verify_mode = OpenSSL::SSL::VERIFY_PEER + return http + rescue => err + @log.warn "Unable to create http client #{err}" + ApplicationInsightsUtility.sendExceptionTelemetry(err, { "FeatureArea" => "MDM" }) + end + return nil + end + + private + + def get_crd_request_body + body = {} + body["apiVersion"] = @@cluster_config_crd_api_version + body["kind"] = @@cluster_identity_request_kind + body["metadata"] = {} + body["metadata"]["name"] = @@cluster_identity_resource_name + body["metadata"]["namespace"] = @@cluster_identity_resource_namespace + body["spec"] = {} + body["spec"]["audience"] = @@azure_monitor_custom_metrics_audience + return body + end +end diff --git a/source/plugins/ruby/out_mdm.rb b/source/plugins/ruby/out_mdm.rb index d801edb9a..b28c17034 100644 --- a/source/plugins/ruby/out_mdm.rb +++ b/source/plugins/ruby/out_mdm.rb @@ -16,6 +16,8 @@ def initialize require_relative "KubernetesApiClient" require_relative "ApplicationInsightsUtility" require_relative "constants" + require_relative "arc_k8s_cluster_identity" + require_relative "proxy_utils" @@token_resource_url = "https://monitoring.azure.com/" @@grant_type = "client_credentials" @@ -45,6 +47,8 @@ def initialize @useMsi = false 
@metrics_flushed_count = 0 + @cluster_identity = nil + @isArcK8sCluster = false @get_access_token_backoff_expiry = Time.now end @@ -76,28 +80,48 @@ def start if @can_send_data_to_mdm @log.info "MDM Metrics supported in #{aks_region} region" + if aks_resource_id.downcase.include?("microsoft.kubernetes/connectedclusters") + @isArcK8sCluster = true + end @@post_request_url = @@post_request_url_template % { aks_region: aks_region, aks_resource_id: aks_resource_id } @post_request_uri = URI.parse(@@post_request_url) - @http_client = Net::HTTP.new(@post_request_uri.host, @post_request_uri.port) + if (!!@isArcK8sCluster) + proxy = (ProxyUtils.getProxyConfiguration) + if proxy.nil? || proxy.empty? + @http_client = Net::HTTP.new(@post_request_uri.host, @post_request_uri.port) + else + @log.info "Proxy configured on this cluster: #{aks_resource_id}" + @http_client = Net::HTTP.new(@post_request_uri.host, @post_request_uri.port, proxy[:addr], proxy[:port], proxy[:user], proxy[:pass]) + end + else + @http_client = Net::HTTP.new(@post_request_uri.host, @post_request_uri.port) + end @http_client.use_ssl = true @log.info "POST Request url: #{@@post_request_url}" ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMPluginStart", {}) - # Check to see if SP exists, if it does use SP. Else, use msi - sp_client_id = @data_hash["aadClientId"] - sp_client_secret = @data_hash["aadClientSecret"] - - if (!sp_client_id.nil? && !sp_client_id.empty? && sp_client_id.downcase != "msi") - @useMsi = false - aad_token_url = @@aad_token_url_template % { tenant_id: @data_hash["tenantId"] } - @parsed_token_uri = URI.parse(aad_token_url) + # arc k8s cluster uses cluster identity + if (!!@isArcK8sCluster) + @log.info "using cluster identity token since cluster is azure arc k8s cluster" + @cluster_identity = ArcK8sClusterIdentity.new + @cached_access_token = @cluster_identity.get_cluster_identity_token else - @useMsi = true - msi_endpoint = @@msi_endpoint_template % { user_assigned_client_id: @@user_assigned_client_id, resource: @@token_resource_url } - @parsed_token_uri = URI.parse(msi_endpoint) - end + # Check to see if SP exists, if it does use SP. Else, use msi + sp_client_id = @data_hash["aadClientId"] + sp_client_secret = @data_hash["aadClientSecret"] + + if (!sp_client_id.nil? && !sp_client_id.empty? && sp_client_id.downcase != "msi") + @useMsi = false + aad_token_url = @@aad_token_url_template % { tenant_id: @data_hash["tenantId"] } + @parsed_token_uri = URI.parse(aad_token_url) + else + @useMsi = true + msi_endpoint = @@msi_endpoint_template % { user_assigned_client_id: @@user_assigned_client_id, resource: @@token_resource_url } + @parsed_token_uri = URI.parse(msi_endpoint) + end - @cached_access_token = get_access_token + @cached_access_token = get_access_token + end end rescue => e @log.info "exception when initializing out_mdm #{e}" @@ -226,7 +250,14 @@ def write(chunk) def send_to_mdm(post_body) begin - access_token = get_access_token + if (!!@isArcK8sCluster) + if @cluster_identity.nil? 
+ @cluster_identity = ArcK8sClusterIdentity.new + end + access_token = @cluster_identity.get_cluster_identity_token + else + access_token = get_access_token + end request = Net::HTTP::Post.new(@post_request_uri.request_uri) request["Content-Type"] = "application/x-ndjson" request["Authorization"] = "Bearer #{access_token}" From ab03640d2314b1e37a8a248c086b40adf5a2dbe4 Mon Sep 17 00:00:00 2001 From: bragi92 Date: Thu, 20 Aug 2020 17:51:57 -0700 Subject: [PATCH 012/301] fix: Reverting back to ltsc2019 tag (#429) --- kubernetes/windows/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 06e11e73a..70a5f6045 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -1,4 +1,4 @@ -FROM mcr.microsoft.com/windows/servercore@sha256:921bed01c2a023310bdbaa288edebd82c4910e536ff206b87e9cbe703ca27505 +FROM mcr.microsoft.com/windows/servercore:ltsc2019 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" From af0f98176fb85c5cb2366b6927525867c217afeb Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Thu, 27 Aug 2020 16:44:26 -0700 Subject: [PATCH 013/301] more kubelet metrics (#430) * more kubelet metrics * celan up new config --- build/linux/installer/conf/telegraf.conf | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/build/linux/installer/conf/telegraf.conf b/build/linux/installer/conf/telegraf.conf index 19b6058be..28a74a3d0 100644 --- a/build/linux/installer/conf/telegraf.conf +++ b/build/linux/installer/conf/telegraf.conf @@ -627,6 +627,7 @@ # ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" # Region = "$TELEMETRY_AKS_REGION" +#kubelet-1 [[inputs.prometheus]] name_prefix="container.azm.ms/" ## An array of urls to scrape metrics from. @@ -669,6 +670,28 @@ [inputs.prometheus.tagpass] operation_type = ["create_container", "remove_container", "pull_image"] +#kubelet-2 +[[inputs.prometheus]] + name_prefix="container.azm.ms/" + ## An array of urls to scrape metrics from. + urls = ["$CADVISOR_METRICS_URL"] + + fieldpass = ["kubelet_running_pod_count","volume_manager_total_volumes", "kubelet_node_config_error", "process_resident_memory_bytes", "process_cpu_seconds_total"] + + metric_version = 2 + url_tag = "scrapeUrl" + + + ## Use bearer token for authorization. 
('bearer_token' takes priority) + bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" + ## Specify timeout duration for slower prometheus clients (default is 3s) + response_timeout = "15s" + + ## Optional TLS Config + tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + insecure_skip_verify = true + + ## prometheus custom metrics [[inputs.prometheus]] From 7fc4d4cb03648a081dd9e0fceefc4b742e14021a Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Mon, 31 Aug 2020 18:55:20 -0700 Subject: [PATCH 014/301] fix nom issue when config is empty (#432) --- build/linux/installer/conf/telegraf-rs.conf | 4 +- build/linux/installer/conf/telegraf.conf | 2 +- .../scripts/tomlparser-npm-config.rb | 83 ++++++++++++------- 3 files changed, 56 insertions(+), 33 deletions(-) diff --git a/build/linux/installer/conf/telegraf-rs.conf b/build/linux/installer/conf/telegraf-rs.conf index 3f2f65cff..d81196330 100644 --- a/build/linux/installer/conf/telegraf-rs.conf +++ b/build/linux/installer/conf/telegraf-rs.conf @@ -615,8 +615,8 @@ $AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER [[inputs.prometheus]] #name_prefix="container.azm.ms/" ## An array of urls to scrape metrics from. - urls = ["$AZMON_INTEGRATION_NPM_METRICS_URL_LIST_CLUSTER"] - fielddrop = ["$AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER"] + urls = $AZMON_INTEGRATION_NPM_METRICS_URL_LIST_CLUSTER + fielddrop = $AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER metric_version = 2 url_tag = "scrapeUrl" diff --git a/build/linux/installer/conf/telegraf.conf b/build/linux/installer/conf/telegraf.conf index 28a74a3d0..013aa1af2 100644 --- a/build/linux/installer/conf/telegraf.conf +++ b/build/linux/installer/conf/telegraf.conf @@ -730,7 +730,7 @@ [[inputs.prometheus]] #name_prefix="container.azm.ms/" ## An array of urls to scrape metrics from. - urls = ["$AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE"] + urls = $AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE metric_version = 2 url_tag = "scrapeUrl" diff --git a/build/linux/installer/scripts/tomlparser-npm-config.rb b/build/linux/installer/scripts/tomlparser-npm-config.rb index c5953836b..777fef209 100644 --- a/build/linux/installer/scripts/tomlparser-npm-config.rb +++ b/build/linux/installer/scripts/tomlparser-npm-config.rb @@ -14,9 +14,13 @@ @configSchemaVersion = "" @collect_basic_npm_metrics = false @collect_advanced_npm_metrics = false -@npm_node_url="http://$NODE_IP:10091/node-metrics" -@npm_cluster_url="http://npm-metrics-cluster-service.kube-system:9000/cluster-metrics" -@npm_basic_drop_metrics_cluster = "npm_ipset_counts" +@npm_default_setting = "[]" +@npm_node_urls = "[\"http://$NODE_IP:10091/node-metrics\"]" +@npm_cluster_urls="[\"http://npm-metrics-cluster-service.kube-system:9000/cluster-metrics\"]" +@npm_basic_drop_metrics_cluster = "[\"npm_ipset_counts\"]" +@tgfConfigFileDS = "/etc/opt/microsoft/docker-cimprov/telegraf.conf" +@tgfConfigFileRS = "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" +@replicaset = "replicaset" # Use parser to parse the configmap toml file to a ruby structure def parseConfigMap @@ -42,37 +46,37 @@ def populateSettingValuesFromConfigMap(parsedConfig) begin if !parsedConfig.nil? && !parsedConfig[:integrations].nil? && !parsedConfig[:integrations][:azure_network_policy_manager].nil? && !parsedConfig[:integrations][:azure_network_policy_manager][:collect_advanced_metrics].nil? 
advanced_npm_metrics = parsedConfig[:integrations][:azure_network_policy_manager][:collect_advanced_metrics].to_s - puts "got:integrations.azure_network_policy_manager.collect_advanced_metrics='#{advanced_npm_metrics}'" + puts "config::npm::got:integrations.azure_network_policy_manager.collect_advanced_metrics='#{advanced_npm_metrics}'" if !advanced_npm_metrics.nil? && advanced_npm_metrics.strip.casecmp("true") == 0 @collect_advanced_npm_metrics = true else @collect_advanced_npm_metrics = false end - puts "set:integrations.azure_network_policy_manager.collect_advanced_metrics=#{@collect_advanced_npm_metrics}" + puts "config::npm::set:integrations.azure_network_policy_manager.collect_advanced_metrics=#{@collect_advanced_npm_metrics}" end rescue => errorStr - puts "config::error:Exception while reading config settings for npm advanced setting - #{errorStr}, using defaults" + puts "config::npm::error:Exception while reading config settings for npm advanced setting - #{errorStr}, using defaults" @collect_advanced_npm_metrics = false end begin if !parsedConfig.nil? && !parsedConfig[:integrations].nil? && !parsedConfig[:integrations][:azure_network_policy_manager].nil? && !parsedConfig[:integrations][:azure_network_policy_manager][:collect_basic_metrics].nil? basic_npm_metrics = parsedConfig[:integrations][:azure_network_policy_manager][:collect_basic_metrics].to_s - puts "got:integrations.azure_network_policy_manager.collect_basic_metrics='#{basic_npm_metrics}'" + puts "config::npm::got:integrations.azure_network_policy_manager.collect_basic_metrics='#{basic_npm_metrics}'" if !basic_npm_metrics.nil? && basic_npm_metrics.strip.casecmp("true") == 0 @collect_basic_npm_metrics = true else @collect_basic_npm_metrics = false end - puts "set:integrations.azure_network_policy_manager.collect_basic_metrics=#{@collect_basic_npm_metrics}" + puts "config::npm::set:integrations.azure_network_policy_manager.collect_basic_metrics=#{@collect_basic_npm_metrics}" end rescue => errorStr - puts "config::error:Exception while reading config settings for npm basic setting - #{errorStr}, using defaults" + puts "config::npm::error:Exception while reading config settings for npm basic setting - #{errorStr}, using defaults" @collect_basic_npm_metrics = false end end @configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] -puts "****************Start Config Processing********************" +puts "****************Start NPM Config Processing********************" if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it configMapSettings = parseConfigMap if !configMapSettings.nil? @@ -80,34 +84,53 @@ def populateSettingValuesFromConfigMap(parsedConfig) end else if (File.file?(@configMapMountPath)) - ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported schema version") + ConfigParseErrorLogger.logError("config::npm::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported schema version") end @collect_basic_npm_metrics = false @collect_advanced_npm_metrics = false end -# Write the settings to file, so that they can be set as environment variables -file = File.open("integration_npm_config_env_var", "w") -if !file.nil? 
+ +controller = ENV["CONTROLLER_TYPE"] +tgfConfigFile = @tgfConfigFileDS + +if controller.casecmp(@replicaset) == 0 + tgfConfigFile = @tgfConfigFileRS +end + +#replace place holders in configuration file +tgfConfig = File.read(tgfConfigFile) #read returns only after closing the file + +if @collect_advanced_npm_metrics == true + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE", @npm_node_urls) + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_URL_LIST_CLUSTER", @npm_cluster_urls) + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER", @npm_default_setting) +elsif @collect_basic_npm_metrics == true + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE", @npm_node_urls) + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_URL_LIST_CLUSTER", @npm_cluster_urls) + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER", @npm_basic_drop_metrics_cluster) +else + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE", @npm_default_setting) + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_URL_LIST_CLUSTER", @npm_default_setting) + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER", @npm_default_setting) +end + +File.open(tgfConfigFile, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope +puts "config::npm::Successfully substituted the NPM placeholders into #{tgfConfigFile} file for #{controller}" + +# Write the telemetry to file, so that they can be set as environment variables +telemetryFile = File.open("integration_npm_config_env_var", "w") + +if !telemetryFile.nil? if @collect_advanced_npm_metrics == true - file.write("export TELEMETRY_NPM_INTEGRATION_METRICS_ADVANCED=1\n") - file.write("export AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE=#{@npm_node_url}\n") - file.write("export AZMON_INTEGRATION_NPM_METRICS_URL_LIST_CLUSTER=#{@npm_cluster_url}\n") - file.write("export AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER=\n") + telemetryFile.write("export TELEMETRY_NPM_INTEGRATION_METRICS_ADVANCED=1\n") elsif @collect_basic_npm_metrics == true - file.write("export TELEMETRY_NPM_INTEGRATION_METRICS_BASIC=1\n") - file.write("export AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE=#{@npm_node_url}\n") - file.write("export AZMON_INTEGRATION_NPM_METRICS_URL_LIST_CLUSTER=#{@npm_cluster_url}\n") - file.write("export AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER=#{@npm_basic_drop_metrics_cluster}\n") - else - file.write("export AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE=\n") - file.write("export AZMON_INTEGRATION_NPM_METRICS_URL_LIST_CLUSTER=\n") - file.write("export AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER=\n") + telemetryFile.write("export TELEMETRY_NPM_INTEGRATION_METRICS_BASIC=1\n") end # Close file after writing all environment variables - file.close + telemetryFile.close else - puts "Exception while opening file for writing config environment variables" - puts "****************End Config Processing********************" -end \ No newline at end of file + puts "config::npm::Exception while opening file for writing NPM telemetry environment variables" + puts "****************End NPM Config Processing********************" +end From 281a77c8c871d6d9a3ad98715098234c1f027302 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Tue, 1 Sep 2020 16:21:04 -0700 Subject: [PATCH 015/301] support multiple docker paths when docker root is updated thru knode (#433) --- kubernetes/omsagent.yaml | 18 
+++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index db788a37e..947620ebc 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -387,6 +387,13 @@ spec: name: host-log - mountPath: /var/lib/docker/containers name: containerlog-path + readOnly: true + - mountPath: /mnt/docker + name: containerlog-path-2 + readOnly: true + - mountPath: /mnt/containers + name: containerlog-path-3 + readOnly: true - mountPath: /etc/kubernetes/host name: azure-json-path - mountPath: /etc/omsagent-secret @@ -444,6 +451,12 @@ spec: - name: containerlog-path hostPath: path: /var/lib/docker/containers + - name: containerlog-path-2 + hostPath: + path: /mnt/docker + - name: containerlog-path-3 + hostPath: + path: /mnt/containers - name: azure-json-path hostPath: path: /etc/kubernetes @@ -528,8 +541,6 @@ spec: name: docker-sock - mountPath: /var/log name: host-log - - mountPath: /var/lib/docker/containers - name: containerlog-path - mountPath: /etc/kubernetes/host name: azure-json-path - mountPath: /etc/omsagent-secret @@ -588,9 +599,6 @@ spec: - name: host-log hostPath: path: /var/log - - name: containerlog-path - hostPath: - path: /var/lib/docker/containers - name: azure-json-path hostPath: path: /etc/kubernetes From d8d7f9feac3e402b9a004cf2a15e57e5efd445d1 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 10 Sep 2020 17:22:03 -0700 Subject: [PATCH 016/301] Gangams/doc and other related updates (#434) * bring back nodeslector changes for windows agent ds * readme updates * chart updates for azure cluster resourceid and region * set cluster region during onboarding for managed clusters * wip * fix for onboarding script * add sp support for the login * update help * add sp support for powershell * script updates for sp login * wip * wip * wip * readme updates * update the links to use ci_prod branch * fix links * fix image link * some more readme updates --- README.md | 62 +++++++++------- charts/azuremonitor-containers/Chart.yaml | 2 +- .../templates/NOTES.txt | 4 +- .../templates/omsagent-arc-k8s-crd.yaml | 2 +- .../templates/omsagent-daemonset-windows.yaml | 12 +++- .../templates/omsagent-daemonset.yaml | 9 ++- .../templates/omsagent-deployment.yaml | 9 ++- .../templates/omsagent-rs-configmap.yaml | 2 +- .../templates/omsagent-secret.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 18 ++--- img/azuremonitor-containers.svg | 66 +++++++++++++++++ .../linux/acrworkflows/acrdevnamespace.yaml | 4 +- .../add-monitoring-metrics-publisher-role.md | 8 +-- .../aks/mdmonboarding/mdm_onboarding.sh | 2 +- .../mdmonboarding/mdm_onboarding_atscale.sh | 2 +- .../kubernetes/AddMonitoringOnboardingTags.sh | 2 +- scripts/onboarding/attach-monitoring-tags.md | 8 +-- .../onboarding_azuremonitor_for_containers.sh | 2 +- .../onboarding/managed/disable-monitoring.ps1 | 36 +++++++++- .../onboarding/managed/disable-monitoring.sh | 47 ++++++++++-- .../onboarding/managed/enable-monitoring.ps1 | 38 +++++++++- .../onboarding/managed/enable-monitoring.sh | 71 +++++++++++++++---- scripts/onboarding/solution-onboarding.md | 4 +- .../preview/health/HealthAgentOnboarding.ps1 | 2 +- scripts/troubleshoot/README.md | 12 ++-- scripts/troubleshoot/TroubleshootError.ps1 | 2 +- .../TroubleshootError_nonAzureK8s.ps1 | 2 +- 27 files changed, 334 insertions(+), 96 deletions(-) create mode 100644 img/azuremonitor-containers.svg diff --git a/README.md b/README.md index d5d874c9c..3eec1f344 100644 --- a/README.md +++ b/README.md @@ -70,7 
+70,6 @@ The general directory structure is: │ ├── windows/ - scripts to build the Docker image for Windows Agent │ │ ├── dockerbuild - script to build the code and docker imag, and publish docker image │ │ ├── acrworkflows/ - acr work flows for the Windows Agent container image -│ │ ├── baseimage/ - windowsservercore base image for the windows agent container │ │ ├── DockerFile - DockerFile for Windows Agent Container Image │ │ ├── main.ps1 - Windows Agent container entry point │ │ ├── setup.ps1 - setup file for Windows Agent Container Image @@ -140,7 +139,7 @@ bash ~/Docker-Provider/scripts/build/linux/install-build-pre-requisites.sh ### Build Docker Provider Shell Bundle and Docker Image and Publish Docker Image -> Note: If you are using WSL2, ensure Docker for windows running Linux containers mode to build Linux agent image successfully +> Note: If you are using WSL2, ensure `Docker for windows` running with Linux containers mode on your windows machine to build Linux agent image successfully ``` cd ~/Docker-Provider/kubernetes/linux/dockerbuild @@ -167,9 +166,23 @@ docker push /: ``` ## Windows Agent +To build the windows agent, you will have to build .NET and Go code, and docker image for windows agent. +Docker image for windows agent can only build on Windows machine with `Docker for windows` with Windows containers mode but the .NET code and Go code can be built either on Windows or Linux or WSL2. + ### Install Pre-requisites -If you are planning to build the .net and go code for windows agent on Linux machine and you have already have Docker for Windows on Windows machine, then you may skip this. +Install pre-requisites based on OS platform you will be using to build the windows agent code + +#### Option 1 - Using Windows Machine to Build the Windows agent + +``` +powershell # launch powershell with elevated admin on your windows machine +Set-ExecutionPolicy -ExecutionPolicy bypass # set the execution policy +cd %userprofile%\Docker-Provider\scripts\build\windows # based on your repo path +.\install-build-pre-requisites.ps1 # +``` + +#### Option 2 - Using WSL2 to Build the Windows agent ``` powershell # launch powershell with elevated admin on your windows machine @@ -178,20 +191,36 @@ net use z: \\wsl$\Ubuntu-16.04 # map the network drive of the ubuntu app to wind cd z:\home\sshadmin\Docker-Provider\scripts\build\windows # based on your repo path .\install-build-pre-requisites.ps1 # ``` -#### Build Certificate Generator Source code and Out OMS Go plugin code -> Note: .net and go code for windows agent can built on Ubuntu + +### Build Windows Agent code and Docker Image + +> Note: format of the windows agent imagetag will be `win-ci`. possible values for release are test, dev, preview, dogfood, prod etc. 
+ +#### Option 1 - Using Windows Machine to Build the Windows agent + +Execute below instructions on elevated command prompt to build windows agent code and docker image, publishing the image to acr or docker hub + +``` +cd %userprofile%\Docker-Provider\kubernetes\windows\dockerbuild # based on your repo path +docker login # if you want to publish the image to acr then login to acr via `docker login ` +powershell -ExecutionPolicy bypass # switch to powershell if you are not on powershell already +.\build-and-publish-docker-image.ps1 -image /: # trigger build code and image and publish docker hub or acr +``` + +#### Option 2 - Using WSL2 to Build the Windows agent + +##### On WSL2, Build Certificate Generator Source code and Out OMS Go plugin code ``` -cd ~/Docker-Provider/build/windows # based on your repo path on ubuntu or WSL2 +cd ~/Docker-Provider/build/windows # based on your repo path on WSL2 Ubuntu app pwsh #switch to powershell .\Makefile.ps1 # trigger build and publish of .net and go code ``` -> Note: format of the imagetag will be `win-ci`. possible values for release are test, dev, preview, dogfood, prod etc. -#### Build and Push Docker Image +#### On Windows machine, build and Push Docker Image -> Note: windows container can only built on windows hence you will have to execute below commands on windows via accessing network share or copying published bits omsagentwindows under kubernetes directory on to windows machine +> Note: Docker image for windows container can only built on windows hence you will have to execute below commands on windows via accessing network share or copying published bits omsagentwindows under kubernetes directory on to windows machine ``` net use z: \\wsl$\Ubuntu-16.04 # map the network drive of the ubuntu app to windows @@ -200,21 +229,6 @@ docker build -t /: --build-arg IMAGE_TAG= . docker push /: ``` -### Build Cert generator, Out OMS Plugin and Docker Image and Publish Docker Image - -If you have code cloned on to windows, you can built everything for windows agent on windows machine via below instructions - -``` -# install pre-requisites if you havent installed already -cd %userprofile%\Docker-Provider\kubernetes\windows # based on your repo path -.\install-build-pre-requisites.ps1 - -cd %userprofile%\Docker-Provider\kubernetes\windows\dockerbuild # based on your repo path -docker login # if you want to publish the image to acr then login to acr via `docker login ` -powershell -ExecutionPolicy bypass # switch to powershell if you are not on powershell already -.\build-and-publish-docker-image.ps1 -image /: # trigger build code and image and publish docker hub or acr -``` - # Azure DevOps Build Pipeline Navigate to https://github-private.visualstudio.com/microsoft/_build?view=pipelines to see Linux and Windows Agent build pipelines. These pipelines are configured with CI triggers for ci_dev and ci_prod. 
diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 202494152..8976b5561 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -28,7 +28,7 @@ keywords: - kubernetes - kuberneteshealth home: https://docs.microsoft.com/en-us/azure/monitoring/monitoring-container-health -icon: https://raw.githubusercontent.com/Microsoft/OMS-docker/ci_feature/img/azuremonitor-containers.svg +icon: https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/img/azuremonitor-containers.svg sources: - https://github.com/microsoft/Docker-Provider/tree/ci_prod maintainers: diff --git a/charts/azuremonitor-containers/templates/NOTES.txt b/charts/azuremonitor-containers/templates/NOTES.txt index 6179b6f1a..372cecb95 100644 --- a/charts/azuremonitor-containers/templates/NOTES.txt +++ b/charts/azuremonitor-containers/templates/NOTES.txt @@ -14,7 +14,7 @@ {{- end }} -{{- if and (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId "") }} +{{- if and (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId "") (ne .Values.Azure.Cluster.ResourceId "") }} ############################################################################## #### ERROR: You did not provide cluster name #### @@ -22,7 +22,7 @@ {{- end }} -{{- if or (eq .Values.omsagent.secret.key "") (eq .Values.omsagent.secret.wsid "") (and (eq .Values.omsagent.env.clusterName "") (eq .Values.omsagent.env.clusterId ""))}} +{{- if or (eq .Values.omsagent.secret.key "") (eq .Values.omsagent.secret.wsid "") (and (eq .Values.omsagent.env.clusterName "") (eq .Values.omsagent.env.clusterId "") (eq .Values.Azure.Cluster.ResourceId "") )}} This deployment will not complete. 
To proceed, run helm upgrade {{ .Release.Name }} \ diff --git a/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml b/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml index f7873de40..ebdd5ea3f 100644 --- a/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml @@ -1,4 +1,4 @@ -{{- if contains "microsoft.kubernetes/connectedclusters" (.Values.omsagent.env.clusterId | lower) }} +{{- if or ( contains "microsoft.kubernetes/connectedclusters" (.Values.Azure.Cluster.ResourceId | lower) ) ( contains "microsoft.kubernetes/connectedclusters" (.Values.omsagent.env.clusterId | lower)) }} apiVersion: clusterconfig.azure.com/v1beta1 kind: AzureClusterIdentityRequest metadata: diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index b8e667398..7acd46c37 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -25,6 +25,8 @@ spec: dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" spec: + nodeSelector: + beta.kubernetes.io/os: windows {{- if .Values.omsagent.rbac }} serviceAccountName: omsagent {{- end }} @@ -46,6 +48,13 @@ spec: - name: AKS_REGION value: {{ .Values.omsagent.env.clusterRegion | quote }} {{- end }} + {{- else if ne .Values.Azure.Cluster.ResourceId "" }} + - name: AKS_RESOURCE_ID + value: {{ .Values.Azure.Cluster.ResourceId | quote }} + {{- if ne .Values.Azure.Cluster.Region "" }} + - name: AKS_REGION + value: {{ .Values.Azure.Cluster.Region | quote }} + {{- end }} {{- else }} - name: ACS_RESOURCE_NAME value: {{ .Values.omsagent.env.clusterName | quote }} @@ -80,9 +89,6 @@ spec: - C:\opt\omsagentwindows\scripts\cmd\livenessProbe.cmd periodSeconds: 60 initialDelaySeconds: 180 - {{- with .Values.omsagent.daemonsetwindows.affinity }} - affinity: {{- toYaml . | nindent 8 }} - {{- end }} {{- with .Values.omsagent.tolerations }} tolerations: {{- toYaml . 
| nindent 8 }} {{- end }} diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index d6d6171cd..7514247a0 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -1,4 +1,4 @@ -{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId ""))}} +{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId "") (ne .Values.Azure.Cluster.ResourceId "") )}} apiVersion: apps/v1 kind: DaemonSet metadata: @@ -46,6 +46,13 @@ spec: - name: AKS_REGION value: {{ .Values.omsagent.env.clusterRegion | quote }} {{- end }} + {{- else if ne .Values.Azure.Cluster.ResourceId "" }} + - name: AKS_RESOURCE_ID + value: {{ .Values.Azure.Cluster.ResourceId | quote }} + {{- if ne .Values.Azure.Cluster.Region "" }} + - name: AKS_REGION + value: {{ .Values.Azure.Cluster.Region | quote }} + {{- end }} {{- else }} - name: ACS_RESOURCE_NAME value: {{ .Values.omsagent.env.clusterName | quote }} diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index 6f8140eb6..7d7ac7040 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -1,4 +1,4 @@ -{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId ""))}} +{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId "") (ne .Values.Azure.Cluster.ResourceId "") )}} apiVersion: apps/v1 kind: Deployment metadata: @@ -47,6 +47,13 @@ spec: - name: AKS_REGION value: {{ .Values.omsagent.env.clusterRegion | quote }} {{- end }} + {{- else if ne .Values.Azure.Cluster.ResourceId "" }} + - name: AKS_RESOURCE_ID + value: {{ .Values.Azure.Cluster.ResourceId | quote }} + {{- if ne .Values.Azure.Cluster.Region "" }} + - name: AKS_REGION + value: {{ .Values.Azure.Cluster.Region | quote }} + {{- end }} {{- else }} - name: ACS_RESOURCE_NAME value: {{ .Values.omsagent.env.clusterName | quote }} diff --git a/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml b/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml index c77fb12b4..ee0664495 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml @@ -1,4 +1,4 @@ -{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId ""))}} +{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId "") (ne .Values.Azure.Cluster.ResourceId "") )}} kind: ConfigMap apiVersion: v1 data: diff --git a/charts/azuremonitor-containers/templates/omsagent-secret.yaml b/charts/azuremonitor-containers/templates/omsagent-secret.yaml index c6d992b82..1a7f087ed 100644 --- a/charts/azuremonitor-containers/templates/omsagent-secret.yaml +++ 
b/charts/azuremonitor-containers/templates/omsagent-secret.yaml @@ -1,4 +1,4 @@ -{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId ""))}} +{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId "") (ne .Values.Azure.Cluster.ResourceId "") )}} apiVersion: v1 kind: Secret metadata: diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 610e109ef..4d0d7f8f2 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -3,7 +3,12 @@ # Declare variables to be passed into your templates. ## Microsoft OMS Agent image for kubernetes cluster monitoring -## ref: https://github.com/Microsoft/OMS-docker/tree/ci_feature_prod +## ref: https://github.com/microsoft/Docker-Provider/tree/ci_prod +## Values of ResourceId and Region under Azure->Cluster being populated by Azure Arc K8s RP during the installation of the extension +Azure: + Cluster: + Region: + ResourceId: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" @@ -81,17 +86,6 @@ omsagent: operator: NotIn values: - master - daemonsetwindows: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - labelSelector: - matchExpressions: - - key: beta.kubernetes.io/os - operator: In - values: - - windows ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## diff --git a/img/azuremonitor-containers.svg b/img/azuremonitor-containers.svg new file mode 100644 index 000000000..b2f7c5323 --- /dev/null +++ b/img/azuremonitor-containers.svg @@ -0,0 +1,66 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/kubernetes/linux/acrworkflows/acrdevnamespace.yaml b/kubernetes/linux/acrworkflows/acrdevnamespace.yaml index 9270be755..6a3617f6b 100644 --- a/kubernetes/linux/acrworkflows/acrdevnamespace.yaml +++ b/kubernetes/linux/acrworkflows/acrdevnamespace.yaml @@ -1,5 +1,5 @@ version: 1.0-preview-1 steps: - build: -t {{.Run.Registry}}/public/azuremonitor/containerinsights/cidev:{{.Run.Branch}}-{{.Run.Date}}-{{.Run.Commit | substr 0 7 }} . - workingDirectory: ci_feature - - push: ["{{.Run.Registry}}/public/azuremonitor/containerinsights/cidev:{{.Run.Branch}}-{{.Run.Date}}-{{.Run.Commit | substr 0 7 }}"] + workingDirectory: ci_dev + - push: ["{{.Run.Registry}}/public/azuremonitor/containerinsights/cidev:{{.Run.Branch}}-{{.Run.Date}}-{{.Run.Commit | substr 0 7 }}"] diff --git a/scripts/onboarding/add-monitoring-metrics-publisher-role.md b/scripts/onboarding/add-monitoring-metrics-publisher-role.md index 822ff0f64..91b91d872 100644 --- a/scripts/onboarding/add-monitoring-metrics-publisher-role.md +++ b/scripts/onboarding/add-monitoring-metrics-publisher-role.md @@ -16,7 +16,7 @@ Of the built-in roles, only Owner and User Access Administrator are granted acce ### For single AKS cluster using Azure CLI ``` sh -curl -sL https://raw.githubusercontent.com/Microsoft/OMS-docker/ci_feature/docs/aks/mdmonboarding/mdm_onboarding.sh | bash -s +curl -sL https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/aks/mdmonboarding/mdm_onboarding.sh | bash -s ``` The configuration change can take a few minutes to complete. 
When it finishes, you see a message similar to the following that includes the result: @@ -28,7 +28,7 @@ completed the role assignment ### For all AKS clusters in the specified subscription using Azure CLI ``` sh -curl -sL https://raw.githubusercontent.com/Microsoft/OMS-docker/ci_feature/docs/aks/mdmonboarding/mdm_onboarding_atscale.sh | bash -s +curl -sL https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/aks/mdmonboarding/mdm_onboarding_atscale.sh | bash -s ``` The configuration change can take a few minutes to complete. When it finishes, you see a message similar to the following that includes the result: @@ -43,7 +43,7 @@ completed role assignments for all AKS clusters in subscription: /resourceGroups//providers/Microsoft.OperationalInsights/workspaces/" "clusterName of AKS-Engine cluster" +# https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/aksengine/kubernetes/AddMonitoringOnboardingTags.sh | bash -s "name of the cloud" "00000000-0000-0000-0000-000000000000" "Resource Group Name of AKS-Engine cluster" "/subscriptions//resourceGroups//providers/Microsoft.OperationalInsights/workspaces/" "clusterName of AKS-Engine cluster" # nameoftheCloud=${1} diff --git a/scripts/onboarding/attach-monitoring-tags.md b/scripts/onboarding/attach-monitoring-tags.md index f1c9a2e32..f7a802750 100644 --- a/scripts/onboarding/attach-monitoring-tags.md +++ b/scripts/onboarding/attach-monitoring-tags.md @@ -10,7 +10,7 @@ If you are not familiar with the concepts of azure resource tags (https://docs.m ## Attach tags using Powershell Get the below powershell script files to your local computer. - - Powershell script file [AddMonitoringWorkspaceTags.ps1](https://github.com/Microsoft/OMS-docker/blob/ci_feature/docs/aksengine/kubernetes/AddMonitoringWorkspaceTags.ps1) + - Powershell script file [AddMonitoringWorkspaceTags.ps1](https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/aksengine/kubernetes/AddMonitoringWorkspaceTags.ps1) - Refer for updating the Powershell execution policy (https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.security/set-executionpolicy?view=powershell-6) - Log analytics workspace resource Id can retrieved either Azure CLI or Powershell or Azure Portal Azure CLI @@ -50,14 +50,14 @@ The configuration change can take a few minutes to complete. 
When it finishes, y ``` sh -curl -sL https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature/docs/aksengine/kubernetes/AddMonitoringOnboardingTags.sh | bash -s +curl -sL https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/aksengine/kubernetes/AddMonitoringOnboardingTags.sh | bash -s Example for AKS-Engine clusters in Azure Public cloud -curl -sL https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature/docs/aksengine/kubernetes/AddMonitoringOnboardingTags.sh | bash -s "AzureCloud" "00000000-0000-0000-0000-000000000000" "my-aks-engine-cluster-rg" "/subscriptions//resourceGroups/workspaceRg/providers/Microsoft.OperationalInsights/workspaces/workspaceName" "my-aks-engine-cluster" +curl -sL https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/aksengine/kubernetes/AddMonitoringOnboardingTags.sh | bash -s "AzureCloud" "00000000-0000-0000-0000-000000000000" "my-aks-engine-cluster-rg" "/subscriptions//resourceGroups/workspaceRg/providers/Microsoft.OperationalInsights/workspaces/workspaceName" "my-aks-engine-cluster" Example for AKS-Engine clusters in Azure China cloud -curl -sL https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature/docs/aksengine/kubernetes/AddMonitoringOnboardingTags.sh | bash -s "AzureChinaCloud" "00000000-0000-0000-0000-000000000000" "my-aks-engine-cluster-rg" "/subscriptions//resourceGroups/workspaceRg/providers/Microsoft.OperationalInsights/workspaces/workspaceName" "my-aks-engine-cluster" +curl -sL https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/aksengine/kubernetes/AddMonitoringOnboardingTags.sh | bash -s "AzureChinaCloud" "00000000-0000-0000-0000-000000000000" "my-aks-engine-cluster-rg" "/subscriptions//resourceGroups/workspaceRg/providers/Microsoft.OperationalInsights/workspaces/workspaceName" "my-aks-engine-cluster" ``` diff --git a/scripts/onboarding/hybrid/onboarding_azuremonitor_for_containers.sh b/scripts/onboarding/hybrid/onboarding_azuremonitor_for_containers.sh index b66dca67d..e2afa579d 100644 --- a/scripts/onboarding/hybrid/onboarding_azuremonitor_for_containers.sh +++ b/scripts/onboarding/hybrid/onboarding_azuremonitor_for_containers.sh @@ -151,7 +151,7 @@ echo "workspaceResourceId:"$workspaceResourceId echo "workspaceGuid:"$workspaceGuid echo "adding containerinsights solution to workspace" -solution=$(az group deployment create -g $defaultWorkspaceResourceGroup --template-uri https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature_prod/docs/templates/azuremonitor-containerSolution.json --parameters workspaceResourceId=$workspaceResourceId --parameters workspaceRegion=$workspaceRegion) +solution=$(az group deployment create -g $defaultWorkspaceResourceGroup --template-uri https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/templates/azuremonitor-containerSolution.json --parameters workspaceResourceId=$workspaceResourceId --parameters workspaceRegion=$workspaceRegion) echo "getting workspace primaryshared key" workspaceKey=$(az rest --method post --uri $workspaceResourceId/sharedKeys?api-version=2015-11-01-preview --query primarySharedKey) diff --git a/scripts/onboarding/managed/disable-monitoring.ps1 b/scripts/onboarding/managed/disable-monitoring.ps1 index 41ba2adb0..ea66cb3a3 100644 --- a/scripts/onboarding/managed/disable-monitoring.ps1 +++ b/scripts/onboarding/managed/disable-monitoring.ps1 @@ -7,6 +7,12 @@ .PARAMETER clusterResourceId Id of the Azure Managed Cluster such as 
Azure ARC K8s, ARO v4 etc. + .PARAMETER servicePrincipalClientId + client Id of the service principal which will be used for the azure login + .PARAMETER servicePrincipalClientSecret + client secret of the service principal which will be used for the azure login + .PARAMETER tenantId + tenantId of the service principal which will be used for the azure login .PARAMETER kubeContext (optional) kube-context of the k8 cluster to install Azure Monitor for containers HELM chart @@ -22,6 +28,11 @@ param( [Parameter(mandatory = $true)] [string]$clusterResourceId, + [string]$servicePrincipalClientId, + [Parameter(mandatory = $false)] + [string]$servicePrincipalClientSecret, + [Parameter(mandatory = $false)] + [string]$tenantId, [Parameter(mandatory = $false)] [string]$kubeContext ) @@ -33,6 +44,7 @@ $helmChartName = "azuremonitor-containers" $isArcK8sCluster = $false $isAksCluster = $false $isAroV4Cluster = $false +$isUsingServicePrincipal = $false # checks the required Powershell modules exist and if not exists, request the user permission to install $azAccountModule = Get-Module -ListAvailable -Name Az.Accounts @@ -199,11 +211,24 @@ if ($clusterResourceId.ToLower().Contains("microsoft.kubernetes/connectedcluster $isAroV4Cluster = $true } +if(([string]::IsNullOrEmpty($servicePrincipalClientId) -eq $false) -and + ([string]::IsNullOrEmpty($servicePrincipalClientSecret) -eq $false) -and + ([string]::IsNullOrEmpty($tenantId) -eq $false)) { + Write-Host("Using service principal creds for the azure login since provided.") + $isUsingServicePrincipal = $true + } + $resourceParts = $clusterResourceId.Split("/") $clusterSubscriptionId = $resourceParts[2] Write-Host("Cluster SubscriptionId : '" + $clusterSubscriptionId + "' ") -ForegroundColor Green +if ($isUsingServicePrincipal) { + $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force + $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret + Connect-AzAccount -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId +} + try { Write-Host("") Write-Host("Trying to get the current Az login context...") @@ -220,8 +245,15 @@ catch { if ($null -eq $account.Account) { try { - Write-Host("Please login...") - Connect-AzAccount -subscriptionid $clusterSubscriptionId + + if ($isUsingServicePrincipal) { + $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force + $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret + Connect-AzAccount -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId + } else { + Write-Host("Please login...") + Connect-AzAccount -subscriptionid $clusterSubscriptionId + } } catch { Write-Host("") diff --git a/scripts/onboarding/managed/disable-monitoring.sh b/scripts/onboarding/managed/disable-monitoring.sh index f55b4e617..f20bd7d33 100644 --- a/scripts/onboarding/managed/disable-monitoring.sh +++ b/scripts/onboarding/managed/disable-monitoring.sh @@ -14,7 +14,10 @@ # 1. disable monitoring using current kube-context # bash disable_monitoring.sh --resource-id/-r -# 2. disable monitoring using specific kube-context +# 2. disable monitoring using specific kube-context using service principal creds for the azure login +# bash disable_monitoring.sh --resource-id --client-id --client-secret --tenant-id + +# 3. 
disable monitoring using specific kube-context # bash disable_monitoring.sh --resource-id/-r --kube-context/-k @@ -48,12 +51,18 @@ isAroV4Cluster=false clusterResourceId="" kubeconfigContext="" +# sp details for the login if provided +servicePrincipalClientId="" +servicePrincipalClientSecret="" +servicePrincipalTenantId="" +isUsingServicePrincipal=false + usage() { local basename=`basename $0` echo echo "Disable Azure Monitor for containers:" - echo "$basename --resource-id/-r [--kube-context/-k ]" + echo "$basename --resource-id/-r [--client-id ] [--client-secret ] [--tenant-id ] [--kube-context/-k ]" } delete_helm_release() @@ -105,8 +114,13 @@ remove_monitoring_tags() { echo "deleting monitoring tags ..." - echo "login to the azure interactively" - az login --use-device-code + if [ "$isUsingServicePrincipal" = true ] ; then + echo "login to the azure using provided service principal creds" + az login --service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId + else + echo "login to the azure interactively" + az login --use-device-code + fi echo "set the cluster subscription id: ${clusterSubscriptionId}" az account set -s ${clusterSubscriptionId} @@ -159,6 +173,9 @@ for arg in "$@"; do case "$arg" in "--resource-id") set -- "$@" "-r" ;; "--kube-context") set -- "$@" "-k" ;; + "--client-id") set -- "$@" "-c" ;; + "--client-secret") set -- "$@" "-s" ;; + "--tenant-id") set -- "$@" "-t" ;; "--help") set -- "$@" "-h" ;; "--"*) usage ;; *) set -- "$@" "$arg" @@ -167,7 +184,7 @@ done local OPTIND opt - while getopts 'hk:r:' opt; do + while getopts 'hk:c:s:t:r:' opt; do case "$opt" in h) usage @@ -183,6 +200,21 @@ done echo "clusterResourceId is $OPTARG" ;; + c) + servicePrincipalClientId="$OPTARG" + echo "servicePrincipalClientId is $OPTARG" + ;; + + s) + servicePrincipalClientSecret="$OPTARG" + echo "clientSecret is *****" + ;; + + t) + servicePrincipalTenantId="$OPTARG" + echo "service principal tenantId is $OPTARG" + ;; + ?) usage exit 1 @@ -241,6 +273,11 @@ done exit 1 fi + if [ ! -z "$servicePrincipalClientId" -a ! -z "$servicePrincipalClientSecret" -a ! -z "$servicePrincipalTenantId" ]; then + echo "using service principal creds (clientId, secret and tenantId) for azure login since provided" + isUsingServicePrincipal=true + fi + } diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index 346cdc81a..b734ba347 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -9,6 +9,12 @@ .PARAMETER clusterResourceId Id of the Azure Managed Cluster such as Azure ARC K8s, ARO v4 etc. 
+ .PARAMETER servicePrincipalClientId + Client Id of the service principal which will be used for the azure login + .PARAMETER servicePrincipalClientSecret + Client secret of the service principal which will be used for the azure login + .PARAMETER tenantId + Azure TenantId of the service principal which will be used for the azure login .PARAMETER kubeContext (optional) kube-context of the k8 cluster to install Azure Monitor for containers HELM chart .PARAMETER workspaceResourceId (optional) @@ -34,6 +40,12 @@ param( [Parameter(mandatory = $true)] [string]$clusterResourceId, [Parameter(mandatory = $false)] + [string]$servicePrincipalClientId, + [Parameter(mandatory = $false)] + [string]$servicePrincipalClientSecret, + [Parameter(mandatory = $false)] + [string]$tenantId, + [Parameter(mandatory = $false)] [string]$kubeContext, [Parameter(mandatory = $false)] [string]$workspaceResourceId, @@ -53,6 +65,7 @@ $helmChartRepoUrl = "https://kubernetes-charts-incubator.storage.googleapis.com/ # flags to indicate the cluster types $isArcK8sCluster = $false $isAksCluster = $false +$isUsingServicePrincipal = $false if([string]::IsNullOrEmpty($helmRepoName) -eq $false){ $helmChartRepoName = $helmRepoName @@ -220,6 +233,13 @@ if (($clusterResourceId.ToLower().Contains("microsoft.kubernetes/connectedcluste exit } +if(([string]::IsNullOrEmpty($servicePrincipalClientId) -eq $false) -and + ([string]::IsNullOrEmpty($servicePrincipalClientSecret) -eq $false) -and + ([string]::IsNullOrEmpty($tenantId) -eq $false)) { + Write-Host("Using service principal creds for the azure login since these provided.") + $isUsingServicePrincipal = $true +} + if ($clusterResourceId.ToLower().Contains("microsoft.kubernetes/connectedclusters") -eq $true) { $isArcK8sCluster = $true } elseif ($clusterResourceId.ToLower().Contains("microsoft.containerservice/managedclusters") -eq $true) { @@ -231,6 +251,12 @@ $clusterSubscriptionId = $resourceParts[2] Write-Host("Cluster SubscriptionId : '" + $clusterSubscriptionId + "' ") -ForegroundColor Green +if ($isUsingServicePrincipal) { + $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force + $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret + Connect-AzAccount -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId +} + try { Write-Host("") Write-Host("Trying to get the current Az login context...") @@ -247,8 +273,14 @@ catch { if ($null -eq $account.Account) { try { - Write-Host("Please login...") - Connect-AzAccount -subscriptionid $clusterSubscriptionId + if ($isUsingServicePrincipal) { + $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force + $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret + Connect-AzAccount -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId + } else { + Write-Host("Please login...") + Connect-AzAccount -subscriptionid $clusterSubscriptionId + } } catch { Write-Host("") @@ -498,7 +530,7 @@ try { helm repo add $helmChartRepoName $helmChartRepoUrl Write-Host("updating helm repo to get latest version of charts") helm repo update - $helmParameters = "omsagent.secret.wsid=$workspaceGUID,omsagent.secret.key=$workspacePrimarySharedKey,omsagent.env.clusterId=$clusterResourceId" + $helmParameters = 
"omsagent.secret.wsid=$workspaceGUID,omsagent.secret.key=$workspacePrimarySharedKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion" if([string]::IsNullOrEmpty($proxyEndpoint) -eq $false) { Write-Host("using proxy endpoint since its provided") $helmParameters = $helmParameters + ",omsagent.proxy=$proxyEndpoint" diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index 5a8e7e040..17c075725 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -20,17 +20,19 @@ # 1. Using Default Azure Log Analytics and no-proxy with current kube config context # bash enable-monitoring.sh --resource-id -# 2. Using Default Azure Log Analytics and no-proxy +# 2. Using Default Azure Log Analytics and no-proxy with current kube config context, and using service principal creds for the azure login +# bash enable-monitoring.sh --resource-id --client-id --client-secret --tenant-id + +# 3. Using Default Azure Log Analytics and no-proxy # bash enable-monitoring.sh --resource-id --kube-context -# 3. Using Default Azure Log Analytics and with proxy endpoint configuration +# 4. Using Default Azure Log Analytics and with proxy endpoint configuration # bash enable-monitoring.sh --resource-id --kube-context --proxy - -# 4. Using Existing Azure Log Analytics and no-proxy +# 5. Using Existing Azure Log Analytics and no-proxy # bash enable-monitoring.sh --resource-id --kube-context --workspace-id -# 5. Using Existing Azure Log Analytics and proxy +# 6. Using Existing Azure Log Analytics and proxy # bash enable-monitoring.sh --resource-id --kube-context --workspace-id --proxy set -e @@ -95,12 +97,18 @@ workspaceResourceGroup="DefaultResourceGroup-"$workspaceRegionCode workspaceGuid="" workspaceKey="" +# sp details for the login if provided +servicePrincipalClientId="" +servicePrincipalClientSecret="" +servicePrincipalTenantId="" +isUsingServicePrincipal=false + usage() { local basename=`basename $0` echo echo "Enable Azure Monitor for containers:" - echo "$basename --resource-id [--kube-context ] [--workspace-id ] [--proxy ]" + echo "$basename --resource-id [--client-id ] [--client-secret ] [--tenant-id ] [--kube-context ] [--workspace-id ] [--proxy ]" } parse_args() @@ -120,8 +128,12 @@ for arg in "$@"; do "--kube-context") set -- "$@" "-k" ;; "--workspace-id") set -- "$@" "-w" ;; "--proxy") set -- "$@" "-p" ;; + "--client-id") set -- "$@" "-c" ;; + "--client-secret") set -- "$@" "-s" ;; + "--tenant-id") set -- "$@" "-t" ;; "--helm-repo-name") set -- "$@" "-n" ;; "--helm-repo-url") set -- "$@" "-u" ;; + "--container-log-volume") set -- "$@" "-v" ;; "--"*) usage ;; *) set -- "$@" "$arg" esac @@ -129,7 +141,7 @@ done local OPTIND opt -while getopts 'hk:r:w:p:n:u:' opt; do +while getopts 'hk:r:w:p:c:s:t:n:u:v:' opt; do case "$opt" in h) usage @@ -155,6 +167,21 @@ while getopts 'hk:r:w:p:n:u:' opt; do echo "proxyEndpoint is $OPTARG" ;; + c) + servicePrincipalClientId="$OPTARG" + echo "servicePrincipalClientId is $OPTARG" + ;; + + s) + servicePrincipalClientSecret="$OPTARG" + echo "clientSecret is *****" + ;; + + t) + servicePrincipalTenantId="$OPTARG" + echo "service principal tenantId is $OPTARG" + ;; + n) helmRepoName="$OPTARG" echo "helm repo name is $OPTARG" @@ -277,6 +304,11 @@ if [ ! -z "$proxyEndpoint" ]; then fi fi +if [ ! -z "$servicePrincipalClientId" -a ! -z "$servicePrincipalClientSecret" -a ! 
-z "$servicePrincipalTenantId" ]; then + echo "using service principal creds (clientId, secret and tenantId) for azure login since provided" + isUsingServicePrincipal=true +fi + } configure_to_public_cloud() @@ -309,7 +341,9 @@ create_default_log_analytics_workspace() # extract subscription from cluster resource id local subscriptionId="$(echo $clusterResourceId | cut -d'/' -f3)" - local clusterRegion=$(az resource show --ids ${clusterResourceId} --query location) + local clusterRegion=$(az resource show --ids ${clusterResourceId} --query location -o tsv) + # convert cluster region to lower case + clusterRegion=$(echo $clusterRegion | tr "[:upper:]" "[:lower:]") echo "cluster region:" $clusterRegion # mapping fors for default Azure Log Analytics workspace @@ -464,6 +498,10 @@ install_helm_chart() echo "installing Azure Monitor for containers HELM chart on to the cluster with kubecontext:${kubeconfigContext} ..." fi + echo "getting the region of the cluster" + clusterRegion=$(az resource show --ids ${clusterResourceId} --query location) + echo "cluster region is : ${clusterRegion}" + echo "adding helm repo:" $helmRepoName helm repo add $helmRepoName $helmRepoUrl @@ -474,18 +512,18 @@ install_helm_chart() echo "using proxy endpoint since proxy configuration passed in" if [ -z "$kubeconfigContext" ]; then echo "using current kube-context since --kube-context/-k parameter not passed in" - helm upgrade --install azmon-containers-release-1 --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId $helmRepoName/$helmChartName + helm upgrade --install azmon-containers-release-1 --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmRepoName/$helmChartName else echo "using --kube-context:${kubeconfigContext} since passed in" - helm upgrade --install azmon-containers-release-1 --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId $helmRepoName/$helmChartName --kube-context ${kubeconfigContext} + helm upgrade --install azmon-containers-release-1 --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmRepoName/$helmChartName --kube-context ${kubeconfigContext} fi else if [ -z "$kubeconfigContext" ]; then echo "using current kube-context since --kube-context/-k parameter not passed in" - helm upgrade --install azmon-containers-release-1 --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId $helmRepoName/$helmChartName + helm upgrade --install azmon-containers-release-1 --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmRepoName/$helmChartName else echo "using --kube-context:${kubeconfigContext} since passed in" - helm upgrade --install azmon-containers-release-1 --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId $helmRepoName/$helmChartName --kube-context ${kubeconfigContext} + helm upgrade --install azmon-containers-release-1 --set 
omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmRepoName/$helmChartName --kube-context ${kubeconfigContext} fi fi @@ -495,8 +533,13 @@ install_helm_chart() login_to_azure() { - echo "login to the azure interactively" - az login --use-device-code + if [ "$isUsingServicePrincipal" = true ] ; then + echo "login to the azure using provided service principal creds" + az login --service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId + else + echo "login to the azure interactively" + az login --use-device-code + fi } set_azure_subscription() diff --git a/scripts/onboarding/solution-onboarding.md b/scripts/onboarding/solution-onboarding.md index 045738762..13e76530d 100644 --- a/scripts/onboarding/solution-onboarding.md +++ b/scripts/onboarding/solution-onboarding.md @@ -6,8 +6,8 @@ You can either use the Azure Powershell or Azure cli to deploy the solution. If you are not familiar with the concepts of deploying resources using a template with PowerShell, see [Deploy resources with Resource Manager templates and Azure PowerShell](https://review.docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-deploy) 1. Get the below template files to your local computer. - - Template file [azuremonitor-containerSolution.json](https://github.com/Microsoft/OMS-docker/blob/ci_feature_prod/docs/templates/azuremonitor-containerSolution.json) - - TemplateParams file [azuremonitor-containerSolutionParams.json](https://github.com/Microsoft/OMS-docker/blob/ci_feature_prod/docs/templates/azuremonitor-containerSolutionParams.json) + - Template file [azuremonitor-containerSolution.json](https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/templates/azuremonitor-containerSolution.json) + - TemplateParams file [azuremonitor-containerSolutionParams.json](https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/templates/azuremonitor-containerSolutionParams.json) 2. Edit the TemplateParams file in your local computer. * workspaceResourceId parameter : - Replace `` with Azure subscriptionID for your Workspace diff --git a/scripts/preview/health/HealthAgentOnboarding.ps1 b/scripts/preview/health/HealthAgentOnboarding.ps1 index 881dd2549..9ce8eca74 100644 --- a/scripts/preview/health/HealthAgentOnboarding.ps1 +++ b/scripts/preview/health/HealthAgentOnboarding.ps1 @@ -339,7 +339,7 @@ if ($false -eq $isSolutionOnboarded) { try { New-AzResourceGroupDeployment -Name $DeploymentName ` -ResourceGroupName $workspaceResourceGroupName ` - -TemplateUri https://raw.githubusercontent.com/Microsoft/OMS-docker/ci_feature/docs/templates/azuremonitor-containerSolution.json ` + -TemplateUri https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/templates/azuremonitor-containerSolution.json ` -TemplateParameterObject $Parameters -ErrorAction Stop` diff --git a/scripts/troubleshoot/README.md b/scripts/troubleshoot/README.md index d4e2e9cf4..5ffa07639 100644 --- a/scripts/troubleshoot/README.md +++ b/scripts/troubleshoot/README.md @@ -7,7 +7,7 @@ The table below summarizes known issues you may face while using Azure Monitor f | ---- | --- | | Error Message `No data for selected filters` | It may take some time to establish monitoring data flow for newly created clusters. 
Please allow at least 10-15 minutes for data to appear for your cluster. | | Error Message `Error retrieving data` | While Azure Kubenetes Service cluster is setting up for health and performance monitoring, a connection is established between the cluster and Azure Log Analytics workspace. Log Analytics workspace is used to store all monitoring data for your cluster. This error may occurr when your Log Analytics workspace has been deleted or lost. Please check whether your Log Analytics workspace is available. To find your Log Analytics workspace go [here.](https://docs.microsoft.com/en-us/azure/log-analytics/log-analytics-manage-access) and your workspace is available. If the workspace is missing, you will need to re-onboard Container Health to your cluster. To re-onboard, you will need to [opt out](https://docs.microsoft.com/en-us/azure/azure-monitor/insights/container-insights-optout) of monitoring for the cluster and [onboard](https://docs.microsoft.com/en-us/azure/azure-monitor/insights/container-insights-enable-existing-clusters) again to Container Health. | -| `Error retrieving data` after adding Container Health through az aks cli | When onboarding using az aks cli, very seldom, Container Health may not be properly onboarded. Please check whether the Container Insights Solution is onboarded. To do this, go to your [Log Analytics workspace](https://docs.microsoft.com/en-us/azure/log-analytics/log-analytics-manage-access) and see if Container Insights Solution is available by going to the "Solutions" tab under General. To resolve this issue, you will need to redeploy the Container Insights Solution. Please follow the instructions on [how to deploy Azure Monitor - container health solution to your Log Analytics workspace. ](https://github.com/Microsoft/OMS-docker/blob/ci_feature_prod/docs/solution-onboarding.md) | +| `Error retrieving data` after adding Container Health through az aks cli | When onboarding using az aks cli, very seldom, Container Health may not be properly onboarded. Please check whether the Container Insights Solution is onboarded. To do this, go to your [Log Analytics workspace](https://docs.microsoft.com/en-us/azure/log-analytics/log-analytics-manage-access) and see if Container Insights Solution is available by going to the "Solutions" tab under General. To resolve this issue, you will need to redeploy the Container Insights Solution. Please follow the instructions on [how to deploy Azure Monitor - container health solution to your Log Analytics workspace. ](https://github.com/microsoft/Docker-Provider/blob/ci_prod/scripts/onboarding/solution-onboarding.md) | | Failed to `Enable fast alerting experience on basic metrics for this Azure Kubernetes Services cluster` | The action is trying to grant the Monitoring Metrics Publisher role assignment on the cluster resource. The user initiating the process must have access to the **Microsoft.Authorization/roleAssignments/write** permission on the AKS cluster resource scope. Only members of the **Owner** and **User Access Administrator** built-in roles are granted access to this permission. If your security policies require assigning granular level permissions, we recommend you view [custom roles](https://docs.microsoft.com/en-us/azure/role-based-access-control/custom-roles) and assign it to the users who require it. 
| # Azure Red Hat OpenShift Service (ARO) @@ -36,7 +36,7 @@ Prequisites: # AKS or ARO -You can use the troubleshooting script provided [here](https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature_prod/Troubleshoot/TroubleshootError.ps1) to diagnose the problem. +You can use the troubleshooting script provided [here](https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/troubleshoot/TroubleshootError.ps1) to diagnose the problem. Steps: - Open powershell using the [cloudshell](https://docs.microsoft.com/en-us/azure/cloud-shell/overview) in the azure portal. @@ -45,8 +45,8 @@ Steps: For Mac OS, refer [install-powershell-core-on-mac](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-core-on-macos?view=powershell-6) how to install powershell - Make sure that you're using powershell (selected by default) - Run the following command to change home directory - `cd ~` -- Run the following command to download the script - `curl -LO https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature/Troubleshoot/TroubleshootError.ps1` - > Note: In some versions of Powershell above CURL command may not work in such cases, you can try `curl https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature/Troubleshoot/TroubleshootError.ps1 -O TroubleshootError.ps1` +- Run the following command to download the script - `curl -LO https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/troubleshoot/TroubleshootError.ps1` + > Note: In some versions of Powershell above CURL command may not work in such cases, you can try `curl https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/troubleshoot/TroubleshootError.ps1 -O TroubleshootError.ps1` - Run the following command to execute the script - `./TroubleshootError.ps1 -ClusterResourceId ` > Note: For AKS, resourceIdoftheCluster should be in this format `/subscriptions//resourceGroups//providers/Microsoft.ContainerService/managedClusters/`.For ARO, should be in this format `/subscriptions//resourceGroups//providers/Microsoft.ContainerService/openShiftManagedClusters/`. - This script will generate a TroubleshootDump.txt which collects detailed information about container health onboarding. @@ -54,10 +54,10 @@ Steps: # Aks-Engine Kubernetes -You can use the troubleshooting script provided [here](https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature_prod/Troubleshoot/TroubleshootError_AcsEngine.ps1) to diagnose the problem. +You can use the troubleshooting script provided [here](https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/troubleshoot/TroubleshootError_AcsEngine.ps1) to diagnose the problem. 
Steps: -- Download [TroubleshootError_AcsEngine.ps1](https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature_prod/Troubleshoot/TroubleshootError_AcsEngine.ps1), [ContainerInsightsSolution.json](https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature_prod/Troubleshoot/ContainerInsightsSolution.json) +- Download [TroubleshootError_AcsEngine.ps1](https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/troubleshoot/TroubleshootError_AcsEngine.ps1), [ContainerInsightsSolution.json](https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/troubleshoot/ContainerInsightsSolution.json) - Collect Subscription ID, Resource group name of the Aks-Engine Kubernetes cluster - Use the following command to run the script : `.\TroubleshootError_AcsEngine.ps1 -SubscriptionId -ResourceGroupName `. This script will generate a TroubleshootDump.txt which collects detailed information about container health onboarding. diff --git a/scripts/troubleshoot/TroubleshootError.ps1 b/scripts/troubleshoot/TroubleshootError.ps1 index 7f857caa3..754a43e74 100644 --- a/scripts/troubleshoot/TroubleshootError.ps1 +++ b/scripts/troubleshoot/TroubleshootError.ps1 @@ -671,7 +671,7 @@ else { try { New-AzResourceGroupDeployment -Name $DeploymentName ` -ResourceGroupName $workspaceResourceGroupName ` - -TemplateUri https://raw.githubusercontent.com/Microsoft/OMS-docker/ci_feature/docs/templates/azuremonitor-containerSolution.json ` + -TemplateUri https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/templates/azuremonitor-containerSolution.json ` -TemplateParameterObject $Parameters -ErrorAction Stop` Write-Host("") diff --git a/scripts/troubleshoot/TroubleshootError_nonAzureK8s.ps1 b/scripts/troubleshoot/TroubleshootError_nonAzureK8s.ps1 index c7509a940..14b080b23 100644 --- a/scripts/troubleshoot/TroubleshootError_nonAzureK8s.ps1 +++ b/scripts/troubleshoot/TroubleshootError_nonAzureK8s.ps1 @@ -345,7 +345,7 @@ else { try { New-AzResourceGroupDeployment -Name $DeploymentName ` -ResourceGroupName $defaultWorkspaceResourceGroup ` - -TemplateUri https://raw.githubusercontent.com/Microsoft/OMS-docker/ci_feature/docs/templates/azuremonitor-containerSolution.json ` + -TemplateUri https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/templates/azuremonitor-containerSolution.json ` -TemplateParameterObject $Parameters -ErrorAction Stop` Write-Host("") From 2d56087e528a145aeb06b5beb6a60092dfa41e15 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 14 Sep 2020 12:34:33 -0700 Subject: [PATCH 017/301] add missing serviceprincipal in ps scripts (#435) --- scripts/onboarding/managed/disable-monitoring.ps1 | 4 ++-- scripts/onboarding/managed/enable-monitoring.ps1 | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/onboarding/managed/disable-monitoring.ps1 b/scripts/onboarding/managed/disable-monitoring.ps1 index ea66cb3a3..8945f90b6 100644 --- a/scripts/onboarding/managed/disable-monitoring.ps1 +++ b/scripts/onboarding/managed/disable-monitoring.ps1 @@ -226,7 +226,7 @@ Write-Host("Cluster SubscriptionId : '" + $clusterSubscriptionId + "' ") -Foregr if ($isUsingServicePrincipal) { $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret - Connect-AzAccount -Credential $spCreds -Tenant $tenantId -Subscription 
$clusterSubscriptionId + Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId } try { @@ -249,7 +249,7 @@ if ($null -eq $account.Account) { if ($isUsingServicePrincipal) { $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret - Connect-AzAccount -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId + Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId } else { Write-Host("Please login...") Connect-AzAccount -subscriptionid $clusterSubscriptionId diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index b734ba347..338de6cbc 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -254,7 +254,7 @@ Write-Host("Cluster SubscriptionId : '" + $clusterSubscriptionId + "' ") -Foregr if ($isUsingServicePrincipal) { $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret - Connect-AzAccount -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId + Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId } try { @@ -276,7 +276,7 @@ if ($null -eq $account.Account) { if ($isUsingServicePrincipal) { $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret - Connect-AzAccount -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId + Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId } else { Write-Host("Please login...") Connect-AzAccount -subscriptionid $clusterSubscriptionId From a28aaf025f91957f193121e66fbfb1c1f9d6abe4 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Tue, 15 Sep 2020 11:46:14 -0700 Subject: [PATCH 018/301] fix telemetry bug (#436) --- source/plugins/ruby/out_mdm.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/plugins/ruby/out_mdm.rb b/source/plugins/ruby/out_mdm.rb index b28c17034..c4cc46dd7 100644 --- a/source/plugins/ruby/out_mdm.rb +++ b/source/plugins/ruby/out_mdm.rb @@ -272,7 +272,7 @@ def send_to_mdm(post_body) @last_telemetry_sent_time = Time.now end rescue Net::HTTPServerException => e - if !response.nil && !response.body.nil? #body will have actual error + if !response.nil? && !response.body.nil? 
#body will have actual error @log.info "Failed to Post Metrics to MDM : #{e} Response.body: #{response.body}" else @log.info "Failed to Post Metrics to MDM : #{e} Response: #{response}" From 0062b32da17eece46f6e754c0f8a35ac57c75c92 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 16 Sep 2020 10:59:03 -0700 Subject: [PATCH 019/301] Gangams/readmeupdates non aks 09162020 (#437) * changes for ciprod09162020 non-aks release * fix script to handle cross sub scenario * fix minor comment * fix date in version file * fix pr comments --- ReleaseNotes.md | 12 +++++++++++- build/version | 4 ++-- charts/azuremonitor-containers/values.yaml | 6 +++--- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 12 ++++++------ kubernetes/windows/Dockerfile | 2 +- scripts/onboarding/managed/enable-monitoring.sh | 2 +- 7 files changed, 25 insertions(+), 15 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 0f1d932a8..547d00573 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,17 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 09/16/2020 - +> Note: This agent release targetted ONLY for non-AKS clusters via Azure Monitor for containers HELM chart update +##### Version microsoft/oms:ciprod09162020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod09162020 (linux) +##### Version microsoft/oms:win-ciprod09162020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod09162020 (windows) +##### Code change log +- Collection of Azure Network Policy Manager Basic and Advanced metrics +- Add support in Windows Agent for Container log collection of CRI runtimes such as ContainerD +- Alertable metrics support Arc K8s cluster to parity with AKS +- Support for multiple container log mount paths when docker is updated through knode +- Bug fix related to MDM telemetry + ### 08/07/2020 - ##### Version microsoft/oms:ciprod08072020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08072020 (linux) ##### Version microsoft/oms:win-ciprod08072020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08072020 (windows) @@ -26,7 +37,6 @@ Note : The agent version(s) below has dates (ciprod), which indicate t - Add region check before sending alertable metrics to MDM - Telemetry fix for agent telemetry for sov. 
clouds - ### 07/15/2020 - ##### Version microsoft/oms:ciprod07152020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod07152020 (linux) ##### Version microsoft/oms:win-ciprod05262020-2 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod05262020-2 (windows) diff --git a/build/version b/build/version index f26973116..b53b0dcfb 100644 --- a/build/version +++ b/build/version @@ -5,8 +5,8 @@ CONTAINER_BUILDVERSION_MAJOR=10 CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 -CONTAINER_BUILDVERSION_BUILDNR=4 -CONTAINER_BUILDVERSION_DATE=20200805 +CONTAINER_BUILDVERSION_BUILDNR=5 +CONTAINER_BUILDVERSION_DATE=20200916 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 4d0d7f8f2..9c48cf9fb 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -12,10 +12,10 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod08072020" - tagWindows: "win-ciprod08072020" + tag: "ciprod09162020" + tagWindows: "win-ciprod09162020" pullPolicy: IfNotPresent - dockerProviderVersion: "10.0.0-4" + dockerProviderVersion: "10.0.0-5" agentVersion: "1.10.0.1" ## To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index bc27a5384..ee35cd556 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod08072020 +ARG IMAGE_TAG=ciprod09162020 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 947620ebc..b71a95227 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -337,13 +337,13 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.0.0-4" + dockerProviderVersion: "10.0.0-5" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08072020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod09162020" imagePullPolicy: IfNotPresent resources: limits: @@ -493,13 +493,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.0.0-4" + dockerProviderVersion: "10.0.0-5" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08072020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod09162020" imagePullPolicy: IfNotPresent resources: limits: @@ -639,13 +639,13 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.0.0-4" + dockerProviderVersion: "10.0.0-5" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08072020" + image: 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod09162020" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 70a5f6045..ca89d1c80 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod08072020 +ARG IMAGE_TAG=win-ciprod09162020 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index 17c075725..4142dbf6c 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -620,7 +620,7 @@ add_container_insights_solution $workspaceResourceId # get workspace guid and key get_workspace_guid_and_key $workspaceResourceId -if [ "$isClusterAndWorkspaceInSameSubscription" = true ] ; then +if [ "$isClusterAndWorkspaceInSameSubscription" = false ] ; then echo "switch to cluster subscription id as active subscription for cli: ${clusterSubscriptionId}" set_azure_subscription $clusterSubscriptionId fi From 1a7ef1cfbfe611e8d14218167c393a2becafc8f9 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 16 Sep 2020 14:53:21 -0700 Subject: [PATCH 020/301] Gangams/fix weird conflicts (#439) * separate build yamls for ci_prod branch (#415) (#416) * [Merge] dev to prod for ciprod08072020 release (#424) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar --- ReleaseNotes.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 547d00573..499c99f02 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -37,6 +37,7 @@ Note : The agent version(s) below has dates (ciprod), which indicate t - Add region check before sending alertable metrics to MDM - Telemetry fix for agent telemetry for sov. 
clouds + ### 07/15/2020 - ##### Version microsoft/oms:ciprod07152020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod07152020 (linux) ##### Version microsoft/oms:win-ciprod05262020-2 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod05262020-2 (windows) From bf75bf04ac28f1462ea358ea4762610b0cf70553 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 21 Sep 2020 10:07:52 -0700 Subject: [PATCH 021/301] fix quote issue for the region (#441) --- scripts/onboarding/managed/enable-monitoring.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index 4142dbf6c..226fd978b 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -499,7 +499,7 @@ install_helm_chart() fi echo "getting the region of the cluster" - clusterRegion=$(az resource show --ids ${clusterResourceId} --query location) + clusterRegion=$(az resource show --ids ${clusterResourceId} --query location -o tsv) echo "cluster region is : ${clusterRegion}" echo "adding helm repo:" $helmRepoName From 6287724c89ae6e8d0ac74789e472c99fed28bb48 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Mon, 21 Sep 2020 14:16:21 -0700 Subject: [PATCH 022/301] fix cpucapacity/limit bug (#442) --- source/plugins/ruby/KubernetesApiClient.rb | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 36dcdd8c6..073eb0417 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -719,6 +719,9 @@ def getMetricNumericValue(metricName, metricVal) if (metricValue.end_with?("m")) metricValue.chomp!("m") metricValue = Float(metricValue) * 1000.0 ** 2 + elsif (metricValue.end_with?("k")) + metricValue.chomp!("k") + metricValue = Float(metricValue) * 1000.0 else #assuming no units specified, it is cores that we are converting to nanocores (the below conversion will fail for other unsupported 'units') metricValue = Float(metricValue) * 1000.0 ** 3 end From bd30a47ecb9b6ea5867fbd9ceff4810d3b5d4431 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Wed, 23 Sep 2020 09:01:24 -0700 Subject: [PATCH 023/301] grwehner/pv-usage-metrics (#431) - Send persistent volume usage and capacity metrics to LA for PVs with PVCs at the pod level; config to include or exclude kube-system namespace. - Send PV usage percentage to MDM if over the configurable threshold. - Add PV usage recommended alert template. 
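
The core of this change is a simple ratio check: a persistent volume's usedBytes reading is compared against its capacityBytes, and the pvUsageExceededPercentage metric is emitted to MDM only when the usage percentage meets or exceeds the configured threshold (60% by default, overridable via the pv_usage_threshold_percentage setting in the container-azm-ms-agentconfig configmap). A minimal Ruby sketch of that comparison, with hypothetical helper names and assuming the default threshold, is shown below; it is illustrative only and is not the plugin code itself.

    # Illustrative sketch only -- mirrors the percentage/threshold comparison
    # performed in filter_cadvisor2mdm.rb further down in this patch.
    DEFAULT_PV_USAGE_THRESHOLD = 60.0 # percent, per DEFAULT_MDM_PV_UTILIZATION_THRESHOLD

    def pv_usage_exceeded?(used_bytes, capacity_bytes, threshold = DEFAULT_PV_USAGE_THRESHOLD)
      # Skip volumes with unknown or zero capacity to avoid division by zero.
      return false if capacity_bytes.nil? || capacity_bytes.zero?
      percentage = (used_bytes * 100.0) / capacity_bytes
      percentage >= threshold
    end

    pv_usage_exceeded?(5 * 1024**3, 10 * 1024**3)  # => false (50% of capacity)
    pv_usage_exceeded?(7 * 1024**3, 10 * 1024**3)  # => true  (70% >= 60%)

Only volumes backed by a PVC are considered, and volumes in the kube-system namespace are skipped unless collect_kube_system_pv_metrics is enabled in the configmap, as the diffs below show.
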
--- .../PVUsagePercentage.json | 174 ++++++++++++++++++ build/linux/installer/conf/container.conf | 2 +- build/linux/installer/conf/kube.conf | 2 +- .../installer/datafiles/base_container.data | 1 + .../scripts/tomlparser-mdm-metrics-config.rb | 32 +++- .../tomlparser-metric-collection-config.rb | 71 +++++++ kubernetes/container-azm-ms-agentconfig.yaml | 15 ++ kubernetes/linux/main.sh | 8 + kubernetes/omsagent.yaml | 2 +- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 77 ++++++++ source/plugins/ruby/MdmAlertTemplates.rb | 32 ++++ source/plugins/ruby/MdmMetricsGenerator.rb | 36 ++++ source/plugins/ruby/constants.rb | 11 ++ source/plugins/ruby/filter_cadvisor2mdm.rb | 76 +++++++- source/plugins/ruby/in_cadvisor_perf.rb | 1 + source/plugins/ruby/in_win_cadvisor_perf.rb | 1 + 16 files changed, 533 insertions(+), 8 deletions(-) create mode 100644 alerts/recommended_alerts_ARM/PVUsagePercentage.json create mode 100644 build/linux/installer/scripts/tomlparser-metric-collection-config.rb diff --git a/alerts/recommended_alerts_ARM/PVUsagePercentage.json b/alerts/recommended_alerts_ARM/PVUsagePercentage.json new file mode 100644 index 000000000..e6cdbee15 --- /dev/null +++ b/alerts/recommended_alerts_ARM/PVUsagePercentage.json @@ -0,0 +1,174 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "alertName": { + "type": "string", + "minLength": 1, + "metadata": { + "description": "Name of the alert" + } + }, + "alertDescription": { + "type": "string", + "defaultValue": "This is a metric alert", + "metadata": { + "description": "Description of alert" + } + }, + "alertSeverity": { + "type": "int", + "defaultValue": 3, + "allowedValues": [ + 0, + 1, + 2, + 3, + 4 + ], + "metadata": { + "description": "Severity of alert {0,1,2,3,4}" + } + }, + "isEnabled": { + "type": "bool", + "defaultValue": true, + "metadata": { + "description": "Specifies whether the alert is enabled" + } + }, + "clusterResourceId": { + "type": "string", + "minLength": 1, + "metadata": { + "description": "Full Resource ID of the kubernetes cluster emitting the metric that will be used for the comparison. For example /subscriptions/00000000-0000-0000-0000-0000-00000000/resourceGroups/ResourceGroupName/providers/Microsoft.ContainerService/managedClusters/cluster-xyz" + } + }, + "operator": { + "type": "string", + "defaultValue": "GreaterThan", + "allowedValues": [ + "Equals", + "NotEquals", + "GreaterThan", + "GreaterThanOrEqual", + "LessThan", + "LessThanOrEqual" + ], + "metadata": { + "description": "Operator comparing the current value with the threshold value." + } + }, + "threshold": { + "type": "int", + "defaultValue": 80, + "metadata": { + "description": "The threshold value at which the alert is activated." + }, + "minValue": 1, + "maxValue": 100 + }, + "timeAggregation": { + "type": "string", + "defaultValue": "Average", + "allowedValues": [ + "Average", + "Minimum", + "Maximum", + "Count" + ], + "metadata": { + "description": "How the data that is collected should be combined over time." + } + }, + "windowSize": { + "type": "string", + "defaultValue": "PT5M", + "allowedValues": [ + "PT1M", + "PT5M", + "PT15M", + "PT30M", + "PT1H", + "PT6H", + "PT12H", + "PT24H" + ], + "metadata": { + "description": "Period of time used to monitor alert activity based on the threshold. Must be between one minute and one day. ISO 8601 duration format." 
+ } + }, + "evaluationFrequency": { + "type": "string", + "defaultValue": "PT1M", + "allowedValues": [ + "PT1M", + "PT5M", + "PT15M", + "PT30M", + "PT1H" + ], + "metadata": { + "description": "how often the metric alert is evaluated represented in ISO 8601 duration format" + } + }, + "actionGroupId": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "The ID of the action group that is triggered when the alert is activated or deactivated" + } + } + }, + "variables": {}, + "resources": [ + { + "name": "[parameters('alertName')]", + "type": "Microsoft.Insights/metricAlerts", + "location": "global", + "apiVersion": "2018-03-01", + "tags": {}, + "properties": { + "description": "[parameters('alertDescription')]", + "severity": "[parameters('alertSeverity')]", + "enabled": "[parameters('isEnabled')]", + "scopes": [ + "[parameters('clusterResourceId')]" + ], + "evaluationFrequency": "[parameters('evaluationFrequency')]", + "windowSize": "[parameters('windowSize')]", + "criteria": { + "odata.type": "Microsoft.Azure.Monitor.SingleResourceMultipleMetricCriteria", + "allOf": [ + { + "name": "1st criterion", + "metricName": "pvUsageExceededPercentage", + "metricNamespace": "Insights.Container/persistentvolumes", + "dimensions": [ + { + "name": "kubernetesNamespace", + "operator": "Include", + "values": [ + "*" + ] + }, + { + "name": "podName", + "operator": "Include", + "values": [ + "*" + ] + } + ], + "operator": "[parameters('operator')]", + "threshold": "[parameters('threshold')]", + "timeAggregation": "[parameters('timeAggregation')]", + "skipMetricValidation": true + } + ] + }, + "actions": "[if(empty(parameters('actionGroupId')), json('null'), json(concat('[{\"actionGroupId\": \"',parameters('actionGroupId'),'\"}]')))]" + } + } + ] +} diff --git a/build/linux/installer/conf/container.conf b/build/linux/installer/conf/container.conf index f02ec0131..e55c62fbc 100644 --- a/build/linux/installer/conf/container.conf +++ b/build/linux/installer/conf/container.conf @@ -46,7 +46,7 @@ type filter_cadvisor2mdm custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast - metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,memoryRssBytes + metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,memoryRssBytes,pvUsedBytes log_level info diff --git a/build/linux/installer/conf/kube.conf b/build/linux/installer/conf/kube.conf index 9ada8425f..ba40b7a35 100644 --- a/build/linux/installer/conf/kube.conf +++ b/build/linux/installer/conf/kube.conf @@ -74,7 +74,7 @@ type filter_cadvisor2mdm custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast - metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes + metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,pvUsedBytes log_level info diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index 87b89b14c..ca2538b79 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -120,6 +120,7 @@ MAINTAINER: 'Microsoft Corporation' /opt/livenessprobe.sh; build/linux/installer/scripts/livenessprobe.sh; 755; root; 
root /opt/tomlparser-prom-customconfig.rb; build/linux/installer/scripts/tomlparser-prom-customconfig.rb; 755; root; root /opt/tomlparser-mdm-metrics-config.rb; build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb; 755; root; root +/opt/tomlparser-metric-collection-config.rb; build/linux/installer/scripts/tomlparser-metric-collection-config.rb; 755; root; root /opt/tomlparser-health-config.rb; build/linux/installer/scripts/tomlparser-health-config.rb; 755; root; root /opt/tomlparser.rb; build/common/installer/scripts/tomlparser.rb; 755; root; root diff --git a/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb b/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb index 1c01dd8c6..345c51633 100644 --- a/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb +++ b/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb @@ -12,6 +12,7 @@ @percentageCpuUsageThreshold = Constants::DEFAULT_MDM_CPU_UTILIZATION_THRESHOLD @percentageMemoryRssThreshold = Constants::DEFAULT_MDM_MEMORY_RSS_THRESHOLD @percentageMemoryWorkingSetThreshold = Constants::DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD +@percentagePVUsageThreshold = Constants::DEFAULT_MDM_PV_UTILIZATION_THRESHOLD # Use parser to parse the configmap toml file to a ruby structure def parseConfigMap @@ -35,7 +36,7 @@ def parseConfigMap # Use the ruby structure created after config parsing to set the right values to be used for MDM metric configuration settings def populateSettingValuesFromConfigMap(parsedConfig) if !parsedConfig.nil? && !parsedConfig[:alertable_metrics_configuration_settings].nil? - # Get mdm metrics config settings for resource utilization + # Get mdm metrics config settings for container resource utilization begin resourceUtilization = parsedConfig[:alertable_metrics_configuration_settings][:container_resource_utilization_thresholds] if !resourceUtilization.nil? @@ -66,7 +67,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) puts "config::Non floating point value or value not convertible to float specified for Memory Working Set threshold, using default " @percentageMemoryWorkingSetThreshold = Constants::DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD end - puts "config::Using config map settings for MDM metric configuration settings for resource utilization" + puts "config::Using config map settings for MDM metric configuration settings for container resource utilization" end rescue => errorStr ConfigParseErrorLogger.logError("Exception while reading config map settings for MDM metric configuration settings for resource utilization - #{errorStr}, using defaults, please check config map for errors") @@ -74,6 +75,32 @@ def populateSettingValuesFromConfigMap(parsedConfig) @percentageMemoryRssThreshold = Constants::DEFAULT_MDM_MEMORY_RSS_THRESHOLD @percentageMemoryWorkingSetThreshold = Constants::DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD end + + # Get mdm metrics config settings for PV utilization + begin + isUsingPVThresholdConfig = false + pvUtilizationThresholds = parsedConfig[:alertable_metrics_configuration_settings][:pv_utilization_thresholds] + if !pvUtilizationThresholds.nil? + pvUsageThreshold = pvUtilizationThresholds[:pv_usage_threshold_percentage] + if !pvUsageThreshold.nil? + pvUsageThresholdFloat = pvUsageThreshold.to_f + if pvUsageThresholdFloat.kind_of? 
Float + @percentagePVUsageThreshold = pvUsageThresholdFloat + isUsingPVThresholdConfig = true + end + end + end + + if isUsingPVThresholdConfig + puts "config::Using config map settings for MDM metric configuration settings for PV utilization" + else + puts "config::Non floating point value or value not convertible to float specified for PV threshold, using default " + @percentagePVUsageThreshold = Constants::DEFAULT_MDM_PV_UTILIZATION_THRESHOLD + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while reading config map settings for MDM metric configuration settings for PV utilization - #{errorStr}, using defaults, please check config map for errors") + @percentagePVUsageThreshold = Constants::DEFAULT_MDM_PV_UTILIZATION_THRESHOLD + end end end @@ -97,6 +124,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) file.write("export AZMON_ALERT_CONTAINER_CPU_THRESHOLD=#{@percentageCpuUsageThreshold}\n") file.write("export AZMON_ALERT_CONTAINER_MEMORY_RSS_THRESHOLD=#{@percentageMemoryRssThreshold}\n") file.write("export AZMON_ALERT_CONTAINER_MEMORY_WORKING_SET_THRESHOLD=\"#{@percentageMemoryWorkingSetThreshold}\"\n") + file.write("export AZMON_ALERT_PV_USAGE_THRESHOLD=#{@percentagePVUsageThreshold}\n") # Close file after writing all MDM setting environment variables file.close puts "****************End MDM Metrics Config Processing********************" diff --git a/build/linux/installer/scripts/tomlparser-metric-collection-config.rb b/build/linux/installer/scripts/tomlparser-metric-collection-config.rb new file mode 100644 index 000000000..40d87b7f1 --- /dev/null +++ b/build/linux/installer/scripts/tomlparser-metric-collection-config.rb @@ -0,0 +1,71 @@ +#!/usr/local/bin/ruby +# frozen_string_literal: true + +require_relative "tomlrb" +require_relative "ConfigParseErrorLogger" +require_relative "microsoft/omsagent/plugin/constants" + +@configMapMountPath = "/etc/config/settings/metric_collection_settings" +@configVersion = "" +@configSchemaVersion = "" + +# Setting default values which will be used in case they are not set in the configmap or if configmap doesnt exist +@collectPVKubeSystemMetrics = false + +# Use parser to parse the configmap toml file to a ruby structure +def parseConfigMap + begin + # Check to see if config map is created + if (File.file?(@configMapMountPath)) + puts "config::configmap container-azm-ms-agentconfig for metric collection settings mounted, parsing values" + parsedConfig = Tomlrb.load_file(@configMapMountPath, symbolize_keys: true) + puts "config::Successfully parsed mounted config map" + return parsedConfig + else + puts "config::configmap container-azm-ms-agentconfig for metric collection settings not mounted, using defaults" + return nil + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config map for metric collection settings: #{errorStr}, using defaults, please check config map for errors") + return nil + end +end + +# Use the ruby structure created after config parsing to set the right values to be used for metric collection settings +def populateSettingValuesFromConfigMap(parsedConfig) + # Get metric collection settings for including or excluding kube-system namespace in PV metrics + begin + if !parsedConfig.nil? && !parsedConfig[:metric_collection_settings][:collect_kube_system_pv_metrics].nil? && !parsedConfig[:metric_collection_settings][:collect_kube_system_pv_metrics][:enabled].nil? 
+ @collectPVKubeSystemMetrics = parsedConfig[:metric_collection_settings][:collect_kube_system_pv_metrics][:enabled] + puts "config::Using config map setting for PV kube-system collection" + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while reading config map settings for PV kube-system collection - #{errorStr}, using defaults, please check config map for errors") + end +end + +@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] +puts "****************Start Metric Collection Settings Processing********************" +if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version, so hardcoding it + configMapSettings = parseConfigMap + if !configMapSettings.nil? + populateSettingValuesFromConfigMap(configMapSettings) + end +else + if (File.file?(@configMapMountPath)) + ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported schema version") + end +end + +# Write the settings to file, so that they can be set as environment variables +file = File.open("config_metric_collection_env_var", "w") + +if !file.nil? + file.write("export AZMON_PV_COLLECT_KUBE_SYSTEM_METRICS=#{@collectPVKubeSystemMetrics}\n") + # Close file after writing all metric collection setting environment variables + file.close + puts "****************End Metric Collection Settings Processing********************" +else + puts "Exception while opening file for writing MDM metric config environment variables" + puts "****************End Metric Collection Settings Processing********************" +end diff --git a/kubernetes/container-azm-ms-agentconfig.yaml b/kubernetes/container-azm-ms-agentconfig.yaml index 58e09f041..aec1bb456 100644 --- a/kubernetes/container-azm-ms-agentconfig.yaml +++ b/kubernetes/container-azm-ms-agentconfig.yaml @@ -42,6 +42,7 @@ data: # When the setting is set to false, only the kube events with !normal event type will be collected enabled = false # When this is enabled (enabled = true), all kube events including normal events will be collected + prometheus-data-collection-settings: |- # Custom Prometheus metrics data collection settings [prometheus_data_collection_settings.cluster] @@ -90,6 +91,15 @@ data: #fieldpass = ["metric_to_pass1", "metric_to_pass12"] #fielddrop = ["metric_to_drop"] + + metric_collection_settings: |- + # Metrics collection settings for metrics sent to Log Analytics and MDM + [metric_collection_settings.collect_kube_system_pv_metrics] + # In the absense of this configmap, default value for collect_kube_system_pv_metrics is false + # When the setting is set to false, only the persistent volume metrics outside the kube-system namespace will be collected + enabled = false + # When this is enabled (enabled = true), persistent volume metrics including those in the kube-system namespace will be collected + alertable-metrics-configuration-settings: |- # Alertable metrics configuration settings for container resource utilization [alertable_metrics_configuration_settings.container_resource_utilization_thresholds] @@ -100,6 +110,11 @@ data: container_memory_rss_threshold_percentage = 95.0 # Threshold for container memoryWorkingSet, metric will be sent only when memory working set exceeds or becomes equal to the following percentage container_memory_working_set_threshold_percentage = 95.0 + + # Alertable metrics configuration settings for persistent volume utilization + 
[alertable_metrics_configuration_settings.pv_utilization_thresholds] + # Threshold for persistent volume usage bytes, metric will be sent only when persistent volume utilization exceeds or becomes equal to the following percentage + pv_usage_threshold_percentage = 60.0 integrations: |- [integrations.azure_network_policy_manager] collect_basic_metrics = false diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 311470660..d9fdc42e9 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -236,6 +236,14 @@ cat config_mdm_metrics_env_var | while read line; do done source config_mdm_metrics_env_var +#Parse the configmap to set the right environment variables for metric collection settings +/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-metric-collection-config.rb + +cat config_metric_collection_env_var | while read line; do + echo $line >> ~/.bashrc +done +source config_metric_collection_env_var + #Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request echo "Making wget request to cadvisor endpoint with port 10250" #Defaults to use port 10255 diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index b71a95227..5cda4dcb3 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -125,7 +125,7 @@ data: type filter_cadvisor2mdm custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast - metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes + metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,pvUsedBytes log_level info diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 13796cd1e..7661bb7a1 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -20,6 +20,7 @@ class CAdvisorMetricsAPIClient @clusterEnvVarCollectionEnabled = ENV["AZMON_CLUSTER_COLLECT_ENV_VAR"] @clusterStdErrLogCollectionEnabled = ENV["AZMON_COLLECT_STDERR_LOGS"] @clusterStdOutLogCollectionEnabled = ENV["AZMON_COLLECT_STDOUT_LOGS"] + @pvKubeSystemCollectionMetricsEnabled = ENV["AZMON_PV_COLLECT_KUBE_SYSTEM_METRICS"] @clusterLogTailExcludPath = ENV["AZMON_CLUSTER_LOG_TAIL_EXCLUDE_PATH"] @clusterLogTailPath = ENV["AZMON_LOG_TAIL_PATH"] @clusterAgentSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] @@ -53,6 +54,7 @@ class CAdvisorMetricsAPIClient @@winNodePrevMetricRate = {} @@telemetryCpuMetricTimeTracker = DateTime.now.to_time.to_i @@telemetryMemoryMetricTimeTracker = DateTime.now.to_time.to_i + @@telemetryPVKubeSystemMetricsTimeTracker = DateTime.now.to_time.to_i #Containers a hash of node name and the last time telemetry was sent for this node @@nodeTelemetryTimeTracker = {} @@ -301,6 +303,8 @@ def getInsightsMetrics(winNode: nil, metricTime: Time.now.utc.iso8601) metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryTotal", "containerGpumemoryTotalBytes", metricTime)) metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryUsed","containerGpumemoryUsedBytes", metricTime)) metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "dutyCycle","containerGpuDutyCycle", metricTime)) + + metricDataItems.concat(getPersistentVolumeMetrics(metricInfo, hostName, "usedBytes", Constants::PV_USED_BYTES, 
metricTime)) else @Log.warn("Couldn't get Insights metrics information for host: #{hostName} os:#{operatingSystem}") end @@ -311,6 +315,79 @@ def getInsightsMetrics(winNode: nil, metricTime: Time.now.utc.iso8601) return metricDataItems end + def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metricNameToReturn, metricPollTime) + telemetryTimeDifference = (DateTime.now.to_time.to_i - @@telemetryPVKubeSystemMetricsTimeTracker).abs + telemetryTimeDifferenceInMinutes = telemetryTimeDifference / 60 + + metricItems = [] + clusterId = KubernetesApiClient.getClusterId + clusterName = KubernetesApiClient.getClusterName + begin + metricInfo = metricJSON + metricInfo["pods"].each do |pod| + + podNamespace = pod["podRef"]["namespace"] + excludeNamespace = false + if (podNamespace.downcase == "kube-system") && @pvKubeSystemCollectionMetricsEnabled == "false" + excludeNamespace = true + end + + if (!excludeNamespace && !pod["volume"].nil?) + pod["volume"].each do |volume| + if (!volume["pvcRef"].nil?) + pvcRef = volume["pvcRef"] + if (!pvcRef["name"].nil?) + + # A PVC exists on this volume + podUid = pod["podRef"]["uid"] + podName = pod["podRef"]["name"] + pvcName = pvcRef["name"] + pvcNamespace = pvcRef["namespace"] + + metricItem = {} + metricItem["CollectionTime"] = metricPollTime + metricItem["Computer"] = hostName + metricItem["Name"] = metricNameToReturn + metricItem["Value"] = volume[metricNameToCollect] + metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN + metricItem["Namespace"] = Constants::INSIGTHTSMETRICS_TAGS_PV_NAMESPACE + + metricTags = {} + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID ] = clusterId + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = clusterName + metricTags[Constants::INSIGHTSMETRICS_TAGS_POD_UID] = podUid + metricTags[Constants::INSIGHTSMETRICS_TAGS_POD_NAME] = podName + metricTags[Constants::INSIGHTSMETRICS_TAGS_PVC_NAME] = pvcName + metricTags[Constants::INSIGHTSMETRICS_TAGS_PVC_NAMESPACE] = pvcNamespace + metricTags[Constants::INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES] = volume["capacityBytes"] + + metricItem["Tags"] = metricTags + + metricItems.push(metricItem) + end + end + end + end + end + rescue => errorStr + @Log.warn("getPersistentVolumeMetrics failed: #{errorStr} for metric #{metricNameToCollect}") + return metricItems + end + + # If kube-system metrics collection enabled, send telemetry + begin + if telemetryTimeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES && @pvKubeSystemCollectionMetricsEnabled == "true" + ApplicationInsightsUtility.sendCustomEvent(Constants::PV_KUBE_SYSTEM_METRICS_ENABLED_EVENT, {}) + @@telemetryPVKubeSystemMetricsTimeTracker = DateTime.now.to_time.to_i + end + rescue => errorStr + @Log.warn("getPersistentVolumeMetrics kube-system metrics enabled telemetry failed: #{errorStr}") + end + + return metricItems + end + + def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCollect, metricNametoReturn, metricPollTime) metricItems = [] clusterId = KubernetesApiClient.getClusterId diff --git a/source/plugins/ruby/MdmAlertTemplates.rb b/source/plugins/ruby/MdmAlertTemplates.rb index 2e516a99d..d5107fea1 100644 --- a/source/plugins/ruby/MdmAlertTemplates.rb +++ b/source/plugins/ruby/MdmAlertTemplates.rb @@ -90,6 +90,38 @@ class MdmAlertTemplates } }' + PV_resource_utilization_template = ' + { + "time": "%{timestamp}", + "data": { + "baseData": { + "metric": "%{metricName}", + "namespace": "insights.container/persistentvolumes", + "dimNames": [ + 
"podName", + "node", + "kubernetesNamespace", + "thresholdPercentage" + ], + "series": [ + { + "dimValues": [ + "%{podNameDimValue}", + "%{computerNameDimValue}", + "%{namespaceDimValue}", + "%{thresholdPercentageDimValue}" + ], + "min": %{pvResourceUtilizationPercentage}, + "max": %{pvResourceUtilizationPercentage}, + "sum": %{pvResourceUtilizationPercentage}, + "count": 1 + } + ] + } + } + }' + + Node_resource_metrics_template = ' { "time": "%{timestamp}", diff --git a/source/plugins/ruby/MdmMetricsGenerator.rb b/source/plugins/ruby/MdmMetricsGenerator.rb index 3d75dc6f4..1e7db37cc 100644 --- a/source/plugins/ruby/MdmMetricsGenerator.rb +++ b/source/plugins/ruby/MdmMetricsGenerator.rb @@ -37,6 +37,10 @@ class MdmMetricsGenerator Constants::MEMORY_WORKING_SET_BYTES => Constants::MDM_CONTAINER_MEMORY_WORKING_SET_UTILIZATION_METRIC, } + @@pod_metric_name_metric_percentage_name_hash = { + Constants::PV_USED_BYTES => Constants::MDM_PV_UTILIZATION_METRIC + } + # Setting this to true since we need to send zero filled metrics at startup. If metrics are absent alert creation fails @sendZeroFilledMetrics = true @@ -259,6 +263,31 @@ def getContainerResourceUtilMetricRecords(recordTimeStamp, metricName, percentag return records end + def getPVResourceUtilMetricRecords(recordTimeStamp, metricName, computer, percentageMetricValue, dims, thresholdPercentage) + records = [] + begin + containerName = dims[Constants::INSIGHTSMETRICS_TAGS_CONTAINER_NAME] + pvcNamespace = dims[Constants::INSIGHTSMETRICS_TAGS_PVC_NAMESPACE] + podName = dims[Constants::INSIGHTSMETRICS_TAGS_POD_NAME] + podUid = dims[Constants::INSIGHTSMETRICS_TAGS_POD_UID] + + resourceUtilRecord = MdmAlertTemplates::PV_resource_utilization_template % { + timestamp: recordTimeStamp, + metricName: @@pod_metric_name_metric_percentage_name_hash[metricName], + podNameDimValue: podName, + computerNameDimValue: computer, + namespaceDimValue: pvcNamespace, + pvResourceUtilizationPercentage: percentageMetricValue, + thresholdPercentageDimValue: thresholdPercentage, + } + records.push(Yajl::Parser.parse(StringIO.new(resourceUtilRecord))) + rescue => errorStr + @log.info "Error in getPVResourceUtilMetricRecords: #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + return records + end + def getDiskUsageMetricRecords(record) records = [] usedPercent = nil @@ -356,6 +385,7 @@ def getContainerResourceUtilizationThresholds metric_threshold_hash[Constants::CPU_USAGE_NANO_CORES] = Constants::DEFAULT_MDM_CPU_UTILIZATION_THRESHOLD metric_threshold_hash[Constants::MEMORY_RSS_BYTES] = Constants::DEFAULT_MDM_MEMORY_RSS_THRESHOLD metric_threshold_hash[Constants::MEMORY_WORKING_SET_BYTES] = Constants::DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD + metric_threshold_hash[Constants::PV_USED_BYTES] = Constants::DEFAULT_MDM_PV_UTILIZATION_THRESHOLD cpuThreshold = ENV["AZMON_ALERT_CONTAINER_CPU_THRESHOLD"] if !cpuThreshold.nil? && !cpuThreshold.empty? @@ -375,6 +405,12 @@ def getContainerResourceUtilizationThresholds memoryWorkingSetThresholdFloat = (memoryWorkingSetThreshold.to_f).round(2) metric_threshold_hash[Constants::MEMORY_WORKING_SET_BYTES] = memoryWorkingSetThresholdFloat end + + pvUsagePercentageThreshold = ENV["AZMON_ALERT_PV_USAGE_THRESHOLD"] + if !pvUsagePercentageThreshold.nil? && !pvUsagePercentageThreshold.empty? 
+ pvUsagePercentageThresholdFloat = (pvUsagePercentageThreshold.to_f).round(2) + metric_threshold_hash[Constants::PV_USED_BYTES] = pvUsagePercentageThresholdFloat + end rescue => errorStr @log.info "Error in getContainerResourceUtilizationThresholds: #{errorStr}" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index dd1ba24b3..82a6e8814 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -13,6 +13,12 @@ class Constants INSIGHTSMETRICS_TAGS_K8SNAMESPACE = "k8sNamespace" INSIGHTSMETRICS_TAGS_CONTROLLER_NAME = "controllerName" INSIGHTSMETRICS_TAGS_CONTROLLER_KIND = "controllerKind" + INSIGHTSMETRICS_TAGS_POD_UID = "podUid" + INSIGTHTSMETRICS_TAGS_PV_NAMESPACE = "container.azm.ms/pv" + INSIGHTSMETRICS_TAGS_PVC_NAME = "pvcName" + INSIGHTSMETRICS_TAGS_PVC_NAMESPACE = "pvcNamespace" + INSIGHTSMETRICS_TAGS_POD_NAME = "podName" + INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES = "pvCapacityBytes" INSIGHTSMETRICS_FLUENT_TAG = "oms.api.InsightsMetrics" REASON_OOM_KILLED = "oomkilled" #Kubestate (common) @@ -45,6 +51,7 @@ class Constants MDM_CONTAINER_CPU_UTILIZATION_METRIC = "cpuExceededPercentage" MDM_CONTAINER_MEMORY_RSS_UTILIZATION_METRIC = "memoryRssExceededPercentage" MDM_CONTAINER_MEMORY_WORKING_SET_UTILIZATION_METRIC = "memoryWorkingSetExceededPercentage" + MDM_PV_UTILIZATION_METRIC = "pvUsageExceededPercentage" MDM_NODE_CPU_USAGE_PERCENTAGE = "cpuUsagePercentage" MDM_NODE_MEMORY_RSS_PERCENTAGE = "memoryRssPercentage" MDM_NODE_MEMORY_WORKING_SET_PERCENTAGE = "memoryWorkingSetPercentage" @@ -56,9 +63,11 @@ class Constants CPU_USAGE_MILLI_CORES = "cpuUsageMillicores" MEMORY_WORKING_SET_BYTES= "memoryWorkingSetBytes" MEMORY_RSS_BYTES = "memoryRssBytes" + PV_USED_BYTES = "pvUsedBytes" DEFAULT_MDM_CPU_UTILIZATION_THRESHOLD = 95.0 DEFAULT_MDM_MEMORY_RSS_THRESHOLD = 95.0 DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD = 95.0 + DEFAULT_MDM_PV_UTILIZATION_THRESHOLD = 60.0 CONTROLLER_KIND_JOB = "job" CONTAINER_TERMINATION_REASON_COMPLETED = "completed" CONTAINER_STATE_TERMINATED = "terminated" @@ -71,6 +80,8 @@ class Constants CONTAINER_METRICS_HEART_BEAT_EVENT = "ContainerMetricsMdmHeartBeatEvent" POD_READY_PERCENTAGE_HEART_BEAT_EVENT = "PodReadyPercentageMdmHeartBeatEvent" CONTAINER_RESOURCE_UTIL_HEART_BEAT_EVENT = "ContainerResourceUtilMdmHeartBeatEvent" + PV_USAGE_HEART_BEAT_EVENT = "PVUsageMdmHeartBeatEvent" + PV_KUBE_SYSTEM_METRICS_ENABLED_EVENT = "CollectPVKubeSystemMetricsEnabled" TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 10 KUBE_STATE_TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 15 MDM_TIME_SERIES_FLUSHED_IN_LAST_HOUR = "MdmTimeSeriesFlushedInLastHour" diff --git a/source/plugins/ruby/filter_cadvisor2mdm.rb b/source/plugins/ruby/filter_cadvisor2mdm.rb index fd43ef98b..3bc674ea8 100644 --- a/source/plugins/ruby/filter_cadvisor2mdm.rb +++ b/source/plugins/ruby/filter_cadvisor2mdm.rb @@ -16,7 +16,7 @@ class CAdvisor2MdmFilter < Filter config_param :enable_log, :integer, :default => 0 config_param :log_path, :string, :default => "/var/opt/microsoft/docker-cimprov/log/filter_cadvisor2mdm.log" config_param :custom_metrics_azure_regions, :string - config_param :metrics_to_collect, :string, :default => "Constants::CPU_USAGE_NANO_CORES,Constants::MEMORY_WORKING_SET_BYTES,Constants::MEMORY_RSS_BYTES" + config_param :metrics_to_collect, :string, :default => "Constants::CPU_USAGE_NANO_CORES,Constants::MEMORY_WORKING_SET_BYTES,Constants::MEMORY_RSS_BYTES,Constants::PV_USED_BYTES" @@hostName = 
(OMS::Common.get_hostname) @@ -46,11 +46,13 @@ def start @metrics_to_collect_hash = build_metrics_hash @log.debug "After check_custom_metrics_availability process_incoming_stream #{@process_incoming_stream}" @@containerResourceUtilTelemetryTimeTracker = DateTime.now.to_time.to_i + @@pvUsageTelemetryTimeTracker = DateTime.now.to_time.to_i # These variables keep track if any resource utilization threshold exceeded in the last 10 minutes @containersExceededCpuThreshold = false @containersExceededMemRssThreshold = false @containersExceededMemWorkingSetThreshold = false + @pvExceededUsageThreshold = false # initialize cpu and memory limit if @process_incoming_stream @@ -60,6 +62,7 @@ def start @containerCpuLimitHash = {} @containerMemoryLimitHash = {} @containerResourceDimensionHash = {} + @pvUsageHash = {} @@metric_threshold_hash = MdmMetricsGenerator.getContainerResourceUtilizationThresholds end rescue => e @@ -87,6 +90,8 @@ def setThresholdExceededTelemetry(metricName) @containersExceededMemRssThreshold = true elsif metricName == Constants::MEMORY_WORKING_SET_BYTES @containersExceededMemWorkingSetThreshold = true + elsif metricName == Constants::PV_USED_BYTES + @pvExceededUsageThreshold = true end rescue => errorStr @log.info "Error in setThresholdExceededTelemetry: #{errorStr}" @@ -109,13 +114,30 @@ def flushMetricTelemetry properties["MemRssThresholdExceededInLastFlushInterval"] = @containersExceededMemRssThreshold properties["MemWSetThresholdExceededInLastFlushInterval"] = @containersExceededMemWorkingSetThreshold ApplicationInsightsUtility.sendCustomEvent(Constants::CONTAINER_RESOURCE_UTIL_HEART_BEAT_EVENT, properties) - @@containerResourceUtilTelemetryTimeTracker = DateTime.now.to_time.to_i @containersExceededCpuThreshold = false @containersExceededMemRssThreshold = false @containersExceededMemWorkingSetThreshold = false + @@containerResourceUtilTelemetryTimeTracker = DateTime.now.to_time.to_i + end + rescue => errorStr + @log.info "Error in flushMetricTelemetry: #{errorStr} for container resource util telemetry" + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + + # Also send for PV usage metrics + begin + pvTimeDifference = (DateTime.now.to_time.to_i - @@pvUsageTelemetryTimeTracker).abs + pvTimeDifferenceInMinutes = pvTimeDifference / 60 + if (pvTimeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + pvProperties = {} + pvProperties["PVUsageThresholdPercentage"] = @@metric_threshold_hash[Constants::PV_USED_BYTES] + pvProperties["PVUsageThresholdExceededInLastFlushInterval"] = @pvExceededUsageThreshold + ApplicationInsightsUtility.sendCustomEvent(Constants::PV_USAGE_HEART_BEAT_EVENT, pvProperties) + @pvExceededUsageThreshold = false + @@pvUsageTelemetryTimeTracker = DateTime.now.to_time.to_i end rescue => errorStr - @log.info "Error in flushMetricTelemetry: #{errorStr}" + @log.info "Error in flushMetricTelemetry: #{errorStr} for PV usage telemetry" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) end end @@ -123,6 +145,13 @@ def flushMetricTelemetry def filter(tag, time, record) begin if @process_incoming_stream + + # Check if insights metrics for PV metrics + data_type = record["DataType"] + if data_type == "INSIGHTS_METRICS_BLOB" + return filterPVInsightsMetrics(record) + end + object_name = record["DataItems"][0]["ObjectName"] counter_name = record["DataItems"][0]["Collections"][0]["CounterName"] percentage_metric_value = 0.0 @@ -204,6 +233,47 @@ def filter(tag, time, record) end end + def filterPVInsightsMetrics(record) + begin + 
mdmMetrics = [] + record["DataItems"].each do |dataItem| + + if dataItem["Name"] == Constants::PV_USED_BYTES && @metrics_to_collect_hash.key?(dataItem["Name"].downcase) + metricName = dataItem["Name"] + usage = dataItem["Value"] + capacity = dataItem["Tags"][Constants::INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES] + if capacity != 0 + percentage_metric_value = (usage * 100.0) / capacity + end + @log.info "percentage_metric_value for metric: #{metricName} percentage: #{percentage_metric_value}" + @log.info "@@metric_threshold_hash for #{metricName}: #{@@metric_threshold_hash[metricName]}" + + computer = dataItem["Computer"] + resourceDimensions = dataItem["Tags"] + thresholdPercentage = @@metric_threshold_hash[metricName] + + flushMetricTelemetry + if percentage_metric_value >= thresholdPercentage + setThresholdExceededTelemetry(metricName) + return MdmMetricsGenerator.getPVResourceUtilMetricRecords(dataItem["CollectionTime"], + metricName, + computer, + percentage_metric_value, + resourceDimensions, + thresholdPercentage) + else + return [] + end # end if block for percentage metric > configured threshold % check + end # end if block for dataItem name check + end # end for block of looping through data items + return [] + rescue Exception => e + @log.info "Error processing cadvisor insights metrics record Exception: #{e.class} Message: #{e.message}" + ApplicationInsightsUtility.sendExceptionTelemetry(e.backtrace) + return [] #return empty array if we ran into any errors + end + end + def ensure_cpu_memory_capacity_set if @cpu_capacity != 0.0 && @memory_capacity != 0.0 @log.info "CPU And Memory Capacity are already set" diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index a44365e9d..b706ff00a 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -88,6 +88,7 @@ def enumerate() end router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + router.emit_stream(@mdmtag, insightsMetricsEventStream) if insightsMetricsEventStream if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) $log.info("cAdvisorInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 38868f2f5..4e90195e5 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -101,6 +101,7 @@ def enumerate() end router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + router.emit_stream(@mdmtag, insightsMetricsEventStream) if insightsMetricsEventStream if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) $log.info("winCAdvisorInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") end From 7304a6b32652a870087ac39f49b640bca85da1c1 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Wed, 23 Sep 2020 13:00:03 -0700 Subject: [PATCH 024/301] add new custom metric regions (#444) * add new custom metric regions * fix commas --- build/linux/installer/conf/container.conf | 4 ++-- build/linux/installer/conf/kube.conf | 6 +++--- .../templates/omsagent-rs-configmap.yaml | 6 +++--- kubernetes/omsagent.yaml | 6 +++--- scripts/troubleshoot/TroubleshootError.ps1 | 12 +++++++++++- 5 files changed, 22 insertions(+), 12 deletions(-) diff --git a/build/linux/installer/conf/container.conf b/build/linux/installer/conf/container.conf index e55c62fbc..f7e6e1da9 100644 --- a/build/linux/installer/conf/container.conf +++ b/build/linux/installer/conf/container.conf @@ -45,14 +45,14 @@ #custom_metrics_mdm filter plugin type filter_cadvisor2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,memoryRssBytes,pvUsedBytes log_level info type filter_telegraf2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth log_level debug diff --git a/build/linux/installer/conf/kube.conf b/build/linux/installer/conf/kube.conf index ba40b7a35..dbb4db0da 100644 --- a/build/linux/installer/conf/kube.conf +++ b/build/linux/installer/conf/kube.conf @@ -13,7 +13,7 @@ tag oms.containerinsights.KubePodInventory run_interval 60 log_level debug - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth #Kubernetes events @@ -66,14 +66,14 @@ type filter_inventory2mdm - custom_metrics_azure_regions 
eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth log_level info #custom_metrics_mdm filter plugin for perf data from windows nodes type filter_cadvisor2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,pvUsedBytes log_level info diff --git a/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml b/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml index ee0664495..475b17a46 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml @@ -18,7 +18,7 @@ data: tag oms.containerinsights.KubePodInventory run_interval 60 log_level debug - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth #Kubernetes events @@ -70,14 +70,14 @@ data: type filter_inventory2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth log_level info # custom_metrics_mdm filter plugin for perf data from windows nodes type filter_cadvisor2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions 
eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes log_level info diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 5cda4dcb3..9c8f9de14 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -64,7 +64,7 @@ data: tag oms.containerinsights.KubePodInventory run_interval 60 log_level debug - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth #Kubernetes events @@ -117,14 +117,14 @@ data: type filter_inventory2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth log_level info #custom_metrics_mdm filter plugin for perf data from windows nodes type filter_cadvisor2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,pvUsedBytes log_level info diff --git a/scripts/troubleshoot/TroubleshootError.ps1 b/scripts/troubleshoot/TroubleshootError.ps1 index 754a43e74..4c2d95ac6 100644 --- a/scripts/troubleshoot/TroubleshootError.ps1 +++ b/scripts/troubleshoot/TroubleshootError.ps1 @@ -234,7 +234,17 @@ $MdmCustomMetricAvailabilityLocations = ( 'eastasia', 'centralindia', 'uksouth', - 'canadacentral' + 'canadacentral', + 'francecentral', + 'japaneast', + 'australiaeast', + 'eastus2', + 'westus', + 'australiasoutheast', + 'brazilsouth', + 'germanywestcentral', + 'northcentralus', + 'switzerlandnorth' ); try { From 2d8c03fec9edc15da7df5a14b9b5d561b4e85add Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Wed, 23 Sep 2020 13:01:07 -0700 Subject: [PATCH 025/301] add 'Terminating' state (#443) --- source/plugins/ruby/constants.rb | 3 +++ 
source/plugins/ruby/in_kube_podinventory.rb | 3 +++ 2 files changed, 6 insertions(+) diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index 82a6e8814..a64a4c97c 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -85,4 +85,7 @@ class Constants TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 10 KUBE_STATE_TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 15 MDM_TIME_SERIES_FLUSHED_IN_LAST_HOUR = "MdmTimeSeriesFlushedInLastHour" + + #Pod Statuses + POD_STATUS_TERMINATING = "Terminating" end \ No newline at end of file diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index bffa725ee..4880d80e7 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -194,6 +194,9 @@ def parse_and_emit_records(podInventory, serviceList, continuationToken, batchTi if podReadyCondition == false record["PodStatus"] = "Unknown" + # ICM - https://portal.microsofticm.com/imp/v3/incidents/details/187091803/home + elsif !items["metadata"]["deletionTimestamp"].nil? && !items["metadata"]["deletionTimestamp"].empty? + record["PodStatus"] = Constants::POD_STATUS_TERMINATING else record["PodStatus"] = items["status"]["phase"] end From da06d760ccb324e034a84187a3766c89d6bffb02 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 25 Sep 2020 12:36:27 -0700 Subject: [PATCH 026/301] Gangams/sept agent release tasks (#445) * turnoff mdm nonsupported cluster types * enable validation of server cert for ai ruby http client * add kubelet operations total and total error metrics * node selector label change * label update * wip * wip * wip * revert quotes --- build/linux/installer/conf/telegraf.conf | 9 +++---- .../templates/omsagent-daemonset-windows.yaml | 5 ++++ charts/azuremonitor-containers/values.yaml | 26 +++++++++++++++++++ kubernetes/linux/main.sh | 9 +++---- kubernetes/omsagent.yaml | 3 ++- .../channel/sender_base.rb | 4 +-- source/plugins/ruby/out_mdm.rb | 9 +++++-- 7 files changed, 50 insertions(+), 15 deletions(-) diff --git a/build/linux/installer/conf/telegraf.conf b/build/linux/installer/conf/telegraf.conf index 013aa1af2..202ac9741 100644 --- a/build/linux/installer/conf/telegraf.conf +++ b/build/linux/installer/conf/telegraf.conf @@ -632,8 +632,7 @@ name_prefix="container.azm.ms/" ## An array of urls to scrape metrics from. urls = ["$CADVISOR_METRICS_URL"] - ## Include "$KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC", "$KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC" when we add for support for 1.18 - fieldpass = ["$KUBELET_RUNTIME_OPERATIONS_METRIC", "$KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC"] + fieldpass = ["$KUBELET_RUNTIME_OPERATIONS_METRIC", "$KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC", "$KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC", "$KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC"] metric_version = 2 url_tag = "scrapeUrl" @@ -675,7 +674,7 @@ name_prefix="container.azm.ms/" ## An array of urls to scrape metrics from. urls = ["$CADVISOR_METRICS_URL"] - + fieldpass = ["kubelet_running_pod_count","volume_manager_total_volumes", "kubelet_node_config_error", "process_resident_memory_bytes", "process_cpu_seconds_total"] metric_version = 2 @@ -690,7 +689,7 @@ ## Optional TLS Config tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" insecure_skip_verify = true - + ## prometheus custom metrics [[inputs.prometheus]] @@ -731,7 +730,7 @@ #name_prefix="container.azm.ms/" ## An array of urls to scrape metrics from. 
urls = $AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE - + metric_version = 2 url_tag = "scrapeUrl" diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index 7acd46c37..72b09f6c1 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -25,8 +25,13 @@ spec: dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" spec: +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} + nodeSelector: + kubernetes.io/os: windows +{{- else }} nodeSelector: beta.kubernetes.io/os: windows +{{- end }} {{- if .Values.omsagent.rbac }} serviceAccountName: omsagent {{- end }} diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 9c48cf9fb..1804d1197 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -56,6 +56,17 @@ omsagent: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - labelSelector: + matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + - key: type + operator: NotIn + values: + - virtual-kubelet nodeSelectorTerms: - labelSelector: matchExpressions: @@ -71,6 +82,21 @@ omsagent: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - labelSelector: + matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: kubernetes.io/role + operator: NotIn + values: + - master nodeSelectorTerms: - labelSelector: matchExpressions: diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index d9fdc42e9..11972f0f4 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -300,11 +300,10 @@ fi echo "configured container runtime on kubelet is : "$CONTAINER_RUNTIME echo "export CONTAINER_RUNTIME="$CONTAINER_RUNTIME >> ~/.bashrc -# enable these metrics in next agent release -# export KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC="kubelet_runtime_operations_total" -# echo "export KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC="$KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC >> ~/.bashrc -# export KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC="kubelet_runtime_operations_errors_total" -# echo "export KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC="$KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC >> ~/.bashrc +export KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC="kubelet_runtime_operations_total" +echo "export KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC="$KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC >> ~/.bashrc +export KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC="kubelet_runtime_operations_errors_total" +echo "export KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC="$KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC >> ~/.bashrc # default to docker metrics export KUBELET_RUNTIME_OPERATIONS_METRIC="kubelet_docker_operations" diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 9c8f9de14..09e50b5a4 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -419,7 +419,8 @@ spec: nodeSelectorTerms: - labelSelector: matchExpressions: - - key: beta.kubernetes.io/os + # kubernetes.io/os label doesnt exist in k8s versions < 1.14 so make sure to choose label based on k8s version in aks yaml + - key: kubernetes.io/os operator: In values: - 
linux diff --git a/source/plugins/ruby/lib/application_insights/channel/sender_base.rb b/source/plugins/ruby/lib/application_insights/channel/sender_base.rb index 33ac49286..bedbae4ee 100644 --- a/source/plugins/ruby/lib/application_insights/channel/sender_base.rb +++ b/source/plugins/ruby/lib/application_insights/channel/sender_base.rb @@ -66,12 +66,12 @@ def send(data_to_send) request.body = compressed_data if @proxy.nil? || @proxy.empty? http = Net::HTTP.new uri.hostname, uri.port - else + else http = Net::HTTP.new(uri.hostname, uri.port, @proxy[:addr], @proxy[:port], @proxy[:user], @proxy[:pass]) end if uri.scheme.downcase == 'https' http.use_ssl = true - http.verify_mode = OpenSSL::SSL::VERIFY_NONE + http.verify_mode = OpenSSL::SSL::VERIFY_PEER end response = http.request(request) diff --git a/source/plugins/ruby/out_mdm.rb b/source/plugins/ruby/out_mdm.rb index c4cc46dd7..1c805255a 100644 --- a/source/plugins/ruby/out_mdm.rb +++ b/source/plugins/ruby/out_mdm.rb @@ -61,15 +61,17 @@ def configure(conf) def start super begin - file = File.read(@@azure_json_path) - @data_hash = JSON.parse(file) aks_resource_id = ENV["AKS_RESOURCE_ID"] aks_region = ENV["AKS_REGION"] if aks_resource_id.to_s.empty? @log.info "Environment Variable AKS_RESOURCE_ID is not set.. " @can_send_data_to_mdm = false + elsif !aks_resource_id.downcase.include?("/microsoft.containerservice/managedclusters/") && !aks_resource_id.downcase.include?("/microsoft.kubernetes/connectedclusters/") + @log.info "MDM Metris not supported for this cluster type resource: #{aks_resource_id}" + @can_send_data_to_mdm = false end + if aks_region.to_s.empty? @log.info "Environment Variable AKS_REGION is not set.. " @can_send_data_to_mdm = false @@ -106,6 +108,9 @@ def start @cluster_identity = ArcK8sClusterIdentity.new @cached_access_token = @cluster_identity.get_cluster_identity_token else + # azure json file only used for aks and doesnt exist in non-azure envs + file = File.read(@@azure_json_path) + @data_hash = JSON.parse(file) # Check to see if SP exists, if it does use SP. Else, use msi sp_client_id = @data_hash["aadClientId"] sp_client_secret = @data_hash["aadClientSecret"] From 545305438d54d44c5d3b02cd075019eb57617a48 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Mon, 28 Sep 2020 11:36:38 -0700 Subject: [PATCH 027/301] grwehner/pv-collect-volume-name (#448) Collect and send the volume name as another tag for pvUsedBytes in InsightsMetrics, so that it can be displayed in the workload workbook. 
Does not affect the PV MDM metric --- source/plugins/ruby/CAdvisorMetricsAPIClient.rb | 1 + source/plugins/ruby/constants.rb | 1 + 2 files changed, 2 insertions(+) diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 7661bb7a1..9e0935480 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -359,6 +359,7 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric metricTags[Constants::INSIGHTSMETRICS_TAGS_POD_NAME] = podName metricTags[Constants::INSIGHTSMETRICS_TAGS_PVC_NAME] = pvcName metricTags[Constants::INSIGHTSMETRICS_TAGS_PVC_NAMESPACE] = pvcNamespace + metricTags[Constants::INSIGHTSMETRICS_TAGS_VOLUME_NAME] = volume["name"] metricTags[Constants::INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES] = volume["capacityBytes"] metricItem["Tags"] = metricTags diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index a64a4c97c..73e3af471 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -19,6 +19,7 @@ class Constants INSIGHTSMETRICS_TAGS_PVC_NAMESPACE = "pvcNamespace" INSIGHTSMETRICS_TAGS_POD_NAME = "podName" INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES = "pvCapacityBytes" + INSIGHTSMETRICS_TAGS_VOLUME_NAME = "volumeName" INSIGHTSMETRICS_FLUENT_TAG = "oms.api.InsightsMetrics" REASON_OOM_KILLED = "oomkilled" #Kubestate (common) From fe9f14df60f8d9a0cc52d33ad13c8c05b0c76cbb Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Tue, 29 Sep 2020 17:34:30 -0700 Subject: [PATCH 028/301] Changes for september agent release (#449) Moving from v1beta1 to v1 for health CRD Adding timer for zero filling Adding zero filling for PV metrics --- .../templates/omsagent-crd.yaml | 24 ++++++ kubernetes/omsagent.yaml | 14 +++- source/plugins/ruby/MdmMetricsGenerator.rb | 77 ++++++++++++------- source/plugins/ruby/constants.rb | 63 +++++++-------- 4 files changed, 116 insertions(+), 62 deletions(-) diff --git a/charts/azuremonitor-containers/templates/omsagent-crd.yaml b/charts/azuremonitor-containers/templates/omsagent-crd.yaml index f4a028bd3..bbaf89a52 100644 --- a/charts/azuremonitor-containers/templates/omsagent-crd.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-crd.yaml @@ -1,3 +1,4 @@ +{{- if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion }} apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: @@ -10,3 +11,26 @@ spec: names: plural: healthstates kind: HealthState +{{- else }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: healthstates.azmon.container.insights + namespace: kube-system +spec: + group: azmon.container.insights + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + state: + type: string + scope: Namespaced + names: + plural: healthstates + kind: HealthState +{{- end }} \ No newline at end of file diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 09e50b5a4..e8352e020 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -746,14 +746,24 @@ spec: port: 25227 targetPort: in-rs-tcp --- -apiVersion: apiextensions.k8s.io/v1beta1 +# this is for versions >=1.19, for versions <1.19 we continue to use v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: healthstates.azmon.container.insights namespace: kube-system spec: group: azmon.container.insights - version: v1 + 
versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + state: + type: string scope: Namespaced names: plural: healthstates diff --git a/source/plugins/ruby/MdmMetricsGenerator.rb b/source/plugins/ruby/MdmMetricsGenerator.rb index 1e7db37cc..b8104212d 100644 --- a/source/plugins/ruby/MdmMetricsGenerator.rb +++ b/source/plugins/ruby/MdmMetricsGenerator.rb @@ -8,9 +8,11 @@ class MdmMetricsGenerator require_relative "MdmAlertTemplates" require_relative "ApplicationInsightsUtility" require_relative "constants" + require_relative "oms_common" @log_path = "/var/opt/microsoft/docker-cimprov/log/mdm_metrics_generator.log" @log = Logger.new(@log_path, 1, 5000000) + @@hostName = (OMS::Common.get_hostname) @oom_killed_container_count_hash = {} @container_restart_count_hash = {} @@ -38,11 +40,12 @@ class MdmMetricsGenerator } @@pod_metric_name_metric_percentage_name_hash = { - Constants::PV_USED_BYTES => Constants::MDM_PV_UTILIZATION_METRIC + Constants::PV_USED_BYTES => Constants::MDM_PV_UTILIZATION_METRIC, } # Setting this to true since we need to send zero filled metrics at startup. If metrics are absent alert creation fails @sendZeroFilledMetrics = true + @zeroFilledMetricsTimeTracker = DateTime.now.to_time.to_i def initialize end @@ -179,6 +182,19 @@ def zeroFillMetricRecords(records, batch_time) if !containerMemoryWorkingSetRecord.nil? && !containerMemoryWorkingSetRecord.empty? && !containerMemoryWorkingSetRecord[0].nil? && !containerMemoryWorkingSetRecord[0].empty? records.push(containerMemoryWorkingSetRecord[0]) end + + pvZeroFillDims = {} + pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_PVC_NAMESPACE] = Constants::KUBESYSTEM_NAMESPACE_ZERO_FILL + pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_POD_NAME] = Constants::OMSAGENT_ZERO_FILL + pvResourceUtilMetricRecord = getPVResourceUtilMetricRecords(batch_time, + Constants::PV_USED_BYTES, + @@hostName, + 0, + pvZeroFillDims, + metric_threshold_hash[Constants::PV_USED_BYTES]) + if !pvResourceUtilMetricRecord.nil? && !pvResourceUtilMetricRecord.empty? && !pvResourceUtilMetricRecord[0].nil? && !pvResourceUtilMetricRecord[0].empty? + records.push(pvResourceUtilMetricRecord[0]) + end rescue => errorStr @log.info "Error in zeroFillMetricRecords: #{errorStr}" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) @@ -189,10 +205,13 @@ def zeroFillMetricRecords(records, batch_time) def appendAllPodMetrics(records, batch_time) begin @log.info "in appendAllPodMetrics..." - if @sendZeroFilledMetrics == true + timeDifference = (DateTime.now.to_time.to_i - @zeroFilledMetricsTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if @sendZeroFilledMetrics == true || (timeDifferenceInMinutes >= Constants::ZERO_FILL_METRICS_INTERVAL_IN_MINUTES) records = zeroFillMetricRecords(records, batch_time) # Setting it to false after startup @sendZeroFilledMetrics = false + @zeroFilledMetricsTimeTracker = DateTime.now.to_time.to_i end records = appendPodMetrics(records, Constants::MDM_OOM_KILLED_CONTAINER_COUNT, @@ -325,22 +344,22 @@ def getMetricRecords(record) begin dimNames = String.new "" #mutable string dimValues = String.new "" - noDimVal ="-" + noDimVal = "-" metricValue = 0 if !record["tags"].nil? - dimCount = 0 - record["tags"].each { |k, v| - dimCount = dimCount+1 - if (dimCount <= 10) #MDM = 10 dims - dimNames.concat("\"#{k}\"") - dimNames.concat(",") - if !v.nil? 
&& v.length >0 - dimValues.concat("\"#{v}\"") - else - dimValues.concat("\"#{noDimVal}\"") - end - dimValues.concat(",") + dimCount = 0 + record["tags"].each { |k, v| + dimCount = dimCount + 1 + if (dimCount <= 10) #MDM = 10 dims + dimNames.concat("\"#{k}\"") + dimNames.concat(",") + if !v.nil? && v.length > 0 + dimValues.concat("\"#{v}\"") + else + dimValues.concat("\"#{noDimVal}\"") end + dimValues.concat(",") + end } if (dimNames.end_with?(",")) dimNames.chomp!(",") @@ -353,19 +372,19 @@ def getMetricRecords(record) convertedTimestamp = Time.at(timestamp.to_i).utc.iso8601 if !record["fields"].nil? record["fields"].each { |k, v| - if is_numeric(v) - metricRecord = MdmAlertTemplates::Generic_metric_template % { - timestamp: convertedTimestamp, - metricName: k, - namespaceSuffix: record["name"], - dimNames: dimNames, - dimValues: dimValues, - metricValue: v, - } - records.push(Yajl::Parser.parse(StringIO.new(metricRecord))) - #@log.info "pushed mdmgenericmetric: #{k},#{v}" - end - } + if is_numeric(v) + metricRecord = MdmAlertTemplates::Generic_metric_template % { + timestamp: convertedTimestamp, + metricName: k, + namespaceSuffix: record["name"], + dimNames: dimNames, + dimValues: dimValues, + metricValue: v, + } + records.push(Yajl::Parser.parse(StringIO.new(metricRecord))) + #@log.info "pushed mdmgenericmetric: #{k},#{v}" + end + } end rescue => errorStr @log.info "getMetricRecords:Error: #{errorStr} for record #{record}" @@ -375,7 +394,7 @@ def getMetricRecords(record) end def is_numeric(o) - true if Float(o) rescue false + true if Float(o) rescue false end def getContainerResourceUtilizationThresholds diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index 73e3af471..be1a9de64 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -57,36 +57,37 @@ class Constants MDM_NODE_MEMORY_RSS_PERCENTAGE = "memoryRssPercentage" MDM_NODE_MEMORY_WORKING_SET_PERCENTAGE = "memoryWorkingSetPercentage" - CONTAINER_TERMINATED_RECENTLY_IN_MINUTES = 5 - OBJECT_NAME_K8S_CONTAINER = "K8SContainer" - OBJECT_NAME_K8S_NODE = "K8SNode" - CPU_USAGE_NANO_CORES = "cpuUsageNanoCores" - CPU_USAGE_MILLI_CORES = "cpuUsageMillicores" - MEMORY_WORKING_SET_BYTES= "memoryWorkingSetBytes" - MEMORY_RSS_BYTES = "memoryRssBytes" - PV_USED_BYTES = "pvUsedBytes" - DEFAULT_MDM_CPU_UTILIZATION_THRESHOLD = 95.0 - DEFAULT_MDM_MEMORY_RSS_THRESHOLD = 95.0 - DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD = 95.0 - DEFAULT_MDM_PV_UTILIZATION_THRESHOLD = 60.0 - CONTROLLER_KIND_JOB = "job" - CONTAINER_TERMINATION_REASON_COMPLETED = "completed" - CONTAINER_STATE_TERMINATED = "terminated" - STALE_JOB_TIME_IN_MINUTES = 360 - TELEGRAF_DISK_METRICS = "container.azm.ms/disk" - OMSAGENT_ZERO_FILL = "omsagent" - KUBESYSTEM_NAMESPACE_ZERO_FILL = "kube-system" + CONTAINER_TERMINATED_RECENTLY_IN_MINUTES = 5 + OBJECT_NAME_K8S_CONTAINER = "K8SContainer" + OBJECT_NAME_K8S_NODE = "K8SNode" + CPU_USAGE_NANO_CORES = "cpuUsageNanoCores" + CPU_USAGE_MILLI_CORES = "cpuUsageMillicores" + MEMORY_WORKING_SET_BYTES = "memoryWorkingSetBytes" + MEMORY_RSS_BYTES = "memoryRssBytes" + PV_USED_BYTES = "pvUsedBytes" + DEFAULT_MDM_CPU_UTILIZATION_THRESHOLD = 95.0 + DEFAULT_MDM_MEMORY_RSS_THRESHOLD = 95.0 + DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD = 95.0 + DEFAULT_MDM_PV_UTILIZATION_THRESHOLD = 60.0 + CONTROLLER_KIND_JOB = "job" + CONTAINER_TERMINATION_REASON_COMPLETED = "completed" + CONTAINER_STATE_TERMINATED = "terminated" + STALE_JOB_TIME_IN_MINUTES = 360 + TELEGRAF_DISK_METRICS = "container.azm.ms/disk" + 
OMSAGENT_ZERO_FILL = "omsagent" + KUBESYSTEM_NAMESPACE_ZERO_FILL = "kube-system" - #Telemetry constants - CONTAINER_METRICS_HEART_BEAT_EVENT = "ContainerMetricsMdmHeartBeatEvent" - POD_READY_PERCENTAGE_HEART_BEAT_EVENT = "PodReadyPercentageMdmHeartBeatEvent" - CONTAINER_RESOURCE_UTIL_HEART_BEAT_EVENT = "ContainerResourceUtilMdmHeartBeatEvent" - PV_USAGE_HEART_BEAT_EVENT = "PVUsageMdmHeartBeatEvent" - PV_KUBE_SYSTEM_METRICS_ENABLED_EVENT = "CollectPVKubeSystemMetricsEnabled" - TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 10 - KUBE_STATE_TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 15 - MDM_TIME_SERIES_FLUSHED_IN_LAST_HOUR = "MdmTimeSeriesFlushedInLastHour" + #Telemetry constants + CONTAINER_METRICS_HEART_BEAT_EVENT = "ContainerMetricsMdmHeartBeatEvent" + POD_READY_PERCENTAGE_HEART_BEAT_EVENT = "PodReadyPercentageMdmHeartBeatEvent" + CONTAINER_RESOURCE_UTIL_HEART_BEAT_EVENT = "ContainerResourceUtilMdmHeartBeatEvent" + PV_USAGE_HEART_BEAT_EVENT = "PVUsageMdmHeartBeatEvent" + PV_KUBE_SYSTEM_METRICS_ENABLED_EVENT = "CollectPVKubeSystemMetricsEnabled" + TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 10 + KUBE_STATE_TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 15 + ZERO_FILL_METRICS_INTERVAL_IN_MINUTES = 30 + MDM_TIME_SERIES_FLUSHED_IN_LAST_HOUR = "MdmTimeSeriesFlushedInLastHour" - #Pod Statuses - POD_STATUS_TERMINATING = "Terminating" -end \ No newline at end of file + #Pod Statuses + POD_STATUS_TERMINATING = "Terminating" +end From f1657c65f2408bfd66a45cfa54c2d8a27770ac6a Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 30 Sep 2020 18:13:25 -0700 Subject: [PATCH 029/301] Gangams/arc k8s related scripts, charts and doc updates (#450) * checksum annotations * script update for chart from mcr * chart updates * update chart version to match with chart release * script updates * latest chart updates * version updates for chart release * script updates * script updates * doc updates * doc updates * update comments * fix bug in ps script * fix bug in ps script * minor update * release process updates * use consistent name across scripts * use consistent names --- ....sh => push-helm-chart-to-canary-repos.sh} | 24 +- .pipelines/push-helm-chart-to-prod-repos.sh | 53 ++ ReleaseProcess.md | 5 +- charts/azuremonitor-containers/Chart.yaml | 2 +- .../templates/omsagent-daemonset-windows.yaml | 2 + .../templates/omsagent-daemonset.yaml | 3 + .../templates/omsagent-deployment.yaml | 3 + charts/azuremonitor-containers/values.yaml | 6 +- .../onboarding/managed/disable-monitoring.ps1 | 12 +- .../onboarding/managed/disable-monitoring.sh | 10 +- .../onboarding/managed/enable-monitoring.ps1 | 118 ++-- .../onboarding/managed/enable-monitoring.sh | 552 +++++++++--------- .../onboarding/managed/upgrade-monitoring.sh | 314 ++++++++++ 13 files changed, 733 insertions(+), 371 deletions(-) rename .pipelines/{push-helm-chart-as-oci-artifact.sh => push-helm-chart-to-canary-repos.sh} (54%) create mode 100644 .pipelines/push-helm-chart-to-prod-repos.sh create mode 100644 scripts/onboarding/managed/upgrade-monitoring.sh diff --git a/.pipelines/push-helm-chart-as-oci-artifact.sh b/.pipelines/push-helm-chart-to-canary-repos.sh similarity index 54% rename from .pipelines/push-helm-chart-as-oci-artifact.sh rename to .pipelines/push-helm-chart-to-canary-repos.sh index 50e16e3d0..db8bff56e 100644 --- a/.pipelines/push-helm-chart-as-oci-artifact.sh +++ b/.pipelines/push-helm-chart-to-canary-repos.sh @@ -1,8 +1,9 @@ #!/bin/bash -# push the helm chart as an OCI artifact to specified ACR # working directory of this script should be 
charts/azuremonitor-containers -export REPO_PATH="batch1/test/azure-monitor-containers" +# note: this repo registered in arc k8s extension for canary region +export REPO_PATH="public/azuremonitor/containerinsights/canary/preview/azuremonitor-containers" + export HELM_EXPERIMENTAL_OCI=1 for ARGUMENT in "$@" @@ -11,13 +12,13 @@ do VALUE=$(echo $ARGUMENT | cut -f2 -d=) case "$KEY" in - CIARCACR) CIARCACR=$VALUE ;; + CIACR) CIACR=$VALUE ;; CICHARTVERSION) CHARTVERSION=$VALUE ;; *) esac done -echo "CI ARC K8S ACR: ${CIARCACR}" +echo "CI ARC K8S ACR: ${CIACR}" echo "CI HELM CHART VERSION: ${CHARTVERSION}" echo "start: read appid and appsecret" @@ -25,18 +26,19 @@ ACR_APP_ID=$(cat ~/acrappid) ACR_APP_SECRET=$(cat ~/acrappsecret) echo "end: read appid and appsecret" -ACR=${CIARCACR} +ACR=${CIACR} + +echo "login to acr:${ACR} using helm" +helm registry login $ACR --username $ACR_APP_ID --password $ACR_APP_SECRET -echo "login to acr:${ACR} using oras" -oras login $ACR --username $ACR_APP_ID --password $ACR_APP_SECRET echo "login to acr:${ACR} completed: ${ACR}" echo "start: push the chart version: ${CHARTVERSION} to acr repo: ${ACR}" -echo "generate helm package" -helm package . +echo "save the chart locally with acr full path" +helm chart save . ${ACR}/${REPO_PATH}:${CHARTVERSION} -echo "pushing the helm chart as an OCI artifact" -oras push ${ACR}/${REPO_PATH}:${CHARTVERSION} --manifest-config /dev/null:application/vnd.unknown.config.v1+json ./azuremonitor-containers-${CHARTVERSION}.tgz:application/tar+gzip +echo "pushing the helm chart to ACR: ${ACR}" +helm chart push ${ACR}/${REPO_PATH}:${CHARTVERSION} echo "end: push the chart version: ${CHARTVERSION} to acr repo: ${ACR}" diff --git a/.pipelines/push-helm-chart-to-prod-repos.sh b/.pipelines/push-helm-chart-to-prod-repos.sh new file mode 100644 index 000000000..71aa989de --- /dev/null +++ b/.pipelines/push-helm-chart-to-prod-repos.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# working directory of this script should be charts/azuremonitor-containers + +# this repo used without extension public preview release +export PROD_REPO_PATH="public/azuremonitor/containerinsights/preview/azuremonitor-containers" + +# note: this repo registered in arc k8s extension for prod group1 regions. +export EXTENSION_PROD_REPO_PATH="public/azuremonitor/containerinsights/prod1/preview/azuremonitor-containers" + +export HELM_EXPERIMENTAL_OCI=1 + +for ARGUMENT in "$@" +do + KEY=$(echo $ARGUMENT | cut -f1 -d=) + VALUE=$(echo $ARGUMENT | cut -f2 -d=) + + case "$KEY" in + CIACR) CIACR=$VALUE ;; + CICHARTVERSION) CHARTVERSION=$VALUE ;; + *) + esac +done + +echo "CI ARC K8S ACR: ${CIACR}" +echo "CI HELM CHART VERSION: ${CHARTVERSION}" + +echo "start: read appid and appsecret" +ACR_APP_ID=$(cat ~/acrappid) +ACR_APP_SECRET=$(cat ~/acrappsecret) +echo "end: read appid and appsecret" + +ACR=${CIACR} + +echo "login to acr:${ACR} using helm" +helm registry login $ACR --username $ACR_APP_ID --password $ACR_APP_SECRET + +echo "login to acr:${ACR} completed: ${ACR}" + +echo "start: push the chart version: ${CHARTVERSION} to acr repo: ${ACR}" + +echo "save the chart locally with acr full path: ${ACR}/${EXTENSION_PROD_REPO_PATH}:${CHARTVERSION}" +helm chart save . ${ACR}/${EXTENSION_PROD_REPO_PATH}:${CHARTVERSION} + +echo "save the chart locally with acr full path: ${ACR}/${PROD_REPO_PATH}:${CHARTVERSION}" +helm chart save . 
${ACR}/${PROD_REPO_PATH}:${CHARTVERSION} + +echo "pushing the helm chart to ACR: ${ACR}/${EXTENSION_PROD_REPO_PATH}:${CHARTVERSION}" +helm chart push ${ACR}/${EXTENSION_PROD_REPO_PATH}:${CHARTVERSION} + +echo "pushing the helm chart to ACR: ${ACR}/${PROD_REPO_PATH}:${CHARTVERSION}" +helm chart push ${ACR}/${PROD_REPO_PATH}:${CHARTVERSION} + +echo "end: push the chart version: ${CHARTVERSION} to acr repo: ${ACR}" diff --git a/ReleaseProcess.md b/ReleaseProcess.md index 19802e22c..2a3e6001a 100644 --- a/ReleaseProcess.md +++ b/ReleaseProcess.md @@ -45,7 +45,10 @@ Make PR against [AKS-Engine](https://github.com/Azure/aks-engine). Refer PR http ## ARO v4, On-prem K8s, Azure Arc K8s and OpenShift v4 clusters -Make PR against [HELM-charts](https://github.com/helm/charts) with Azure Monitor for containers chart update. +Make sure the azuremonitor-containers chart yamls are updated with all the changes going into the release, and also bump the chart version, image tag and docker provider version. As with the agent container image, the build pipeline automatically pushes the chart to the Container Insights prod ACR canary and prod repos accordingly. +Both the agent and helm chart will be replicated to `mcr.microsoft.com`. + +Customers onboard monitoring to these clusters using the onboarding scripts under the `onboarding\managed` directory, so be sure to bump the chart version for the prod release. Once we move to the Arc K8s Monitoring extension Public preview, this will be handled automatically and no manual changes like this will be required. # 4. Monitor agent roll-out status diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 8976b5561..1d3fed86f 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.7.4 +version: 2.7.6 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index 72b09f6c1..e65f9a98d 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -24,6 +24,8 @@ spec: agentVersion: {{ .Values.omsagent.image.tagWindows }} dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" + checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . | sha256sum }} + checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} spec: {{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} nodeSelector: diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index 7514247a0..438294ce5 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -24,6 +24,9 @@ spec: agentVersion: {{ .Values.omsagent.image.tag }} dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" + checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . 
| sha256sum }} + checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} + checksum/logsettings: {{ toYaml .Values.omsagent.logsettings | sha256sum }} spec: {{- if .Values.omsagent.rbac }} serviceAccountName: omsagent diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index 7d7ac7040..8609d25c9 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -25,6 +25,9 @@ spec: agentVersion: {{ .Values.omsagent.image.tag }} dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" + checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . | sha256sum }} + checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} + checksum/logsettings: {{ toYaml .Values.omsagent.logsettings | sha256sum }} spec: {{- if .Values.omsagent.rbac }} serviceAccountName: omsagent diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 1804d1197..2711cb372 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -12,10 +12,10 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod09162020" - tagWindows: "win-ciprod09162020" + tag: "ciprod09252020" + tagWindows: "win-ciprod09252020" pullPolicy: IfNotPresent - dockerProviderVersion: "10.0.0-5" + dockerProviderVersion: "10.0.0-6" agentVersion: "1.10.0.1" ## To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. diff --git a/scripts/onboarding/managed/disable-monitoring.ps1 b/scripts/onboarding/managed/disable-monitoring.ps1 index 8945f90b6..1c011bfff 100644 --- a/scripts/onboarding/managed/disable-monitoring.ps1 +++ b/scripts/onboarding/managed/disable-monitoring.ps1 @@ -1,12 +1,12 @@ <# .DESCRIPTION - Disables Azure Monitor for containers to monitoring enabled Azure Managed K8s cluster such as Azure Arc K8s, ARO v4 and AKS etc. + Disables Azure Monitor for containers to monitoring enabled Azure Managed K8s cluster such as Azure Arc enabled Kubernetes, ARO v4 and AKS etc. 1. Deletes the existing Azure Monitor for containers helm release 2. Deletes logAnalyticsWorkspaceResourceId tag on the provided Managed cluster .PARAMETER clusterResourceId - Id of the Azure Managed Cluster such as Azure ARC K8s, ARO v4 etc. + Id of the Azure Managed Cluster such as Azure Arc enabled Kubernetes, ARO v4 etc. .PARAMETER servicePrincipalClientId client Id of the service principal which will be used for the azure login .PARAMETER servicePrincipalClientSecret @@ -18,7 +18,7 @@ Pre-requisites: - Azure Managed cluster Resource Id - - Contributor role permission on the Subscription of the Azure Arc Cluster + - Contributor role permission on the Subscription of the Azure Arc enabled Kubernetes Cluster - Helm v3.0.0 or higher https://github.com/helm/helm/releases - kube-context of the K8s cluster Note: 1. Please make sure you have all the pre-requisistes before running this script. 
@@ -298,7 +298,7 @@ if ($isArcK8sCluster -eq $true) { # validate identity $clusterIdentity = $clusterResource.identity.type.ToString().ToLower() if ($clusterIdentity.Contains("systemassigned") -eq $false) { - Write-Host("Identity of Azure Arc K8s cluster should be systemassigned but it has identity: $clusterIdentity") -ForegroundColor Red + Write-Host("Identity of Azure Arc enabled Kubernetes cluster should be systemassigned but it has identity: $clusterIdentity") -ForegroundColor Red exit } } @@ -354,7 +354,3 @@ catch { } Write-Host("Successfully disabled Azure Monitor for containers for cluster: $clusteResourceId") -ForegroundColor Green - - - - diff --git a/scripts/onboarding/managed/disable-monitoring.sh b/scripts/onboarding/managed/disable-monitoring.sh index f20bd7d33..c11426f30 100644 --- a/scripts/onboarding/managed/disable-monitoring.sh +++ b/scripts/onboarding/managed/disable-monitoring.sh @@ -26,10 +26,10 @@ set -o pipefail # default release name used during onboarding releaseName="azmon-containers-release-1" -# resource type for azure arc clusters +# resource type for Azure Arc enabled Kubernetes clusters resourceProvider="Microsoft.Kubernetes/connectedClusters" -# resource provider for azure arc connected cluster +# resource provider for Azure Arc enabled Kubernetes cluster arcK8sResourceProvider="Microsoft.Kubernetes/connectedClusters" # resource provider for azure redhat openshift v4 cluster aroV4ResourceProvider="Microsoft.RedHatOpenShift/OpenShiftClusters" @@ -125,13 +125,13 @@ remove_monitoring_tags() echo "set the cluster subscription id: ${clusterSubscriptionId}" az account set -s ${clusterSubscriptionId} - # validate cluster identity for ARC k8s cluster + # validate cluster identity for Azure Arc enabled Kubernetes cluster if [ "$isArcK8sCluster" = true ] ; then identitytype=$(az resource show -g ${clusterResourceGroup} -n ${clusterName} --resource-type $resourceProvider --query identity.type) identitytype=$(echo $identitytype | tr "[:upper:]" "[:lower:]" | tr -d '"') echo "cluster identity type:" $identitytype if [[ "$identitytype" != "systemassigned" ]]; then - echo "-e only supported cluster identity is systemassigned for Azure ARC K8s cluster type" + echo "-e only supported cluster identity is systemassigned for Azure Arc enabled Kubernetes cluster type" exit 1 fi fi @@ -257,7 +257,7 @@ done # detect the resource provider from the provider name in the cluster resource id if [ $providerName = "microsoft.kubernetes/connectedclusters" ]; then - echo "provider cluster resource is of Azure ARC K8s cluster type" + echo "provider cluster resource is of Azure Arc enabled Kubernetes cluster type" isArcK8sCluster=true resourceProvider=$arcK8sResourceProvider elif [ $providerName = "microsoft.redhatopenshift/openshiftclusters" ]; then diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index 338de6cbc..1e1669400 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -1,14 +1,14 @@ <# .DESCRIPTION - Onboards Azure Monitor for containers to Azure Managed Kuberenetes such as Azure Arc K8s, ARO v4 and AKS etc. + Onboards Azure Monitor for containers to Azure Managed Kuberenetes such as Azure Arc enabled Kubernetes, ARO v4 and AKS etc. 1. Creates the Default Azure log analytics workspace if doesn't exist one in specified subscription 2. Adds the ContainerInsights solution to the Azure log analytics workspace 3. 
Adds the workspaceResourceId tag or enable addon (if the cluster is AKS) on the provided Managed cluster resource id 4. Installs Azure Monitor for containers HELM chart to the K8s cluster in provided via --kube-context .PARAMETER clusterResourceId - Id of the Azure Managed Cluster such as Azure ARC K8s, ARO v4 etc. + Id of the Azure Managed Cluster such as Azure Arc enabled Kubernetes, ARO v4 etc. .PARAMETER servicePrincipalClientId Client Id of the service principal which will be used for the azure login .PARAMETER servicePrincipalClientSecret @@ -22,10 +22,6 @@ .PARAMETER proxyEndpoint (optional) Provide Proxy endpoint if you have K8s cluster behind the proxy and would like to route Azure Monitor for containers outbound traffic via proxy. Format of the proxy endpoint should be http(s://:@: - .PARAMETER helmRepoName (optional) - helm repo name. should be used only for the private preview features - .PARAMETER helmRepoUrl (optional) - helm repo url. should be used only for the private preview features Pre-requisites: - Azure Managed cluster Resource Id @@ -50,30 +46,23 @@ param( [Parameter(mandatory = $false)] [string]$workspaceResourceId, [Parameter(mandatory = $false)] - [string]$proxyEndpoint, - [Parameter(mandatory = $false)] - [string]$helmRepoName, - [Parameter(mandatory = $false)] - [string]$helmRepoUrl + [string]$proxyEndpoint ) -$solutionTemplateUri= "https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_dev/scripts/onboarding/templates/azuremonitor-containerSolution.json" +$solutionTemplateUri = "https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_dev/scripts/onboarding/templates/azuremonitor-containerSolution.json" $helmChartReleaseName = "azmon-containers-release-1" $helmChartName = "azuremonitor-containers" -$helmChartRepoName = "incubator" -$helmChartRepoUrl = "https://kubernetes-charts-incubator.storage.googleapis.com/" + # flags to indicate the cluster types $isArcK8sCluster = $false -$isAksCluster = $false +$isAksCluster = $false $isUsingServicePrincipal = $false -if([string]::IsNullOrEmpty($helmRepoName) -eq $false){ - $helmChartRepoName = $helmRepoName -} - -if([string]::IsNullOrEmpty($helmRepoUrl) -eq $false){ - $helmChartRepoUrl = $helmRepoUrl -} +# released chart version in mcr +$mcr = "mcr.microsoft.com" +$mcrChartVersion = "2.7.6" +$mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" +$helmLocalRepoName = "." 
# checks the required Powershell modules exist and if not exists, request the user permission to install $azAccountModule = Get-Module -ListAvailable -Name Az.Accounts @@ -200,7 +189,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - } if ([string]::IsNullOrEmpty($clusterResourceId)) { - Write-Host("Specified Azure Arc ClusterResourceId should not be NULL or empty") -ForegroundColor Red + Write-Host("Specified Azure Arc enabled Kubernetes ClusterResourceId should not be NULL or empty") -ForegroundColor Red exit } @@ -220,30 +209,31 @@ if ($clusterResourceId.StartsWith("/") -eq $false) { $clusterResourceId = "/" + $clusterResourceId } -if ($clusterResourceId.Split("/").Length -ne 9){ - Write-Host("Provided Cluster Resource Id is not in expected format") -ForegroundColor Red +if ($clusterResourceId.Split("/").Length -ne 9) { + Write-Host("Provided Cluster Resource Id is not in expected format") -ForegroundColor Red exit } if (($clusterResourceId.ToLower().Contains("microsoft.kubernetes/connectedclusters") -ne $true) -and ($clusterResourceId.ToLower().Contains("microsoft.redhatopenshift/openshiftclusters") -ne $true) -and ($clusterResourceId.ToLower().Contains("microsoft.containerservice/managedclusters") -ne $true) - ) { +) { Write-Host("Provided cluster ResourceId is not supported cluster type: $clusterResourceId") -ForegroundColor Red exit } -if(([string]::IsNullOrEmpty($servicePrincipalClientId) -eq $false) -and - ([string]::IsNullOrEmpty($servicePrincipalClientSecret) -eq $false) -and - ([string]::IsNullOrEmpty($tenantId) -eq $false)) { - Write-Host("Using service principal creds for the azure login since these provided.") - $isUsingServicePrincipal = $true +if (([string]::IsNullOrEmpty($servicePrincipalClientId) -eq $false) -and + ([string]::IsNullOrEmpty($servicePrincipalClientSecret) -eq $false) -and + ([string]::IsNullOrEmpty($tenantId) -eq $false)) { + Write-Host("Using service principal creds for the azure login since these provided.") + $isUsingServicePrincipal = $true } if ($clusterResourceId.ToLower().Contains("microsoft.kubernetes/connectedclusters") -eq $true) { - $isArcK8sCluster = $true -} elseif ($clusterResourceId.ToLower().Contains("microsoft.containerservice/managedclusters") -eq $true) { - $isAksCluster = $true + $isArcK8sCluster = $true +} +elseif ($clusterResourceId.ToLower().Contains("microsoft.containerservice/managedclusters") -eq $true) { + $isAksCluster = $true } $resourceParts = $clusterResourceId.Split("/") @@ -253,7 +243,7 @@ Write-Host("Cluster SubscriptionId : '" + $clusterSubscriptionId + "' ") -Foregr if ($isUsingServicePrincipal) { $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force - $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret + $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId, $spSecret Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId } @@ -275,12 +265,13 @@ if ($null -eq $account.Account) { try { if ($isUsingServicePrincipal) { $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force - $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret + $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId, 
$spSecret Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId - } else { - Write-Host("Please login...") - Connect-AzAccount -subscriptionid $clusterSubscriptionId - } + } + else { + Write-Host("Please login...") + Connect-AzAccount -subscriptionid $clusterSubscriptionId + } } catch { Write-Host("") @@ -322,12 +313,12 @@ if ($null -eq $clusterResource) { $clusterRegion = $clusterResource.Location.ToLower() if ($isArcK8sCluster -eq $true) { - # validate identity - $clusterIdentity = $clusterResource.identity.type.ToString().ToLower() - if ($clusterIdentity.contains("systemassigned") -eq $false) { - Write-Host("Identity of Azure Arc K8s cluster should be systemassigned but it has identity: $clusterIdentity") -ForegroundColor Red - exit - } + # validate identity + $clusterIdentity = $clusterResource.identity.type.ToString().ToLower() + if ($clusterIdentity.contains("systemassigned") -eq $false) { + Write-Host("Identity of Azure Arc enabled Kubernetes cluster should be systemassigned but it has identity: $clusterIdentity") -ForegroundColor Red + exit + } } if ([string]::IsNullOrEmpty($workspaceResourceId)) { @@ -514,7 +505,8 @@ if ($account.Subscription.Id -eq $clusterSubscriptionId) { if ($isAksCluster -eq $true) { Write-Host ("Enabling AKS Monitoring Addon ..") # TBD -} else { +} +else { Write-Host("Attaching workspaceResourceId tag on the cluster ResourceId") $clusterResource.Tags["logAnalyticsWorkspaceResourceId"] = $WorkspaceInformation.ResourceId Set-AzResource -Tag $clusterResource.Tags -ResourceId $clusterResource.ResourceId -Force @@ -526,20 +518,30 @@ Write-Host "Helm version" : $helmVersion Write-Host("Installing or upgrading if exists, Azure Monitor for containers HELM chart ...") try { - Write-Host("Adding $helmChartRepoName repo to helm: $helmChartRepoUrl") - helm repo add $helmChartRepoName $helmChartRepoUrl - Write-Host("updating helm repo to get latest version of charts") - helm repo update + Write-Host("pull the chart from mcr.microsoft.com") + [System.Environment]::SetEnvironmentVariable("HELM_EXPERIMENTAL_OCI", 1, "Process") + + Write-Host("pull the chart from mcr.microsoft.com") + helm chart pull ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} + + Write-Host("export the chart from local cache to current directory") + helm chart export ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} --destination . 
+ + $helmChartRepoPath = "${helmLocalRepoName}" + "/" + "${helmChartName}" + + Write-Host("helmChartRepoPath is : ${helmChartRepoPath}") + $helmParameters = "omsagent.secret.wsid=$workspaceGUID,omsagent.secret.key=$workspacePrimarySharedKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion" - if([string]::IsNullOrEmpty($proxyEndpoint) -eq $false) { + if ([string]::IsNullOrEmpty($proxyEndpoint) -eq $false) { Write-Host("using proxy endpoint since its provided") $helmParameters = $helmParameters + ",omsagent.proxy=$proxyEndpoint" } if ([string]::IsNullOrEmpty($kubeContext)) { - helm upgrade --install $helmChartReleaseName --set $helmParameters $helmChartRepoName/$helmChartName - } else { - Write-Host("using provided kube-context: $kubeContext") - helm upgrade --install $helmChartReleaseName --set $helmParameters $helmChartRepoName/$helmChartName --kube-context $kubeContext + helm upgrade --install $helmChartReleaseName --set $helmParameters $helmChartRepoPath + } + else { + Write-Host("using provided kube-context: $kubeContext") + helm upgrade --install $helmChartReleaseName --set $helmParameters $helmChartRepoPath --kube-context $kubeContext } } catch { @@ -548,7 +550,3 @@ catch { Write-Host("Successfully enabled Azure Monitor for containers for cluster: $clusterResourceId") -ForegroundColor Green Write-Host("Proceed to https://aka.ms/azmon-containers to view your newly onboarded Azure Managed cluster") -ForegroundColor Green - - - - diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index 226fd978b..ce62a581a 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -41,9 +41,11 @@ set -o pipefail # default to public cloud since only supported cloud is azure public clod defaultAzureCloud="AzureCloud" -# helm repo details -helmRepoName="incubator" -helmRepoUrl="https://kubernetes-charts-incubator.storage.googleapis.com/" +# released chart version in mcr +mcrChartVersion="2.7.6" +mcr="mcr.microsoft.com" +mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" +helmLocalRepoName="." 
helmChartName="azuremonitor-containers" # default release name used during onboarding @@ -58,19 +60,18 @@ aroV4ResourceProvider="Microsoft.RedHatOpenShift/OpenShiftClusters" # resource provider for aks cluster aksResourceProvider="Microsoft.ContainerService/managedClusters" -# default of resourceProvider is arc k8s and this will get updated based on the provider cluster resource +# default of resourceProvider is Azure Arc enabled Kubernetes and this will get updated based on the provider cluster resource resourceProvider="Microsoft.Kubernetes/connectedClusters" - # resource type for azure log analytics workspace workspaceResourceProvider="Microsoft.OperationalInsights/workspaces" # openshift project name for aro v4 cluster openshiftProjectName="azure-monitor-for-containers" -# arc k8s cluster resource +# AROv4 cluster resource isAroV4Cluster=false -# arc k8s cluster resource +# Azure Arc enabled Kubernetes cluster resource isArcK8sCluster=false # aks cluster resource @@ -103,28 +104,25 @@ servicePrincipalClientSecret="" servicePrincipalTenantId="" isUsingServicePrincipal=false -usage() -{ - local basename=`basename $0` - echo - echo "Enable Azure Monitor for containers:" - echo "$basename --resource-id [--client-id ] [--client-secret ] [--tenant-id ] [--kube-context ] [--workspace-id ] [--proxy ]" +usage() { + local basename=$(basename $0) + echo + echo "Enable Azure Monitor for containers:" + echo "$basename --resource-id [--client-id ] [--client-secret ] [--tenant-id ] [--kube-context ] [--workspace-id ] [--proxy ]" } -parse_args() -{ +parse_args() { - if [ $# -le 1 ] - then + if [ $# -le 1 ]; then usage exit 1 - fi + fi -# Transform long options to short ones -for arg in "$@"; do - shift - case "$arg" in - "--resource-id") set -- "$@" "-r" ;; + # Transform long options to short ones + for arg in "$@"; do + shift + case "$arg" in + "--resource-id") set -- "$@" "-r" ;; "--kube-context") set -- "$@" "-k" ;; "--workspace-id") set -- "$@" "-w" ;; "--proxy") set -- "$@" "-p" ;; @@ -134,130 +132,128 @@ for arg in "$@"; do "--helm-repo-name") set -- "$@" "-n" ;; "--helm-repo-url") set -- "$@" "-u" ;; "--container-log-volume") set -- "$@" "-v" ;; - "--"*) usage ;; - *) set -- "$@" "$arg" - esac -done + "--"*) usage ;; + *) set -- "$@" "$arg" ;; + esac + done -local OPTIND opt + local OPTIND opt -while getopts 'hk:r:w:p:c:s:t:n:u:v:' opt; do + while getopts 'hk:r:w:p:c:s:t:n:u:v:' opt; do case "$opt" in - h) + h) + usage + ;; + + k) + kubeconfigContext="$OPTARG" + echo "name of kube-context is $OPTARG" + ;; + + r) + clusterResourceId="$OPTARG" + echo "clusterResourceId is $OPTARG" + ;; + + w) + workspaceResourceId="$OPTARG" + echo "workspaceResourceId is $OPTARG" + ;; + + p) + proxyEndpoint="$OPTARG" + echo "proxyEndpoint is $OPTARG" + ;; + + c) + servicePrincipalClientId="$OPTARG" + echo "servicePrincipalClientId is $OPTARG" + ;; + + s) + servicePrincipalClientSecret="$OPTARG" + echo "clientSecret is *****" + ;; + + t) + servicePrincipalTenantId="$OPTARG" + echo "service principal tenantId is $OPTARG" + ;; + + n) + helmRepoName="$OPTARG" + echo "helm repo name is $OPTARG" + ;; + + u) + helmRepoUrl="$OPTARG" + echo "helm repo url is $OPTARG" + ;; + + v) + containerLogVolume="$OPTARG" + echo "container log volume is $OPTARG" + ;; + + ?) 
usage - ;; - - k) - kubeconfigContext="$OPTARG" - echo "name of kube-context is $OPTARG" - ;; - - r) - clusterResourceId="$OPTARG" - echo "clusterResourceId is $OPTARG" - ;; - - w) - workspaceResourceId="$OPTARG" - echo "workspaceResourceId is $OPTARG" - ;; - - p) - proxyEndpoint="$OPTARG" - echo "proxyEndpoint is $OPTARG" - ;; - - c) - servicePrincipalClientId="$OPTARG" - echo "servicePrincipalClientId is $OPTARG" - ;; - - s) - servicePrincipalClientSecret="$OPTARG" - echo "clientSecret is *****" - ;; - - t) - servicePrincipalTenantId="$OPTARG" - echo "service principal tenantId is $OPTARG" - ;; - - n) - helmRepoName="$OPTARG" - echo "helm repo name is $OPTARG" - ;; - - u) - helmRepoUrl="$OPTARG" - echo "helm repo url is $OPTARG" - ;; - - v) - containerLogVolume="$OPTARG" - echo "container log volume is $OPTARG" - ;; - - ?) - usage - exit 1 - ;; + exit 1 + ;; esac done - shift "$(($OPTIND -1))" + shift "$(($OPTIND - 1))" + local subscriptionId="$(echo ${clusterResourceId} | cut -d'/' -f3)" + local resourceGroup="$(echo ${clusterResourceId} | cut -d'/' -f5)" - local subscriptionId="$(echo ${clusterResourceId} | cut -d'/' -f3)" - local resourceGroup="$(echo ${clusterResourceId} | cut -d'/' -f5)" + # get resource parts and join back to get the provider name + local providerNameResourcePart1="$(echo ${clusterResourceId} | cut -d'/' -f7)" + local providerNameResourcePart2="$(echo ${clusterResourceId} | cut -d'/' -f8)" + local providerName="$(echo ${providerNameResourcePart1}/${providerNameResourcePart2})" - # get resource parts and join back to get the provider name - local providerNameResourcePart1="$(echo ${clusterResourceId} | cut -d'/' -f7)" - local providerNameResourcePart2="$(echo ${clusterResourceId} | cut -d'/' -f8)" - local providerName="$(echo ${providerNameResourcePart1}/${providerNameResourcePart2} )" + local clusterName="$(echo ${clusterResourceId} | cut -d'/' -f9)" - local clusterName="$(echo ${clusterResourceId} | cut -d'/' -f9)" + # convert to lowercase for validation + providerName=$(echo $providerName | tr "[:upper:]" "[:lower:]") - # convert to lowercase for validation - providerName=$(echo $providerName | tr "[:upper:]" "[:lower:]") + echo "cluster SubscriptionId:" $subscriptionId + echo "cluster ResourceGroup:" $resourceGroup + echo "cluster ProviderName:" $providerName + echo "cluster Name:" $clusterName - echo "cluster SubscriptionId:" $subscriptionId - echo "cluster ResourceGroup:" $resourceGroup - echo "cluster ProviderName:" $providerName - echo "cluster Name:" $clusterName - - if [ -z "$subscriptionId" -o -z "$resourceGroup" -o -z "$providerName" -o -z "$clusterName" ]; then + if [ -z "$subscriptionId" -o -z "$resourceGroup" -o -z "$providerName" -o -z "$clusterName" ]; then echo "-e invalid cluster resource id. Please try with valid fully qualified resource id of the cluster" exit 1 - fi + fi - if [[ $providerName != microsoft.* ]]; then - echo "-e invalid azure cluster resource id format." - exit 1 - fi + if [[ $providerName != microsoft.* ]]; then + echo "-e invalid azure cluster resource id format." 
+ exit 1 + fi - # detect the resource provider from the provider name in the cluster resource id - # detect the resource provider from the provider name in the cluster resource id - if [ $providerName = "microsoft.kubernetes/connectedclusters" ]; then - echo "provider cluster resource is of Azure ARC K8s cluster type" + # detect the resource provider from the provider name in the cluster resource id + if [ $providerName = "microsoft.kubernetes/connectedclusters" ]; then + echo "provider cluster resource is of Azure Arc enabled Kubernetes cluster type" isArcK8sCluster=true resourceProvider=$arcK8sResourceProvider - elif [ $providerName = "microsoft.redhatopenshift/openshiftclusters" ]; then + elif [ $providerName = "microsoft.redhatopenshift/openshiftclusters" ]; then echo "provider cluster resource is of AROv4 cluster type" resourceProvider=$aroV4ResourceProvider isAroV4Cluster=true - elif [ $providerName = "microsoft.containerservice/managedclusters" ]; then + elif [ $providerName = "microsoft.containerservice/managedclusters" ]; then echo "provider cluster resource is of AKS cluster type" isAksCluster=true resourceProvider=$aksResourceProvider - else - echo "-e unsupported azure managed cluster type" - exit 1 - fi + else + echo "-e unsupported azure managed cluster type" + exit 1 + fi - if [ -z "$kubeconfigContext" ]; then + if [ -z "$kubeconfigContext" ]; then echo "using or getting current kube config context since --kube-context parameter not set " - fi + fi -if [ ! -z "$workspaceResourceId" ]; then + if [ ! -z "$workspaceResourceId" ]; then local workspaceSubscriptionId="$(echo $workspaceResourceId | cut -d'/' -f3)" local workspaceResourceGroup="$(echo $workspaceResourceId | cut -d'/' -f5)" local workspaceProviderName="$(echo $workspaceResourceId | cut -d'/' -f7)" @@ -269,13 +265,13 @@ if [ ! -z "$workspaceResourceId" ]; then echo "workspace ProviderName:" $workspaceName echo "workspace Name:" $workspaceName - if [[ $workspaceProviderName != microsoft.operationalinsights* ]]; then - echo "-e invalid azure log analytics resource id format." - exit 1 - fi -fi + if [[ $workspaceProviderName != microsoft.operationalinsights* ]]; then + echo "-e invalid azure log analytics resource id format." + exit 1 + fi + fi -if [ ! -z "$proxyEndpoint" ]; then + if [ ! -z "$proxyEndpoint" ]; then # Validate Proxy Endpoint URL # extract the protocol:// proto="$(echo $proxyEndpoint | grep :// | sed -e's,^\(.*://\).*,\1,g')" @@ -302,23 +298,21 @@ if [ ! -z "$proxyEndpoint" ]; then else echo "successfully validated provided proxy endpoint is valid and in expected format" fi -fi + fi -if [ ! -z "$servicePrincipalClientId" -a ! -z "$servicePrincipalClientSecret" -a ! -z "$servicePrincipalTenantId" ]; then - echo "using service principal creds (clientId, secret and tenantId) for azure login since provided" - isUsingServicePrincipal=true -fi + if [ ! -z "$servicePrincipalClientId" -a ! -z "$servicePrincipalClientSecret" -a ! 
-z "$servicePrincipalTenantId" ]; then + echo "using service principal creds (clientId, secret and tenantId) for azure login since provided" + isUsingServicePrincipal=true + fi } -configure_to_public_cloud() -{ +configure_to_public_cloud() { echo "Set AzureCloud as active cloud for az cli" az cloud set -n $defaultAzureCloud } -validate_cluster_identity() -{ +validate_cluster_identity() { echo "validating cluster identity" local rgName="$(echo ${1})" @@ -329,15 +323,14 @@ validate_cluster_identity() echo "cluster identity type:" $identitytype if [[ "$identitytype" != "systemassigned" ]]; then - echo "-e only supported cluster identity is systemassigned for Azure ARC K8s cluster type" - exit 1 + echo "-e only supported cluster identity is systemassigned for Azure Arc enabled Kubernetes cluster type" + exit 1 fi echo "successfully validated the identity of the cluster" } -create_default_log_analytics_workspace() -{ +create_default_log_analytics_workspace() { # extract subscription from cluster resource id local subscriptionId="$(echo $clusterResourceId | cut -d'/' -f3)" @@ -348,73 +341,71 @@ create_default_log_analytics_workspace() # mapping fors for default Azure Log Analytics workspace declare -A AzureCloudLocationToOmsRegionCodeMap=( - [australiasoutheast]=ASE - [australiaeast]=EAU - [australiacentral]=CAU - [canadacentral]=CCA - [centralindia]=CIN - [centralus]=CUS - [eastasia]=EA - [eastus]=EUS - [eastus2]=EUS2 - [eastus2euap]=EAP - [francecentral]=PAR - [japaneast]=EJP - [koreacentral]=SE - [northeurope]=NEU - [southcentralus]=SCUS - [southeastasia]=SEA - [uksouth]=SUK - [usgovvirginia]=USGV - [westcentralus]=EUS - [westeurope]=WEU - [westus]=WUS - [westus2]=WUS2 + [australiasoutheast]=ASE + [australiaeast]=EAU + [australiacentral]=CAU + [canadacentral]=CCA + [centralindia]=CIN + [centralus]=CUS + [eastasia]=EA + [eastus]=EUS + [eastus2]=EUS2 + [eastus2euap]=EAP + [francecentral]=PAR + [japaneast]=EJP + [koreacentral]=SE + [northeurope]=NEU + [southcentralus]=SCUS + [southeastasia]=SEA + [uksouth]=SUK + [usgovvirginia]=USGV + [westcentralus]=EUS + [westeurope]=WEU + [westus]=WUS + [westus2]=WUS2 ) declare -A AzureCloudRegionToOmsRegionMap=( - [australiacentral]=australiacentral - [australiacentral2]=australiacentral - [australiaeast]=australiaeast - [australiasoutheast]=australiasoutheast - [brazilsouth]=southcentralus - [canadacentral]=canadacentral - [canadaeast]=canadacentral - [centralus]=centralus - [centralindia]=centralindia - [eastasia]=eastasia - [eastus]=eastus - [eastus2]=eastus2 - [francecentral]=francecentral - [francesouth]=francecentral - [japaneast]=japaneast - [japanwest]=japaneast - [koreacentral]=koreacentral - [koreasouth]=koreacentral - [northcentralus]=eastus - [northeurope]=northeurope - [southafricanorth]=westeurope - [southafricawest]=westeurope - [southcentralus]=southcentralus - [southeastasia]=southeastasia - [southindia]=centralindia - [uksouth]=uksouth - [ukwest]=uksouth - [westcentralus]=eastus - [westeurope]=westeurope - [westindia]=centralindia - [westus]=westus - [westus2]=westus2 + [australiacentral]=australiacentral + [australiacentral2]=australiacentral + [australiaeast]=australiaeast + [australiasoutheast]=australiasoutheast + [brazilsouth]=southcentralus + [canadacentral]=canadacentral + [canadaeast]=canadacentral + [centralus]=centralus + [centralindia]=centralindia + [eastasia]=eastasia + [eastus]=eastus + [eastus2]=eastus2 + [francecentral]=francecentral + [francesouth]=francecentral + [japaneast]=japaneast + [japanwest]=japaneast + 
[koreacentral]=koreacentral + [koreasouth]=koreacentral + [northcentralus]=eastus + [northeurope]=northeurope + [southafricanorth]=westeurope + [southafricawest]=westeurope + [southcentralus]=southcentralus + [southeastasia]=southeastasia + [southindia]=centralindia + [uksouth]=uksouth + [ukwest]=uksouth + [westcentralus]=eastus + [westeurope]=westeurope + [westindia]=centralindia + [westus]=westus + [westus2]=westus2 ) - if [ -n "${AzureCloudRegionToOmsRegionMap[$clusterRegion]}" ]; - then + if [ -n "${AzureCloudRegionToOmsRegionMap[$clusterRegion]}" ]; then workspaceRegion=${AzureCloudRegionToOmsRegionMap[$clusterRegion]} fi echo "Workspace Region:"$workspaceRegion - if [ -n "${AzureCloudLocationToOmsRegionCodeMap[$workspaceRegion]}" ]; - then + if [ -n "${AzureCloudLocationToOmsRegionCodeMap[$workspaceRegion]}" ]; then workspaceRegionCode=${AzureCloudLocationToOmsRegionCodeMap[$workspaceRegion]} fi echo "Workspace Region Code:"$workspaceRegionCode @@ -423,30 +414,28 @@ create_default_log_analytics_workspace() isRGExists=$(az group exists -g $workspaceResourceGroup) workspaceName="DefaultWorkspace-"$subscriptionId"-"$workspaceRegionCode - if $isRGExists - then echo "using existing default resource group:"$workspaceResourceGroup + if $isRGExists; then + echo "using existing default resource group:"$workspaceResourceGroup else echo "creating resource group: $workspaceResourceGroup in region: $workspaceRegion" az group create -g $workspaceResourceGroup -l $workspaceRegion fi - workspaceList=$(az resource list -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider) - if [ "$workspaceList" = "[]" ]; - then - # create new default workspace since no mapped existing default workspace - echo '{"location":"'"$workspaceRegion"'", "properties":{"sku":{"name": "standalone"}}}' > WorkspaceProps.json - cat WorkspaceProps.json - workspace=$(az resource create -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --is-full-object -p @WorkspaceProps.json) + workspaceList=$(az resource list -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider) + if [ "$workspaceList" = "[]" ]; then + # create new default workspace since no mapped existing default workspace + echo '{"location":"'"$workspaceRegion"'", "properties":{"sku":{"name": "standalone"}}}' >WorkspaceProps.json + cat WorkspaceProps.json + workspace=$(az resource create -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --is-full-object -p @WorkspaceProps.json) else echo "using existing default workspace:"$workspaceName fi - workspaceResourceId=$(az resource show -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --query id) + workspaceResourceId=$(az resource show -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --query id) workspaceResourceId=$(echo $workspaceResourceId | tr -d '"') } -add_container_insights_solution() -{ +add_container_insights_solution() { local resourceId="$(echo ${1})" # extract resource group from workspace resource id @@ -456,10 +445,9 @@ add_container_insights_solution() solution=$(az deployment group create -g $resourceGroup --template-uri $solutionTemplateUri --parameters workspaceResourceId=$resourceId --parameters workspaceRegion=$workspaceRegion) } -get_workspace_guid_and_key() -{ +get_workspace_guid_and_key() { # extract resource parts from workspace resource id - local resourceId="$(echo ${1} | tr -d '"' )" + 
local resourceId="$(echo ${1} | tr -d '"')" local subId="$(echo ${resourceId} | cut -d'/' -f3)" local rgName="$(echo ${resourceId} | cut -d'/' -f5)" local wsName="$(echo ${resourceId} | cut -d'/' -f9)" @@ -474,11 +462,10 @@ get_workspace_guid_and_key() workspaceKey=$(echo $workspaceKey | tr -d '"') } -install_helm_chart() -{ +install_helm_chart() { - # get the config-context for ARO v4 cluster - if [ "$isAroV4Cluster" = true ] ; then + # get the config-context for ARO v4 cluster + if [ "$isAroV4Cluster" = true ]; then echo "getting config-context of ARO v4 cluster " echo "getting admin user creds for aro v4 cluster" adminUserName=$(az aro list-credentials -g $clusterResourceGroup -n $clusterName --query 'kubeadminUsername' -o tsv) @@ -490,83 +477,84 @@ install_helm_chart() oc new-project $openshiftProjectName echo "getting config-context of aro v4 cluster" kubeconfigContext=$(oc config current-context) - fi - - if [ -z "$kubeconfigContext" ]; then - echo "installing Azure Monitor for containers HELM chart on to the cluster and using current kube context ..." - else - echo "installing Azure Monitor for containers HELM chart on to the cluster with kubecontext:${kubeconfigContext} ..." - fi - - echo "getting the region of the cluster" - clusterRegion=$(az resource show --ids ${clusterResourceId} --query location -o tsv) - echo "cluster region is : ${clusterRegion}" - - echo "adding helm repo:" $helmRepoName - helm repo add $helmRepoName $helmRepoUrl - - echo "updating helm repo to get latest charts" - helm repo update - - if [ ! -z "$proxyEndpoint" ]; then - echo "using proxy endpoint since proxy configuration passed in" - if [ -z "$kubeconfigContext" ]; then - echo "using current kube-context since --kube-context/-k parameter not passed in" - helm upgrade --install azmon-containers-release-1 --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmRepoName/$helmChartName - else - echo "using --kube-context:${kubeconfigContext} since passed in" - helm upgrade --install azmon-containers-release-1 --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmRepoName/$helmChartName --kube-context ${kubeconfigContext} - fi - else - if [ -z "$kubeconfigContext" ]; then - echo "using current kube-context since --kube-context/-k parameter not passed in" - helm upgrade --install azmon-containers-release-1 --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmRepoName/$helmChartName - else - echo "using --kube-context:${kubeconfigContext} since passed in" - helm upgrade --install azmon-containers-release-1 --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmRepoName/$helmChartName --kube-context ${kubeconfigContext} - fi - fi - - echo "chart installation completed." + fi + + if [ -z "$kubeconfigContext" ]; then + echo "installing Azure Monitor for containers HELM chart on to the cluster and using current kube context ..." + else + echo "installing Azure Monitor for containers HELM chart on to the cluster with kubecontext:${kubeconfigContext} ..." 
+ fi + + echo "getting the region of the cluster" + clusterRegion=$(az resource show --ids ${clusterResourceId} --query location -o tsv) + echo "cluster region is : ${clusterRegion}" + + echo "pull the chart version ${mcrChartVersion} from ${mcr}/${mcrChartRepoPath}" + export HELM_EXPERIMENTAL_OCI=1 + helm chart pull $mcr/$mcrChartRepoPath:$mcrChartVersion + + echo "export the chart from local cache to current directory" + helm chart export $mcr/$mcrChartRepoPath:$mcrChartVersion --destination . + + helmChartRepoPath=$helmLocalRepoName/$helmChartName + + echo "helm chart repo path: ${helmChartRepoPath}" + + if [ ! -z "$proxyEndpoint" ]; then + echo "using proxy endpoint since proxy configuration passed in" + if [ -z "$kubeconfigContext" ]; then + echo "using current kube-context since --kube-context/-k parameter not passed in" + helm upgrade --install $releaseName --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath + else + echo "using --kube-context:${kubeconfigContext} since passed in" + helm upgrade --install $releaseName --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath --kube-context ${kubeconfigContext} + fi + else + if [ -z "$kubeconfigContext" ]; then + echo "using current kube-context since --kube-context/-k parameter not passed in" + helm upgrade --install $releaseName --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath + else + echo "using --kube-context:${kubeconfigContext} since passed in" + helm upgrade --install $releaseName --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath --kube-context ${kubeconfigContext} + fi + fi + + echo "chart installation completed." 
} -login_to_azure() -{ - if [ "$isUsingServicePrincipal" = true ] ; then - echo "login to the azure using provided service principal creds" - az login --service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId +login_to_azure() { + if [ "$isUsingServicePrincipal" = true ]; then + echo "login to the azure using provided service principal creds" + az login --service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId else echo "login to the azure interactively" az login --use-device-code fi } -set_azure_subscription() -{ - local subscriptionId="$(echo ${1})" - echo "setting the subscription id: ${subscriptionId} as current subscription for the azure cli" - az account set -s ${subscriptionId} - echo "successfully configured subscription id: ${subscriptionId} as current subscription for the azure cli" +set_azure_subscription() { + local subscriptionId="$(echo ${1})" + echo "setting the subscription id: ${subscriptionId} as current subscription for the azure cli" + az account set -s ${subscriptionId} + echo "successfully configured subscription id: ${subscriptionId} as current subscription for the azure cli" } -attach_monitoring_tags() -{ +attach_monitoring_tags() { echo "attach loganalyticsworkspaceResourceId tag on to cluster resource" - status=$(az resource update --set tags.logAnalyticsWorkspaceResourceId=$workspaceResourceId -g $clusterResourceGroup -n $clusterName --resource-type $resourceProvider) + status=$(az resource update --set tags.logAnalyticsWorkspaceResourceId=$workspaceResourceId -g $clusterResourceGroup -n $clusterName --resource-type $resourceProvider) echo "$status" echo "successfully attached logAnalyticsWorkspaceResourceId tag on the cluster resource" } # enables aks monitoring addon for private preview and dont use this for aks prod -enable_aks_monitoring_addon() -{ - echo "getting cluster object" - clusterGetResponse=$(az rest --method get --uri $clusterResourceId?api-version=2020-03-01) - export jqquery=".properties.addonProfiles.omsagent.config.logAnalyticsWorkspaceResourceID=\"$workspaceResourceId\"" - echo $clusterGetResponse | jq $jqquery > putrequestbody.json - status=$(az rest --method put --uri $clusterResourceId?api-version=2020-03-01 --body @putrequestbody.json --headers Content-Type=application/json) - echo "status after enabling of aks monitoringa addon:$status" +enable_aks_monitoring_addon() { + echo "getting cluster object" + clusterGetResponse=$(az rest --method get --uri $clusterResourceId?api-version=2020-03-01) + export jqquery=".properties.addonProfiles.omsagent.config.logAnalyticsWorkspaceResourceID=\"$workspaceResourceId\"" + echo $clusterGetResponse | jq $jqquery >putrequestbody.json + status=$(az rest --method put --uri $clusterResourceId?api-version=2020-03-01 --body @putrequestbody.json --headers Content-Type=application/json) + echo "status after enabling of aks monitoringa addon:$status" } # parse and validate args @@ -587,9 +575,9 @@ login_to_azure # set the cluster subscription id as active sub for azure cli set_azure_subscription $clusterSubscriptionId -# validate cluster identity if its ARC k8s cluster -if [ "$isArcK8sCluster" = true ] ; then - validate_cluster_identity $clusterResourceGroup $clusterName +# validate cluster identity if its Azure Arc enabled Kubernetes cluster +if [ "$isArcK8sCluster" = true ]; then + validate_cluster_identity $clusterResourceGroup $clusterName fi if [ -z 
$workspaceResourceId ]; then @@ -598,7 +586,7 @@ if [ -z $workspaceResourceId ]; then else echo "using provided azure log analytics workspace:${workspaceResourceId}" workspaceResourceId=$(echo $workspaceResourceId | tr -d '"') - workspaceSubscriptionId="$(echo ${workspaceResourceId} | cut -d'/' -f3 | tr "[:upper:]" "[:lower:]" )" + workspaceSubscriptionId="$(echo ${workspaceResourceId} | cut -d'/' -f3 | tr "[:upper:]" "[:lower:]")" workspaceResourceGroup="$(echo ${workspaceResourceId} | cut -d'/' -f5)" workspaceName="$(echo ${workspaceResourceId} | cut -d'/' -f9)" @@ -620,13 +608,13 @@ add_container_insights_solution $workspaceResourceId # get workspace guid and key get_workspace_guid_and_key $workspaceResourceId -if [ "$isClusterAndWorkspaceInSameSubscription" = false ] ; then +if [ "$isClusterAndWorkspaceInSameSubscription" = false ]; then echo "switch to cluster subscription id as active subscription for cli: ${clusterSubscriptionId}" set_azure_subscription $clusterSubscriptionId fi # attach monitoring tags on to cluster resource -if [ "$isAksCluster" = true ] ; then +if [ "$isAksCluster" = true ]; then enable_aks_monitoring_addon else attach_monitoring_tags diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh new file mode 100644 index 000000000..8a12b2f02 --- /dev/null +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -0,0 +1,314 @@ +#!/bin/bash +# +# Execute this directly in Azure Cloud Shell (https://shell.azure.com) by pasting (SHIFT+INS on Windows, CTRL+V on Mac or Linux) +# the following line (beginning with curl...) at the command prompt and then replacing the args: +# This scripts upgrades the existing Azure Monitor for containers release on Azure Arc enabled Kubernetes cluster +# +# 1. Upgrades existing Azure Monitor for containers release to the K8s cluster in provided via --kube-context +# Prerequisites : +# Azure CLI: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest +# Helm3 : https://helm.sh/docs/intro/install/ + +# download script +# curl -o enable-monitoring.sh -L https://aka.ms/upgrade-monitoring-bash-script +# 1. Using Service Principal for Azure Login +## bash upgrade-monitoring.sh --client-id --client-secret --tenant-id +# 2. Using Interactive device login +# bash upgrade-monitoring.sh --resource-id + +set -e +set -o pipefail + +# released chart version for Azure Arc enabled Kubernetes public preview +mcrChartVersion="2.7.6" +mcr="mcr.microsoft.com" +mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" + +# default to public cloud since only supported cloud is azure public clod +defaultAzureCloud="AzureCloud" +helmLocalRepoName="." 
+helmChartName="azuremonitor-containers" + +# default release name used during onboarding +releaseName="azmon-containers-release-1" + +# resource provider for azure arc connected cluster +arcK8sResourceProvider="Microsoft.Kubernetes/connectedClusters" + +# default of resourceProvider is Azure Arc enabled Kubernetes and this will get updated based on the provider cluster resource +resourceProvider="Microsoft.Kubernetes/connectedClusters" + +# Azure Arc enabled Kubernetes cluster resource +isArcK8sCluster=false + +# openshift project name for aro v4 cluster +openshiftProjectName="azure-monitor-for-containers" + +# Azure Arc enabled Kubernetes cluster resource +isAroV4Cluster=false + +# default global params +clusterResourceId="" +kubeconfigContext="" + +# default workspace region and code +workspaceRegion="eastus" +workspaceRegionCode="EUS" +workspaceResourceGroup="DefaultResourceGroup-"$workspaceRegionCode + +# default workspace guid and key +workspaceGuid="" +workspaceKey="" + +# sp details for the login if provided +servicePrincipalClientId="" +servicePrincipalClientSecret="" +servicePrincipalTenantId="" +isUsingServicePrincipal=false + +usage() { + local basename=$(basename $0) + echo + echo "Upgrade Azure Monitor for containers:" + echo "$basename --resource-id [--client-id ] [--client-secret ] [--tenant-id ] [--kube-context ]" +} + +parse_args() { + + if [ $# -le 1 ]; then + usage + exit 1 + fi + + # Transform long options to short ones + for arg in "$@"; do + shift + case "$arg" in + "--resource-id") set -- "$@" "-r" ;; + "--kube-context") set -- "$@" "-k" ;; + "--client-id") set -- "$@" "-c" ;; + "--client-secret") set -- "$@" "-s" ;; + "--tenant-id") set -- "$@" "-t" ;; + "--"*) usage ;; + *) set -- "$@" "$arg" ;; + esac + done + + local OPTIND opt + + while getopts 'hk:r:c:s:t:' opt; do + case "$opt" in + h) + usage + ;; + + k) + kubeconfigContext="$OPTARG" + echo "name of kube-context is $OPTARG" + ;; + + r) + clusterResourceId="$OPTARG" + echo "clusterResourceId is $OPTARG" + ;; + + c) + servicePrincipalClientId="$OPTARG" + echo "servicePrincipalClientId is $OPTARG" + ;; + + s) + servicePrincipalClientSecret="$OPTARG" + echo "clientSecret is *****" + ;; + + t) + servicePrincipalTenantId="$OPTARG" + echo "service principal tenantId is $OPTARG" + ;; + + ?) + usage + exit 1 + ;; + esac + done + shift "$(($OPTIND - 1))" + + local subscriptionId="$(echo ${clusterResourceId} | cut -d'/' -f3)" + local resourceGroup="$(echo ${clusterResourceId} | cut -d'/' -f5)" + + # get resource parts and join back to get the provider name + local providerNameResourcePart1="$(echo ${clusterResourceId} | cut -d'/' -f7)" + local providerNameResourcePart2="$(echo ${clusterResourceId} | cut -d'/' -f8)" + local providerName="$(echo ${providerNameResourcePart1}/${providerNameResourcePart2})" + + local clusterName="$(echo ${clusterResourceId} | cut -d'/' -f9)" + + # convert to lowercase for validation + providerName=$(echo $providerName | tr "[:upper:]" "[:lower:]") + + echo "cluster SubscriptionId:" $subscriptionId + echo "cluster ResourceGroup:" $resourceGroup + echo "cluster ProviderName:" $providerName + echo "cluster Name:" $clusterName + + if [ -z "$subscriptionId" -o -z "$resourceGroup" -o -z "$providerName" -o -z "$clusterName" ]; then + echo "-e invalid cluster resource id. Please try with valid fully qualified resource id of the cluster" + exit 1 + fi + + if [[ $providerName != microsoft.* ]]; then + echo "-e invalid azure cluster resource id format." 
+ exit 1 + fi + + # detect the resource provider from the provider name in the cluster resource id + if [ $providerName = "microsoft.kubernetes/connectedclusters" ]; then + echo "provider cluster resource is of Azure Arc enabled Kubernetes cluster type" + isArcK8sCluster=true + resourceProvider=$arcK8sResourceProvider + elif [ $providerName = "microsoft.redhatopenshift/openshiftclusters" ]; then + echo "provider cluster resource is of AROv4 cluster type" + resourceProvider=$aroV4ResourceProvider + isAroV4Cluster=true + elif [ $providerName = "microsoft.containerservice/managedclusters" ]; then + echo "provider cluster resource is of AKS cluster type" + isAksCluster=true + resourceProvider=$aksResourceProvider + else + echo "-e unsupported azure managed cluster type" + exit 1 + fi + + if [ -z "$kubeconfigContext" ]; then + echo "using or getting current kube config context since --kube-context parameter not set " + fi + + if [ ! -z "$servicePrincipalClientId" -a ! -z "$servicePrincipalClientSecret" -a ! -z "$servicePrincipalTenantId" ]; then + echo "using service principal creds (clientId, secret and tenantId) for azure login since provided" + isUsingServicePrincipal=true + fi +} + +configure_to_public_cloud() { + echo "Set AzureCloud as active cloud for az cli" + az cloud set -n $defaultAzureCloud +} + +validate_cluster_identity() { + echo "validating cluster identity" + + local rgName="$(echo ${1})" + local clusterName="$(echo ${2})" + + local identitytype=$(az resource show -g ${rgName} -n ${clusterName} --resource-type $resourceProvider --query identity.type) + identitytype=$(echo $identitytype | tr "[:upper:]" "[:lower:]" | tr -d '"') + echo "cluster identity type:" $identitytype + + if [[ "$identitytype" != "systemassigned" ]]; then + echo "-e only supported cluster identity is systemassigned for Azure Arc enabled Kubernetes cluster type" + exit 1 + fi + + echo "successfully validated the identity of the cluster" +} + +validate_monitoring_tags() { + echo "get loganalyticsworkspaceResourceId tag on to cluster resource" + logAnalyticsWorkspaceResourceIdTag=$(az resource show --query tags.logAnalyticsWorkspaceResourceId -g $clusterResourceGroup -n $clusterName --resource-type $resourceProvider) + echo "configured log analytics workspace: ${logAnalyticsWorkspaceResourceIdTag}" + echo "successfully got logAnalyticsWorkspaceResourceId tag on the cluster resource" + if [ -z "$logAnalyticsWorkspaceResourceIdTag" ]; then + echo "-e logAnalyticsWorkspaceResourceId doesnt exist on this cluster which indicates cluster not enabled for monitoring" + exit 1 + fi +} + + +upgrade_helm_chart_release() { + + # get the config-context for ARO v4 cluster + if [ "$isAroV4Cluster" = true ]; then + echo "getting config-context of ARO v4 cluster " + echo "getting admin user creds for aro v4 cluster" + adminUserName=$(az aro list-credentials -g $clusterResourceGroup -n $clusterName --query 'kubeadminUsername' -o tsv) + adminPassword=$(az aro list-credentials -g $clusterResourceGroup -n $clusterName --query 'kubeadminPassword' -o tsv) + apiServer=$(az aro show -g $clusterResourceGroup -n $clusterName --query apiserverProfile.url -o tsv) + echo "login to the cluster via oc login" + oc login $apiServer -u $adminUserName -p $adminPassword + echo "creating project azure-monitor-for-containers" + oc new-project $openshiftProjectName + echo "getting config-context of aro v4 cluster" + kubeconfigContext=$(oc config current-context) + fi + + if [ -z "$kubeconfigContext" ]; then + echo "installing Azure Monitor for 
containers HELM chart on to the cluster and using current kube context ..." + else + echo "installing Azure Monitor for containers HELM chart on to the cluster with kubecontext:${kubeconfigContext} ..." + fi + + export HELM_EXPERIMENTAL_OCI=1 + + echo "pull the chart from ${mcr}/${mcrChartRepoPath}:${mcrChartVersion}" + helm chart pull ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} + + echo "export the chart from local cache to current directory" + helm chart export ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} --destination . + + helmChartRepoPath=$helmLocalRepoName/$helmChartName + + echo "upgrading the release: $releaseName to chart version : ${mcrChartVersion}" + helm get values $releaseName -o yaml | helm upgrade --install $releaseName $helmChartRepoPath -f - + echo "$releaseName got upgraded successfully." +} + +login_to_azure() { + if [ "$isUsingServicePrincipal" = true ]; then + echo "login to the azure using provided service principal creds" + az login --service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId + else + echo "login to the azure interactively" + az login --use-device-code + fi +} + +set_azure_subscription() { + local subscriptionId="$(echo ${1})" + echo "setting the subscription id: ${subscriptionId} as current subscription for the azure cli" + az account set -s ${subscriptionId} + echo "successfully configured subscription id: ${subscriptionId} as current subscription for the azure cli" +} + +# parse and validate args +parse_args $@ + +# configure azure cli for public cloud +configure_to_public_cloud + +# parse cluster resource id +clusterSubscriptionId="$(echo $clusterResourceId | cut -d'/' -f3 | tr "[:upper:]" "[:lower:]")" +clusterResourceGroup="$(echo $clusterResourceId | cut -d'/' -f5)" +providerName="$(echo $clusterResourceId | cut -d'/' -f7)" +clusterName="$(echo $clusterResourceId | cut -d'/' -f9)" + +# login to azure +login_to_azure + +# set the cluster subscription id as active sub for azure cli +set_azure_subscription $clusterSubscriptionId + +# validate cluster identity if its Azure Arc enabled Kubernetes cluster +if [ "$isArcK8sCluster" = true ]; then + validate_cluster_identity $clusterResourceGroup $clusterName +fi + +# validate the cluster has monitoring tags +validate_monitoring_tags + +# upgrade helm chart release +upgrade_helm_chart_release + +# portal link +echo "Proceed to https://aka.ms/azmon-containers to view health of your newly onboarded cluster" From e6dad8354e38efc1fdd9eafbb269aa9d9e26fefd Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Thu, 1 Oct 2020 14:08:31 -0700 Subject: [PATCH 030/301] Install CA certs from wireserver (#451) --- kubernetes/windows/main.ps1 | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index de82722ad..2e8659601 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -263,6 +263,27 @@ function Generate-Certificates { C:\\opt\\omsagentwindows\\certgenerator\\certificategenerator.exe } +function Bootstrap-CACertificates { + try { + # This is required when the root CA certs are different for some clouds. 
+ $caCerts=Invoke-WebRequest 'http://168.63.129.16/machine?comp=acmspackage&type=cacertificates&ext=json' -UseBasicParsing | ConvertFrom-Json + if (![string]::IsNullOrEmpty($caCerts)) { + $certificates = $caCerts.Certificates + for ($index = 0; $index -lt $certificates.Length ; $index++) { + $name=$certificates[$index].Name + $certificates[$index].CertBody > $name + Write-Host "name: $($name)" + Import-Certificate -FilePath .\$name -CertStoreLocation 'Cert:\LocalMachine\Root' -Verbose + } + } + } + catch { + $e = $_.Exception + Write-Host $e + Write-Host "exception occured in Bootstrap-CACertificates..." + } +} + function Test-CertificatePath { $certLocation = $env:CI_CERT_LOCATION $keyLocation = $env:CI_KEY_LOCATION @@ -288,6 +309,14 @@ Start-Transcript -Path main.txt Remove-WindowsServiceIfItExists "fluentdwinaks" Set-EnvironmentVariables Start-FileSystemWatcher + +#Bootstrapping CA certs for non public clouds and AKS clusters +$aksResourceId = [System.Environment]::GetEnvironmentVariable("AKS_RESOURCE_ID") +if (![string]::IsNullOrEmpty($aksResourceId) -and $aksResourceId.ToLower().Contains("/microsoft.containerservice/managedclusters/")) +{ + Bootstrap-CACertificates +} + Generate-Certificates Test-CertificatePath Start-Fluent From 23397edf3764870dde9d7f4eef10f0842ae5adc6 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Thu, 1 Oct 2020 16:14:49 -0700 Subject: [PATCH 031/301] grwehner/pv-volume-name-in-mdm (#452) Add volume name for PV to mdm dimensions and zero fill it --- source/plugins/ruby/MdmAlertTemplates.rb | 2 ++ source/plugins/ruby/MdmMetricsGenerator.rb | 3 +++ source/plugins/ruby/constants.rb | 1 + 3 files changed, 6 insertions(+) diff --git a/source/plugins/ruby/MdmAlertTemplates.rb b/source/plugins/ruby/MdmAlertTemplates.rb index d5107fea1..ef63cf219 100644 --- a/source/plugins/ruby/MdmAlertTemplates.rb +++ b/source/plugins/ruby/MdmAlertTemplates.rb @@ -101,6 +101,7 @@ class MdmAlertTemplates "podName", "node", "kubernetesNamespace", + "volumeName", "thresholdPercentage" ], "series": [ @@ -109,6 +110,7 @@ class MdmAlertTemplates "%{podNameDimValue}", "%{computerNameDimValue}", "%{namespaceDimValue}", + "%{volumeNameDimValue}", "%{thresholdPercentageDimValue}" ], "min": %{pvResourceUtilizationPercentage}, diff --git a/source/plugins/ruby/MdmMetricsGenerator.rb b/source/plugins/ruby/MdmMetricsGenerator.rb index b8104212d..12d462e44 100644 --- a/source/plugins/ruby/MdmMetricsGenerator.rb +++ b/source/plugins/ruby/MdmMetricsGenerator.rb @@ -186,6 +186,7 @@ def zeroFillMetricRecords(records, batch_time) pvZeroFillDims = {} pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_PVC_NAMESPACE] = Constants::KUBESYSTEM_NAMESPACE_ZERO_FILL pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_POD_NAME] = Constants::OMSAGENT_ZERO_FILL + pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_VOLUME_NAME] = Constants::VOLUME_NAME_ZERO_FILL pvResourceUtilMetricRecord = getPVResourceUtilMetricRecords(batch_time, Constants::PV_USED_BYTES, @@hostName, @@ -289,6 +290,7 @@ def getPVResourceUtilMetricRecords(recordTimeStamp, metricName, computer, percen pvcNamespace = dims[Constants::INSIGHTSMETRICS_TAGS_PVC_NAMESPACE] podName = dims[Constants::INSIGHTSMETRICS_TAGS_POD_NAME] podUid = dims[Constants::INSIGHTSMETRICS_TAGS_POD_UID] + volumeName = dims[Constants::INSIGHTSMETRICS_TAGS_VOLUME_NAME] resourceUtilRecord = MdmAlertTemplates::PV_resource_utilization_template % { timestamp: recordTimeStamp, @@ -296,6 +298,7 @@ def getPVResourceUtilMetricRecords(recordTimeStamp, metricName, computer, percen podNameDimValue: 
podName, computerNameDimValue: computer, namespaceDimValue: pvcNamespace, + volumeNameDimValue: volumeName, pvResourceUtilizationPercentage: percentageMetricValue, thresholdPercentageDimValue: thresholdPercentage, } diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index be1a9de64..35e5f9334 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -76,6 +76,7 @@ class Constants TELEGRAF_DISK_METRICS = "container.azm.ms/disk" OMSAGENT_ZERO_FILL = "omsagent" KUBESYSTEM_NAMESPACE_ZERO_FILL = "kube-system" + VOLUME_NAME_ZERO_FILL = "-" #Telemetry constants CONTAINER_METRICS_HEART_BEAT_EVENT = "ContainerMetricsMdmHeartBeatEvent" From 7562a96696cb4882f8387ba405b8a0f0145b00ad Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Mon, 5 Oct 2020 13:57:01 -0700 Subject: [PATCH 032/301] Release changes for 10052020 release (#453) * Release changes for 10052020 release * remove redundant kubelet metrics as part of PR feedback --- ReleaseNotes.md | 18 ++++++++++++++++++ build/version | 6 +++--- charts/azuremonitor-containers/Chart.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 6 +++--- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 12 ++++++------ kubernetes/windows/Dockerfile | 2 +- 7 files changed, 33 insertions(+), 15 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 499c99f02..e1892d083 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,24 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 10/05/2020 - +##### Version microsoft/oms:ciprod10052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10052020 (linux) +##### Version microsoft/oms:win-ciprod10052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020 (windows) +##### Code change log +- Health CRD to version v1 (from v1beta1) for k8s versions >= 1.19.0 +- Collection of PV usage metrics for PVs mounted by pods (kube-system pods excluded by default)(doc-link-needed) +- Zero fill few custom metrics under a timer, also add zero filling for new PV usage metrics +- Collection of additional Kubelet metrics ('kubelet_running_pod_count','volume_manager_total_volumes','kubelet_node_config_error','process_resident_memory_bytes','process_cpu_seconds_total','kubelet_runtime_operations_total','kubelet_runtime_operations_errors_total'). This also includes updates to 'kubelet' workbook to include these new metrics +- Collection of Azure NPM (Network Policy Manager) metrics (basic & advanced. By default, NPM metrics collection is turned OFF)(doc-link-needed) +- Support log collection when docker root is changed with knode. 
Tracked by [this](https://github.com/Azure/AKS/issues/1373) issue +- Support for Pods in 'Terminating' state for nodelost scenarios +- Fix for reduction in telemetry for custom metrics ingestion failures +- Fix CPU capacity/limits metrics being 0 for Virtual nodes (VK) +- Add new custom metric regions (eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth) +- Enable strict SSL validation for AppInsights Ruby SDK +- Turn off custom metrics upload for unsupported cluster types +- Install CA certs from wire server for windows (in certain clouds) + ### 09/16/2020 - > Note: This agent release targetted ONLY for non-AKS clusters via Azure Monitor for containers HELM chart update ##### Version microsoft/oms:ciprod09162020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod09162020 (linux) diff --git a/build/version b/build/version index b53b0dcfb..9587328de 100644 --- a/build/version +++ b/build/version @@ -3,10 +3,10 @@ # Build Version Information CONTAINER_BUILDVERSION_MAJOR=10 -CONTAINER_BUILDVERSION_MINOR=0 +CONTAINER_BUILDVERSION_MINOR=1 CONTAINER_BUILDVERSION_PATCH=0 -CONTAINER_BUILDVERSION_BUILDNR=5 -CONTAINER_BUILDVERSION_DATE=20200916 +CONTAINER_BUILDVERSION_BUILDNR=0 +CONTAINER_BUILDVERSION_DATE=20201005 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 1d3fed86f..6d45b05d8 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.7.6 +version: 2.7.7 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 2711cb372..f841dc5d7 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -12,10 +12,10 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod09252020" - tagWindows: "win-ciprod09252020" + tag: "ciprod10052020" + tagWindows: "win-ciprod10052020" pullPolicy: IfNotPresent - dockerProviderVersion: "10.0.0-6" + dockerProviderVersion: "10.1.0-0" agentVersion: "1.10.0.1" ## To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. 
diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index ee35cd556..f4324a18a 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod09162020 +ARG IMAGE_TAG=ciprod10052020 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index e8352e020..18bc203d4 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -337,13 +337,13 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.0.0-5" + dockerProviderVersion: "10.1.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod09162020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10052020" imagePullPolicy: IfNotPresent resources: limits: @@ -494,13 +494,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.0.0-5" + dockerProviderVersion: "10.1.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod09162020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10052020" imagePullPolicy: IfNotPresent resources: limits: @@ -640,13 +640,13 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.0.0-5" + dockerProviderVersion: "10.1.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod09162020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index ca89d1c80..c7dee60af 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod09162020 +ARG IMAGE_TAG=win-ciprod10052020 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement From 4b47f44491a77d7321cbbba6e5d2941326b06159 Mon Sep 17 00:00:00 2001 From: saaror <31900410+saaror@users.noreply.github.com> Date: Mon, 12 Oct 2020 16:49:16 -0700 Subject: [PATCH 033/301] Update onboarding_instructions.md (#456) * Update onboarding_instructions.md Updated the documentation to reflect where to update the config map. * Update onboarding_instructions.md * Update onboarding_instructions.md * Update onboarding_instructions.md Updated the link --- Health/onboarding_instructions.md | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/Health/onboarding_instructions.md b/Health/onboarding_instructions.md index 9c07b2167..4c83577b5 100644 --- a/Health/onboarding_instructions.md +++ b/Health/onboarding_instructions.md @@ -6,12 +6,28 @@ For on-boarding to Health(Tab), you would need to complete two steps ## Configure agent through ConfigMap -1. Include the following section in ConfigMap yaml file -```cmd:agent-settings: |- - [agent_settings.health_model] +1. 
If you are configuring your existing ConfigMap, append the following section in your existing ConfigMap yaml file +``` +#Append this section in your existing configmap +agent-settings: |- + # agent health model feature settings + [agent_settings.health_model] + # In the absence of this configmap, default value for enabled is false + enabled = true +``` +2. Else if you don't have ConfigMap, download the new ConfigMap from [here.](https://github.com/microsoft/Docker-Provider/blob/ci_prod/kubernetes/container-azm-ms-agentconfig.yaml) & then set `enabled =true` + +``` +#For new downloaded configmap enabled this default setting to true +agent-settings: |- + # agent health model feature settings + [agent_settings.health_model] + # In the absence of this configmap, default value for enabled is false enabled = true ``` -2. Run the following kubectl command: + + +3. Run the following kubectl command: `kubectl apply -f ` Example: `kubectl apply -f container-azm-ms-agentconfig.yaml`. From 3f86b23523da9082e1a36faec00af992994622cb Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 19 Oct 2020 12:42:22 -0700 Subject: [PATCH 034/301] chart update for sept2020 release (#457) --- scripts/onboarding/managed/enable-monitoring.ps1 | 2 +- scripts/onboarding/managed/enable-monitoring.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index 1e1669400..4815dc958 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -60,7 +60,7 @@ $isUsingServicePrincipal = $false # released chart version in mcr $mcr = "mcr.microsoft.com" -$mcrChartVersion = "2.7.6" +$mcrChartVersion = "2.7.7" $mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" $helmLocalRepoName = "." diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index ce62a581a..d7edf49dc 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -42,7 +42,7 @@ set -o pipefail defaultAzureCloud="AzureCloud" # released chart version in mcr -mcrChartVersion="2.7.6" +mcrChartVersion="2.7.7" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" helmLocalRepoName="." 
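For context, the 2.7.7 chart version bumped in these onboarding scripts is consumed through the Helm OCI pull/export/upgrade flow implemented in upgrade-monitoring.sh (enable-monitoring.sh uses the same pull/export steps but passes explicit --set values). A minimal sketch of that flow, assuming Helm 3 with experimental OCI support and the repo path, chart version and release name these scripts define:

export HELM_EXPERIMENTAL_OCI=1

mcr="mcr.microsoft.com"
mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers"
mcrChartVersion="2.7.7"
releaseName="azmon-containers-release-1"

# pull the chart from MCR into the local helm cache, then export it to the current directory
helm chart pull ${mcr}/${mcrChartRepoPath}:${mcrChartVersion}
helm chart export ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} --destination .

# upgrade (or install) the release from the exported local chart,
# re-using the values the existing release was installed with
helm get values ${releaseName} -o yaml | \
  helm upgrade --install ${releaseName} ./azuremonitor-containers -f -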
From 6203c3a0dd3a1deafd39aaa18e08968f01f45ab8 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 19 Oct 2020 16:58:12 -0700 Subject: [PATCH 035/301] add missing version update in the script (#458) --- scripts/onboarding/managed/upgrade-monitoring.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 8a12b2f02..23594c7bc 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -20,7 +20,7 @@ set -e set -o pipefail # released chart version for Azure Arc enabled Kubernetes public preview -mcrChartVersion="2.7.6" +mcrChartVersion="2.7.7" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" From 5b154691ba558c1257e15879b3a6f34655a3fc45 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Tue, 27 Oct 2020 12:54:03 -0700 Subject: [PATCH 036/301] November release fixes - activate one agent, adx schema v2, win perf issue, syslog deactivation (#459) * activate one agent, adx schema v2, win perf issue, syslog deactivation * update chart --- .../linux/installer/scripts/livenessprobe.sh | 14 +- .../templates/omsagent-daemonset-windows.yaml | 3 +- .../templates/omsagent-daemonset.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 6 +- kubernetes/linux/main.sh | 123 ++++++++++++++---- kubernetes/linux/setup.sh | 4 +- kubernetes/omsagent.yaml | 10 +- source/plugins/go/src/oms.go | 62 +++++---- source/plugins/go/src/utils.go | 2 +- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 4 +- source/plugins/ruby/constants.rb | 2 + 11 files changed, 165 insertions(+), 67 deletions(-) diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index 87f68a560..e3f9fb475 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -4,15 +4,25 @@ (ps -ef | grep omsagent- | grep -v "grep") if [ $? -ne 0 ] then - echo "Agent is NOT running" > /dev/termination-log + echo " omsagent is not running" > /dev/termination-log exit 1 fi +#optionally test to exit non zero value if oneagent is not running +if [ -e "/opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2" ]; then + (ps -ef | grep "mdsd -l" | grep -v "grep") + if [ $? -ne 0 ] + then + echo "oneagent is not running" > /dev/termination-log + exit 1 + fi +fi + #test to exit non zero value if fluentbit is not running (ps -ef | grep td-agent-bit | grep -v "grep") if [ $? -ne 0 ] then - echo "Fluentbit is NOT running" > /dev/termination-log + echo "Fluentbit is not running" > /dev/termination-log exit 1 fi diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index e65f9a98d..c916fadf6 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -46,7 +46,7 @@ spec: {{- end }} imagePullPolicy: IfNotPresent resources: -{{ toYaml .Values.omsagent.resources.daemonset | indent 9 }} +{{ toYaml .Values.omsagent.resources.daemonset-windows | indent 9 }} env: {{- if ne .Values.omsagent.env.clusterId "" }} - name: AKS_RESOURCE_ID @@ -96,6 +96,7 @@ spec: - C:\opt\omsagentwindows\scripts\cmd\livenessProbe.cmd periodSeconds: 60 initialDelaySeconds: 180 + timeoutSeconds: 15 {{- with .Values.omsagent.tolerations }} tolerations: {{- toYaml . 
| nindent 8 }} {{- end }} diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index 438294ce5..8af13b6ee 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -40,7 +40,7 @@ spec: {{- end }} imagePullPolicy: IfNotPresent resources: -{{ toYaml .Values.omsagent.resources.daemonset | indent 9 }} +{{ toYaml .Values.omsagent.resources.daemonset-linux | indent 9 }} env: {{- if ne .Values.omsagent.env.clusterId "" }} - name: AKS_RESOURCE_ID diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index f841dc5d7..fa01c05bd 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -116,13 +116,17 @@ omsagent: ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: - daemonset: + daemonset-linux: requests: cpu: 75m memory: 225Mi limits: cpu: 150m memory: 600Mi + daemonset-windows: + limits: + cpu: 200m + memory: 600Mi deployment: requests: cpu: 150m diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 11972f0f4..b093eb74b 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -416,6 +416,97 @@ echo "DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION echo "export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" >> ~/.bashrc +#region check to auto-activate oneagent, to route container logs, +#Intent is to activate one agent routing for all managed clusters with region in the regionllist, unless overridden by configmap +# AZMON_CONTAINER_LOGS_ROUTE will have route (if any) specified in the config map +# AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE will have the final route that we compute & set, based on our region list logic +echo "************start oneagent log routing checks************" +# by default, use configmap route for safer side +AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$AZMON_CONTAINER_LOGS_ROUTE + +#trim region list +oneagentregions="$(echo $AZMON_CONTAINERLOGS_ONEAGENT_REGIONS | xargs)" +#lowercase region list +typeset -l oneagentregions=$oneagentregions +echo "oneagent regions: $oneagentregions" +#trim current region +currentregion="$(echo $AKS_REGION | xargs)" +#lowercase current region +typeset -l currentregion=$currentregion +echo "current region: $currentregion" + +#initilze isoneagentregion as false +isoneagentregion=false + +#set isoneagentregion as true if matching region is found +if [ ! -z $oneagentregions ] && [ ! -z $currentregion ]; then + for rgn in $(echo $oneagentregions | sed "s/,/ /g"); do + if [ "$rgn" == "$currentregion" ]; then + isoneagentregion=true + echo "current region is in oneagent regions..." + break + fi + done +else + echo "current region is not in oneagent regions..." +fi + +if [ "$isoneagentregion" = true ]; then + #if configmap has a routing for logs, but current region is in the oneagent region list, take the configmap route + if [ ! -z $AZMON_CONTAINER_LOGS_ROUTE ]; then + AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$AZMON_CONTAINER_LOGS_ROUTE + echo "oneagent region is true for current region:$currentregion and config map logs route is not empty. 
so using config map logs route as effective route:$AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" + else #there is no configmap route, so route thru oneagent + AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE="v2" + echo "oneagent region is true for current region:$currentregion and config map logs route is empty. so using oneagent as effective route:$AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" + fi +else + echo "oneagent region is false for current region:$currentregion" +fi + + +#start oneagent +if [ ! -e "/etc/config/kube.conf" ]; then + if [ ! -z $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE ]; then + echo "container logs configmap route is $AZMON_CONTAINER_LOGS_ROUTE" + echo "container logs effective route is $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" + #trim + containerlogsroute="$(echo $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE | xargs)" + # convert to lowercase + typeset -l containerlogsroute=$containerlogsroute + + echo "setting AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE as :$containerlogsroute" + export AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$containerlogsroute + echo "export AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$containerlogsroute" >> ~/.bashrc + source ~/.bashrc + + if [ "$containerlogsroute" == "v2" ]; then + echo "activating oneagent..." + echo "configuring mdsd..." + cat /etc/mdsd.d/envmdsd | while read line; do + echo $line >> ~/.bashrc + done + source /etc/mdsd.d/envmdsd + + echo "setting mdsd workspaceid & key for workspace:$CIWORKSPACE_id" + export CIWORKSPACE_id=$CIWORKSPACE_id + echo "export CIWORKSPACE_id=$CIWORKSPACE_id" >> ~/.bashrc + export CIWORKSPACE_key=$CIWORKSPACE_key + echo "export CIWORKSPACE_key=$CIWORKSPACE_key" >> ~/.bashrc + + source ~/.bashrc + + dpkg -l | grep mdsd | awk '{print $2 " " $3}' + + echo "starting mdsd ..." + mdsd -l -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & + + touch /opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2 + fi + fi +fi +echo "************end oneagent log routing checks************" + #telegraf & fluentbit requirements if [ ! -e "/etc/config/kube.conf" ]; then if [ "$CONTAINER_RUNTIME" == "docker" ]; then @@ -491,37 +582,13 @@ dpkg -l | grep td-agent-bit | awk '{print $2 " " $3}' #dpkg -l | grep telegraf | awk '{print $2 " " $3}' -#start oneagent -if [ ! -e "/etc/config/kube.conf" ]; then - if [ ! -z $AZMON_CONTAINER_LOGS_ROUTE ]; then - echo "container logs route is defined as $AZMON_CONTAINER_LOGS_ROUTE" - #trim - containerlogsroute="$(echo $AZMON_CONTAINER_LOGS_ROUTE | xargs)" - # convert to lowercase - typeset -l containerlogsroute=$containerlogsroute - if [ "$containerlogsroute" == "v2" ]; then - echo "containerlogsroute $containerlogsroute" - echo "configuring mdsd..." - cat /etc/mdsd.d/envmdsd | while read line; do - echo $line >> ~/.bashrc - done - source /etc/mdsd.d/envmdsd - echo "setting mdsd workspaceid & key for workspace:$CIWORKSPACE_id" - export CIWORKSPACE_id=$CIWORKSPACE_id - echo "export CIWORKSPACE_id=$CIWORKSPACE_id" >> ~/.bashrc - export CIWORKSPACE_key=$CIWORKSPACE_key - echo "export CIWORKSPACE_key=$CIWORKSPACE_key" >> ~/.bashrc - source ~/.bashrc +echo "stopping rsyslog..." +service rsyslog stop - dpkg -l | grep mdsd | awk '{print $2 " " $3}' - - echo "starting mdsd ..." - mdsd -l -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & - fi - fi -fi +echo "getting rsyslog status..." 
+service rsyslog status shutdown() { /opt/microsoft/omsagent/bin/service_control stop diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 67a981dfa..fb41d4782 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -31,8 +31,8 @@ mv $TMPDIR/omsbundle* $TMPDIR/omsbundle /usr/bin/dpkg -i $TMPDIR/omsbundle/110/omsagent*.deb #/usr/bin/dpkg -i $TMPDIR/omsbundle/100/omsconfig*.deb -#install oneagent - Latest dev bits (7/17) -wget https://github.com/microsoft/Docker-Provider/releases/download/7172020-oneagent/azure-mdsd_1.5.124-build.develop.1294_x86_64.deb +#install oneagent - Official bits (10/18) +wget https://github.com/microsoft/Docker-Provider/releases/download/10182020-oneagent/azure-mdsd_1.5.126-build.master.99_x86_64.deb /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb cp -f $TMPDIR/mdsd.xml /etc/mdsd.d cp -f $TMPDIR/envmdsd /etc/mdsd.d diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 18bc203d4..61f89b808 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -347,7 +347,7 @@ spec: imagePullPolicy: IfNotPresent resources: limits: - cpu: 150m + cpu: 250m memory: 600Mi requests: cpu: 75m @@ -370,6 +370,8 @@ spec: # Update this with the user assigned msi client id for omsagent - name: USER_ASSIGNED_IDENTITY_CLIENT_ID value: "" + - name: AZMON_CONTAINERLOGS_ONEAGENT_REGIONS + value: "koreacentral,norwayeast" securityContext: privileged: true ports: @@ -650,11 +652,8 @@ spec: imagePullPolicy: IfNotPresent resources: limits: - cpu: 150m + cpu: 200m memory: 600Mi - requests: - cpu: 75m - memory: 225Mi env: # azure devops pipeline uses AKS_RESOURCE_ID and AKS_REGION hence ensure to uncomment these - name: AKS_RESOURCE_ID @@ -696,6 +695,7 @@ spec: - C:\opt\omsagentwindows\scripts\cmd\livenessProbe.cmd periodSeconds: 60 initialDelaySeconds: 180 + timeoutSeconds: 15 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 63ca6de10..5a678781c 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -194,15 +194,15 @@ type DataItem struct { } type DataItemADX struct { - LogEntry string `json:"LogEntry"` - LogEntrySource string `json:"LogEntrySource"` - LogEntryTimeStamp string `json:"LogEntryTimeStamp"` - LogEntryTimeOfCommand string `json:"TimeOfCommand"` - ID string `json:"Id"` - Image string `json:"Image"` - Name string `json:"Name"` - SourceSystem string `json:"SourceSystem"` + TimeGenerated string `json:"TimeGenerated"` Computer string `json:"Computer"` + ContainerID string `json:"ContainerID"` + ContainerName string `json:"ContainerName"` + PodName string `json:"PodName"` + PodNamespace string `json:"PodNamespace"` + LogMessage string `json:"LogMessage"` + LogSource string `json:"LogSource"` + //PodLabels string `json:"PodLabels"` AzureResourceId string `json:"AzureResourceId"` } @@ -422,7 +422,7 @@ func convert(in interface{}) (float64, bool) { func populateKubeMonAgentEventHash(record map[interface{}]interface{}, errType KubeMonAgentEventType) { var logRecordString = ToString(record["log"]) var eventTimeStamp = ToString(record["time"]) - containerID, _, podName := GetContainerIDK8sNamespacePodNameFromFileName(ToString(record["filepath"])) + containerID, _, podName, _ := GetContainerIDK8sNamespacePodNameFromFileName(ToString(record["filepath"])) Log("Locked EventHashUpdateMutex for updating hash \n ") EventHashUpdateMutex.Lock() @@ -816,7 +816,7 @@ func PostDataHelper(tailPluginRecords 
[]map[interface{}]interface{}) int { DataUpdateMutex.Unlock() for _, record := range tailPluginRecords { - containerID, k8sNamespace, _ := GetContainerIDK8sNamespacePodNameFromFileName(ToString(record["filepath"])) + containerID, k8sNamespace, k8sPodName, containerName := GetContainerIDK8sNamespacePodNameFromFileName(ToString(record["filepath"])) logEntrySource := ToString(record["stream"]) if strings.EqualFold(logEntrySource, "stdout") { @@ -867,16 +867,18 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { if ResourceCentric == true { stringMap["AzureResourceId"] = ResourceID } + stringMap["PodName"] = k8sPodName + stringMap["PodNamespace"] = k8sNamespace + stringMap["ContainerName"] = containerName dataItemADX = DataItemADX{ - ID: stringMap["Id"], - LogEntry: stringMap["LogEntry"], - LogEntrySource: stringMap["LogEntrySource"], - LogEntryTimeStamp: stringMap["LogEntryTimeStamp"], - LogEntryTimeOfCommand: stringMap["TimeOfCommand"], - SourceSystem: stringMap["SourceSystem"], + TimeGenerated: stringMap["LogEntryTimeStamp"], Computer: stringMap["Computer"], - Image: stringMap["Image"], - Name: stringMap["Name"], + ContainerID: stringMap["Id"], + ContainerName: stringMap["ContainerName"], + PodName: stringMap["PodName"], + PodNamespace: stringMap["PodNamespace"], + LogMessage: stringMap["LogEntry"], + LogSource: stringMap["LogEntrySource"], AzureResourceId: stringMap["AzureResourceId"], } //ADX @@ -1018,7 +1020,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { //ADXFlushMutex.Lock() //defer ADXFlushMutex.Unlock() //MultiJSON support is not there yet - if ingestionErr := ADXIngestor.FromReader(ctx, r, ingest.IngestionMappingRef("ContainerLogMapping", ingest.JSON), ingest.FileFormat(ingest.JSON), ingest.FlushImmediately()); ingestionErr != nil { + if ingestionErr := ADXIngestor.FromReader(ctx, r, ingest.IngestionMappingRef("ContainerLogv2Mapping", ingest.JSON), ingest.FileFormat(ingest.JSON)); ingestionErr != nil { Log("Error when streaming to ADX Ingestion: %s", ingestionErr.Error()) //ADXIngestor = nil //not required as per ADX team. 
Will keep it to indicate that we tried this approach @@ -1107,12 +1109,13 @@ func containsKey(currentMap map[string]bool, key string) bool { return c } -// GetContainerIDK8sNamespacePodNameFromFileName Gets the container ID, k8s namespace and pod name From the file Name +// GetContainerIDK8sNamespacePodNameFromFileName Gets the container ID, k8s namespace, pod name and containername From the file Name // sample filename kube-proxy-dgcx7_kube-system_kube-proxy-8df7e49e9028b60b5b0d0547f409c455a9567946cf763267b7e6fa053ab8c182.log -func GetContainerIDK8sNamespacePodNameFromFileName(filename string) (string, string, string) { +func GetContainerIDK8sNamespacePodNameFromFileName(filename string) (string, string, string, string) { id := "" ns := "" podName := "" + containerName := "" start := strings.LastIndex(filename, "-") end := strings.LastIndex(filename, ".") @@ -1132,6 +1135,15 @@ func GetContainerIDK8sNamespacePodNameFromFileName(filename string) (string, str ns = filename[start+1 : end] } + start = strings.LastIndex(filename, "_") + end = strings.LastIndex(filename, "-") + + if start >= end || start == -1 || end == -1 { + containerName = "" + } else { + containerName = filename[start+1 : end] + } + start = strings.Index(filename, "/containers/") end = strings.Index(filename, "_") @@ -1141,7 +1153,7 @@ func GetContainerIDK8sNamespacePodNameFromFileName(filename string) (string, str podName = filename[(start + len("/containers/")):end] } - return id, ns, podName + return id, ns, podName, containerName } // InitializePlugin reads and populates plugin configuration @@ -1313,8 +1325,8 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { CreateHTTPClient() - ContainerLogsRoute := strings.TrimSpace(strings.ToLower(os.Getenv("AZMON_CONTAINER_LOGS_ROUTE"))) - Log("AZMON_CONTAINER_LOGS_ROUTE:%s", ContainerLogsRoute) + ContainerLogsRoute := strings.TrimSpace(strings.ToLower(os.Getenv("AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE"))) + Log("AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE:%s", ContainerLogsRoute) ContainerLogsRouteV2 = false //default is ODS ContainerLogsRouteADX = false //default is LA @@ -1365,7 +1377,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 { populateExcludedStdoutNamespaces() populateExcludedStderrNamespaces() - if enrichContainerLogs == true { + if enrichContainerLogs == true && ContainerLogsRouteADX != true { Log("ContainerLogEnrichment=true; starting goroutine to update containerimagenamemaps \n") go updateContainerImageNameMaps() } else { diff --git a/source/plugins/go/src/utils.go b/source/plugins/go/src/utils.go index 8b1a3df65..91791ae1a 100644 --- a/source/plugins/go/src/utils.go +++ b/source/plugins/go/src/utils.go @@ -145,7 +145,7 @@ func CreateADXClient() { //log.Fatalf("Unable to create ADX connection %s", err.Error()) } else { Log("Successfully created ADX Client. 
Creating Ingestor...") - ingestor, ingestorErr := ingest.New(client, "containerinsights", "ContainerLog") + ingestor, ingestorErr := ingest.New(client, "containerinsights", "ContainerLogv2") if ingestorErr != nil { Log("Error::mdsd::Unable to create ADX ingestor %s", ingestorErr.Error()) } else { diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 9e0935480..67bd61667 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -248,7 +248,9 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met telemetryProps["dsPromUrl"] = @dsPromUrlCount end #telemetry about containerlogs Routing for daemonset - if (!@containerLogsRoute.nil? && !@containerLogsRoute.empty?) + if File.exist?(Constants::AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2_FILENAME) + telemetryProps["containerLogsRoute"] = "v2" + elsif (!@containerLogsRoute.nil? && !@containerLogsRoute.empty?) telemetryProps["containerLogsRoute"] = @containerLogsRoute end #telemetry about health model diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index 35e5f9334..0e5099c5e 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -91,4 +91,6 @@ class Constants #Pod Statuses POD_STATUS_TERMINATING = "Terminating" + + AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2_FILENAME = "/opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2" end From 157ba201f426a0f53193a9eb26a6ad650edc9442 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Tue, 27 Oct 2020 20:17:03 -0700 Subject: [PATCH 037/301] remove hiphen for params in chart (#462) Merging as its a simple fix (remove hiphen) --- .../templates/omsagent-daemonset-windows.yaml | 2 +- .../azuremonitor-containers/templates/omsagent-daemonset.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index c916fadf6..6a309c121 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -46,7 +46,7 @@ spec: {{- end }} imagePullPolicy: IfNotPresent resources: -{{ toYaml .Values.omsagent.resources.daemonset-windows | indent 9 }} +{{ toYaml .Values.omsagent.resources.daemonsetwindows | indent 9 }} env: {{- if ne .Values.omsagent.env.clusterId "" }} - name: AKS_RESOURCE_ID diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index 8af13b6ee..d57c4d82b 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -40,7 +40,7 @@ spec: {{- end }} imagePullPolicy: IfNotPresent resources: -{{ toYaml .Values.omsagent.resources.daemonset-linux | indent 9 }} +{{ toYaml .Values.omsagent.resources.daemonsetlinux | indent 9 }} env: {{- if ne .Values.omsagent.env.clusterId "" }} - name: AKS_RESOURCE_ID diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index fa01c05bd..774e6203f 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -116,14 +116,14 @@ omsagent: ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: - 
daemonset-linux: + daemonsetlinux: requests: cpu: 75m memory: 225Mi limits: cpu: 150m memory: 600Mi - daemonset-windows: + daemonsetwindows: limits: cpu: 200m memory: 600Mi From 7c448bc5f561b2a72c33c689dda0db893bd41038 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Tue, 27 Oct 2020 21:22:34 -0700 Subject: [PATCH 038/301] Changes for cutting a new build for ciprod10272020 release (#460) --- ReleaseNotes.md | 10 ++++++++++ build/version | 6 +++--- charts/azuremonitor-containers/Chart.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 6 +++--- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 12 ++++++------ kubernetes/windows/Dockerfile | 2 +- scripts/onboarding/managed/enable-monitoring.ps1 | 2 +- scripts/onboarding/managed/enable-monitoring.sh | 2 +- scripts/onboarding/managed/upgrade-monitoring.sh | 2 +- 10 files changed, 28 insertions(+), 18 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index e1892d083..eb8e282b9 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,16 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 10/27/2020 - +##### Version microsoft/oms:ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020 (linux) +##### Version microsoft/oms:win-ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020 (windows) +##### Code change log +- Activate oneagent in few AKS regions (koreacentral,norwayeast) +- Disable syslog +- Fix timeout for Windows daemonset liveness probe +- Make request == limit for Windows daemonset resources (cpu & memory) +- Schema v2 for container log (ADX only - applicable only for select customers for piloting) + ### 10/05/2020 - ##### Version microsoft/oms:ciprod10052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10052020 (linux) ##### Version microsoft/oms:win-ciprod10052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020 (windows) diff --git a/build/version b/build/version index 9587328de..71c70020e 100644 --- a/build/version +++ b/build/version @@ -2,11 +2,11 @@ # Build Version Information -CONTAINER_BUILDVERSION_MAJOR=10 -CONTAINER_BUILDVERSION_MINOR=1 +CONTAINER_BUILDVERSION_MAJOR=11 +CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 CONTAINER_BUILDVERSION_BUILDNR=0 -CONTAINER_BUILDVERSION_DATE=20201005 +CONTAINER_BUILDVERSION_DATE=20201027 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 6d45b05d8..bc35690e4 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.7.7 +version: 2.7.8 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 774e6203f..0f07a98c1 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -12,10 +12,10 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod10052020" - tagWindows: "win-ciprod10052020" + tag: "ciprod10272020" + 
tagWindows: "win-ciprod10272020" pullPolicy: IfNotPresent - dockerProviderVersion: "10.1.0-0" + dockerProviderVersion: "11.0.0-0" agentVersion: "1.10.0.1" ## To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index f4324a18a..c3428a44a 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod10052020 +ARG IMAGE_TAG=ciprod10272020 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 61f89b808..ca47d898d 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -337,13 +337,13 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.1.0-0" + dockerProviderVersion: "11.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10052020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020" imagePullPolicy: IfNotPresent resources: limits: @@ -496,13 +496,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.1.0-0" + dockerProviderVersion: "11.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10052020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020" imagePullPolicy: IfNotPresent resources: limits: @@ -642,13 +642,13 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.1.0-0" + dockerProviderVersion: "11.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10272020" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index c7dee60af..414817559 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod10052020 +ARG IMAGE_TAG=win-ciprod10272020 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index 4815dc958..22d34894f 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -60,7 +60,7 @@ $isUsingServicePrincipal = $false # released chart version in mcr $mcr = "mcr.microsoft.com" -$mcrChartVersion = "2.7.7" +$mcrChartVersion = "2.7.8" $mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" $helmLocalRepoName = "." 
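This release bump repeats the same image tag and version strings across the Dockerfiles, Helm values, the deployment yaml and the onboarding scripts. Below is a minimal sketch of a consistency check, assuming the file paths and the ciprod10272020 tag from this patch; it is hypothetical and not part of the repository's tooling.

```bash
#!/usr/bin/env bash
# Hypothetical helper, not shipped with this repo: confirm every file touched
# by a release bump references the same image tag.
TAG="ciprod10272020"
FILES=(
  kubernetes/linux/Dockerfile
  kubernetes/omsagent.yaml
  charts/azuremonitor-containers/values.yaml
)
for f in "${FILES[@]}"; do
  if grep -q "$TAG" "$f"; then
    echo "OK    $f"
  else
    echo "STALE $f"   # still carries an older ciprod tag
  fi
done
```

The same idea applies to the chart version (2.7.8) and the dockerProviderVersion strings bumped in this commit.
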
diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index d7edf49dc..e0d26c370 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -42,7 +42,7 @@ set -o pipefail defaultAzureCloud="AzureCloud" # released chart version in mcr -mcrChartVersion="2.7.7" +mcrChartVersion="2.7.8" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" helmLocalRepoName="." diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 23594c7bc..4134d710f 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -20,7 +20,7 @@ set -e set -o pipefail # released chart version for Azure Arc enabled Kubernetes public preview -mcrChartVersion="2.7.7" +mcrChartVersion="2.7.8" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" From 62b27d79ba9622a939b6d20e33292725bb2e9bef Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 29 Oct 2020 08:18:07 -0700 Subject: [PATCH 039/301] using latest stable version of msys2 (#465) --- kubernetes/windows/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 414817559..c4545d705 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -10,7 +10,7 @@ ARG IMAGE_TAG=win-ciprod10272020 RUN powershell -Command "Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" # Fluentd depends on cool.io whose fat gem is only available for Ruby < 2.5, so need to specify --platform ruby when install Ruby > 2.5 and install msys2 to get dev tools RUN choco install -y ruby --version 2.6.5.1 --params "'/InstallDir:C:\ruby26'" \ -&& choco install -y msys2 --version 20190524.0.0.20191030 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ +&& choco install -y msys2 --version 20200903.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ && choco install -y vim # gangams - optional MSYS2 update via ridk failing in merged docker file so skipping that since we dont need optional update From 909cc16348135c31f8d82af130a75f8bc54f7b6f Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Thu, 29 Oct 2020 14:48:00 -0700 Subject: [PATCH 040/301] fixing the windows-perf-dups (#466) --- source/plugins/ruby/in_win_cadvisor_perf.rb | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 4e90195e5..9c267cf4f 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -10,7 +10,7 @@ class Win_CAdvisor_Perf_Input < Input def initialize super require "yaml" - require 'yajl/json_gem' + require "yajl/json_gem" require "time" require_relative "CAdvisorMetricsAPIClient" @@ -52,8 +52,6 @@ def shutdown def enumerate() time = Time.now.to_f begin - eventStream = MultiEventStream.new - insightsMetricsEventStream = MultiEventStream.new timeDifference = (DateTime.now.to_time.to_i - @@winNodeQueryTimeTracker).abs timeDifferenceInMinutes = timeDifference / 60 @@istestvar = ENV["ISTEST"] @@ -70,6 +68,7 @@ def enumerate() @@winNodeQueryTimeTracker = DateTime.now.to_time.to_i end @@winNodes.each do 
|winNode| + eventStream = MultiEventStream.new metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, metricTime: Time.now.utc.iso8601) metricData.each do |record| if !record.empty? @@ -81,7 +80,6 @@ def enumerate() router.emit_stream(@tag, eventStream) if eventStream router.emit_stream(@mdmtag, eventStream) if eventStream - if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && eventStream.count > 0) $log.info("winCAdvisorPerfEmitStreamSuccess @ #{Time.now.utc.iso8601}") end @@ -90,6 +88,7 @@ def enumerate() begin containerGPUusageInsightsMetricsDataItems = [] containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, metricTime: Time.now.utc.iso8601)) + insightsMetricsEventStream = MultiEventStream.new containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| wrapper = { @@ -104,12 +103,12 @@ def enumerate() router.emit_stream(@mdmtag, insightsMetricsEventStream) if insightsMetricsEventStream if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) $log.info("winCAdvisorInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") - end + end rescue => errorStr $log.warn "Failed when processing GPU Usage metrics in_win_cadvisor_perf : #{errorStr}" $log.debug_backtrace(errorStr.backtrace) ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) - end + end #end GPU InsightsMetrics items end From d481c066df67ce9cf76d163c0776502f3989aea1 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 6 Nov 2020 00:02:52 -0800 Subject: [PATCH 041/301] chart updates related to new microsoft/charts repo (#467) --- charts/azuremonitor-containers/README.md | 18 ++++++++++-------- .../templates/NOTES.txt | 2 +- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/charts/azuremonitor-containers/README.md b/charts/azuremonitor-containers/README.md index 3b357ffd5..469fac94a 100644 --- a/charts/azuremonitor-containers/README.md +++ b/charts/azuremonitor-containers/README.md @@ -29,6 +29,8 @@ Monitoring your Kubernetes cluster and containers is critical, especially when r ## Installing the Chart +> Note: If you want to customize the chart, fork the chart code in https://github.com/microsoft/Docker-Provider/tree/ci_prod/charts/azuremonitor-containers + > Note: `--name` flag not required in Helm3 since this flag is deprecated > Note: use `omsagent.proxy` parameter to set the proxy endpoint if your K8s cluster configured behind the proxy. Refer to [configure proxy](#Configuring-Proxy-Endpoint) for more details about proxy. 
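For clusters that already installed the chart from the deprecated incubator repository, a hedged sketch of moving to the microsoft charts repo introduced by this change is shown below; Helm 3 syntax is assumed and the release name is only an example.

```bash
# Sketch only: switch an existing release from the retired incubator repo to
# the microsoft charts repo ("azmon-containers-release-1" is an example name;
# keep your existing --set values or pass --reuse-values as shown).
helm repo remove incubator || true
helm repo add microsoft https://microsoft.github.io/charts/repo
helm repo update
helm upgrade azmon-containers-release-1 microsoft/azuremonitor-containers --reuse-values
```
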
@@ -36,25 +38,25 @@ Monitoring your Kubernetes cluster and containers is critical, especially when r ### To Use Azure Log Analytics Workspace in Public Cloud ```bash -$ helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/ +$ helm repo add microsoft https://microsoft.github.io/charts/repo $ helm install --name azmon-containers-release-1 \ ---set omsagent.secret.wsid=,omsagent.secret.key=,omsagent.env.clusterName= incubator/azuremonitor-containers +--set omsagent.secret.wsid=,omsagent.secret.key=,omsagent.env.clusterName= microsoft/azuremonitor-containers ``` ### To Use Azure Log Analytics Workspace in Azure China Cloud ```bash -$ helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/ +$ helm repo add microsoft https://microsoft.github.io/charts/repo $ helm install --name azmon-containers-release-1 \ ---set omsagent.domain=opinsights.azure.cn,omsagent.secret.wsid=,omsagent.secret.key=,omsagent.env.clusterName= incubator/azuremonitor-containers +--set omsagent.domain=opinsights.azure.cn,omsagent.secret.wsid=,omsagent.secret.key=,omsagent.env.clusterName= microsoft/azuremonitor-containers ``` ### To Use Azure Log Analytics Workspace in Azure US Government Cloud ```bash -$ helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/ +$ helm repo add microsoft https://microsoft.github.io/charts/repo $ helm install --name azmon-containers-release-1 \ ---set omsagent.domain=opinsights.azure.us,omsagent.secret.wsid=,omsagent.secret.key=,omsagent.env.clusterName= incubator/azuremonitor-containers +--set omsagent.domain=opinsights.azure.us,omsagent.secret.wsid=,omsagent.secret.key=,omsagent.env.clusterName= microsoft/azuremonitor-containers ``` ## Upgrading an existing Release to a new version @@ -112,13 +114,13 @@ Specify each parameter using the `--set key=value[,key=value]` argument to `helm $ helm install --name myrelease-1 \ --set omsagent.secret.wsid=,omsagent.secret.key=,omsagent.env.clusterName= - incubator/azuremonitor-containers + microsoft/azuremonitor-containers ``` Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, ```bash -$ helm install --name myrelease-1 -f values.yaml incubator/azuremonitor-containers +$ helm install --name myrelease-1 -f values.yaml microsoft/azuremonitor-containers ``` diff --git a/charts/azuremonitor-containers/templates/NOTES.txt b/charts/azuremonitor-containers/templates/NOTES.txt index 372cecb95..48ebf33fc 100644 --- a/charts/azuremonitor-containers/templates/NOTES.txt +++ b/charts/azuremonitor-containers/templates/NOTES.txt @@ -29,7 +29,7 @@ This deployment will not complete. 
To proceed, run --set omsagent.secret.wsid= \ --set omsagent.secret.key= \ --set omsagent.env.clusterName= \ - incubator/azuremonitor-containers + microsoft/azuremonitor-containers {{- else -}} From aff1e13c240836cea73f3913f098b2737f186b89 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Mon, 9 Nov 2020 13:18:02 -0800 Subject: [PATCH 042/301] Changes for creating 11092020 release (#468) --- ReleaseNotes.md | 6 ++++++ build/version | 4 ++-- charts/azuremonitor-containers/Chart.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 6 +++--- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 12 ++++++------ kubernetes/windows/Dockerfile | 2 +- scripts/onboarding/managed/enable-monitoring.ps1 | 2 +- scripts/onboarding/managed/enable-monitoring.sh | 2 +- scripts/onboarding/managed/upgrade-monitoring.sh | 2 +- 10 files changed, 23 insertions(+), 17 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index eb8e282b9..ddfd01314 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,12 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 11/09/2020 - +##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020 (linux) +##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod11092020 (windows) +##### Code change log +- Fix for duplicate windows metrics + ### 10/27/2020 - ##### Version microsoft/oms:ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020 (linux) ##### Version microsoft/oms:win-ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020 (windows) diff --git a/build/version b/build/version index 71c70020e..a8b78ecac 100644 --- a/build/version +++ b/build/version @@ -5,8 +5,8 @@ CONTAINER_BUILDVERSION_MAJOR=11 CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 -CONTAINER_BUILDVERSION_BUILDNR=0 -CONTAINER_BUILDVERSION_DATE=20201027 +CONTAINER_BUILDVERSION_BUILDNR=1 +CONTAINER_BUILDVERSION_DATE=20201109 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index bc35690e4..987841f77 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.7.8 +version: 2.7.9 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 0f07a98c1..76ea0a26d 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -12,10 +12,10 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod10272020" - tagWindows: "win-ciprod10272020" + tag: "ciprod11092020" + tagWindows: "win-ciprod11092020" pullPolicy: IfNotPresent - dockerProviderVersion: "11.0.0-0" + dockerProviderVersion: "11.0.0-1" agentVersion: "1.10.0.1" ## To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. 
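The values.yaml comment above points at the portal for the workspace ID and primary key; a hedged Azure CLI alternative is sketched below. The resource group and workspace names are placeholders, and the CLI commands are an assumption about your environment rather than anything this repo ships.

```bash
# Sketch, assuming the Azure CLI log-analytics workspace commands are available.
RG="my-resource-group"
WS="my-workspace"
# Workspace ID (omsagent.secret.wsid)
az monitor log-analytics workspace show \
  --resource-group "$RG" --workspace-name "$WS" --query customerId -o tsv
# Primary key (omsagent.secret.key)
az monitor log-analytics workspace get-shared-keys \
  --resource-group "$RG" --workspace-name "$WS" --query primarySharedKey -o tsv
```
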
diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index c3428a44a..d04e86128 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod10272020 +ARG IMAGE_TAG=ciprod11092020 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index ca47d898d..7d07eafcd 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -337,13 +337,13 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "11.0.0-0" + dockerProviderVersion: "11.0.0-1" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020" imagePullPolicy: IfNotPresent resources: limits: @@ -496,13 +496,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "11.0.0-0" + dockerProviderVersion: "11.0.0-1" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020" imagePullPolicy: IfNotPresent resources: limits: @@ -642,13 +642,13 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "11.0.0-0" + dockerProviderVersion: "11.0.0-1" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10272020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod11092020" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index c4545d705..10ea235b2 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod10272020 +ARG IMAGE_TAG=win-ciprod11092020 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index 22d34894f..b052f22c5 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -60,7 +60,7 @@ $isUsingServicePrincipal = $false # released chart version in mcr $mcr = "mcr.microsoft.com" -$mcrChartVersion = "2.7.8" +$mcrChartVersion = "2.7.9" $mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" $helmLocalRepoName = "." 
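Once the ciprod11092020 images are rolled out, the running tags can be spot-checked directly on the cluster. A minimal sketch follows, assuming the default kube-system objects from kubernetes/omsagent.yaml (daemonset omsagent, deployment omsagent-rs); adjust the names if your deployment differs.

```bash
# Sketch: confirm the agent pods picked up the new release tag.
kubectl -n kube-system get daemonset omsagent \
  -o jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'
kubectl -n kube-system get deployment omsagent-rs \
  -o jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'
# Both should report mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020
# (the Windows daemonset uses win-ciprod11092020).
```
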
diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index e0d26c370..bb6974258 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -42,7 +42,7 @@ set -o pipefail defaultAzureCloud="AzureCloud" # released chart version in mcr -mcrChartVersion="2.7.8" +mcrChartVersion="2.7.9" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" helmLocalRepoName="." diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 4134d710f..11ecf6819 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -20,7 +20,7 @@ set -e set -o pipefail # released chart version for Azure Arc enabled Kubernetes public preview -mcrChartVersion="2.7.8" +mcrChartVersion="2.7.9" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" From ca18850046fd54f7830bbe2addb51039928c3514 Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Mon, 9 Nov 2020 18:47:36 -0800 Subject: [PATCH 043/301] MDM exception aggregation (#470) --- source/plugins/ruby/constants.rb | 112 ++++++++++++++++--------------- source/plugins/ruby/out_mdm.rb | 51 ++++++++++++-- 2 files changed, 104 insertions(+), 59 deletions(-) diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index 0e5099c5e..079584c7b 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -1,61 +1,61 @@ # frozen_string_literal: true class Constants - INSIGHTSMETRICS_TAGS_ORIGIN = "container.azm.ms" - INSIGHTSMETRICS_TAGS_CLUSTERID = "container.azm.ms/clusterId" - INSIGHTSMETRICS_TAGS_CLUSTERNAME = "container.azm.ms/clusterName" - INSIGHTSMETRICS_TAGS_GPU_VENDOR = "gpuVendor" - INSIGHTSMETRICS_TAGS_GPU_NAMESPACE = "container.azm.ms/gpu" - INSIGHTSMETRICS_TAGS_GPU_MODEL = "gpuModel" - INSIGHTSMETRICS_TAGS_GPU_ID = "gpuId" - INSIGHTSMETRICS_TAGS_CONTAINER_NAME = "containerName" - INSIGHTSMETRICS_TAGS_CONTAINER_ID = "containerName" - INSIGHTSMETRICS_TAGS_K8SNAMESPACE = "k8sNamespace" - INSIGHTSMETRICS_TAGS_CONTROLLER_NAME = "controllerName" - INSIGHTSMETRICS_TAGS_CONTROLLER_KIND = "controllerKind" - INSIGHTSMETRICS_TAGS_POD_UID = "podUid" - INSIGTHTSMETRICS_TAGS_PV_NAMESPACE = "container.azm.ms/pv" - INSIGHTSMETRICS_TAGS_PVC_NAME = "pvcName" - INSIGHTSMETRICS_TAGS_PVC_NAMESPACE = "pvcNamespace" - INSIGHTSMETRICS_TAGS_POD_NAME = "podName" - INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES = "pvCapacityBytes" - INSIGHTSMETRICS_TAGS_VOLUME_NAME = "volumeName" - INSIGHTSMETRICS_FLUENT_TAG = "oms.api.InsightsMetrics" - REASON_OOM_KILLED = "oomkilled" - #Kubestate (common) - INSIGHTSMETRICS_TAGS_KUBESTATE_NAMESPACE = "container.azm.ms/kubestate" - INSIGHTSMETRICS_TAGS_KUBE_STATE_CREATIONTIME = "creationTime" - #Kubestate (deployments) - INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_DEPLOYMENT_STATE = "kube_deployment_status_replicas_ready" - INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_NAME = "deployment" - INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_CREATIONTIME = "creationTime" - INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STRATEGY = "deploymentStrategy" - INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_SPEC_REPLICAS = "spec_replicas" - INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STATUS_REPLICAS_UPDATED = "status_replicas_updated" - INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STATUS_REPLICAS_AVAILABLE = 
"status_replicas_available" - #Kubestate (HPA) - INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_HPA_STATE = "kube_hpa_status_current_replicas" - INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_NAME = "hpa" - INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_MAX_REPLICAS = "spec_max_replicas" - INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_MIN_REPLICAS = "spec_min_replicas" - INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_SCALE_TARGET_KIND = "targetKind" - INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_SCALE_TARGET_NAME = "targetName" - INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_STATUS_DESIRED_REPLICAS = "status_desired_replicas" - - INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_STATUS_LAST_SCALE_TIME = "lastScaleTime" - # MDM Metric names - MDM_OOM_KILLED_CONTAINER_COUNT = "oomKilledContainerCount" - MDM_CONTAINER_RESTART_COUNT = "restartingContainerCount" - MDM_POD_READY_PERCENTAGE = "podReadyPercentage" - MDM_STALE_COMPLETED_JOB_COUNT = "completedJobsCount" - MDM_DISK_USED_PERCENTAGE = "diskUsedPercentage" - MDM_CONTAINER_CPU_UTILIZATION_METRIC = "cpuExceededPercentage" - MDM_CONTAINER_MEMORY_RSS_UTILIZATION_METRIC = "memoryRssExceededPercentage" - MDM_CONTAINER_MEMORY_WORKING_SET_UTILIZATION_METRIC = "memoryWorkingSetExceededPercentage" - MDM_PV_UTILIZATION_METRIC = "pvUsageExceededPercentage" - MDM_NODE_CPU_USAGE_PERCENTAGE = "cpuUsagePercentage" - MDM_NODE_MEMORY_RSS_PERCENTAGE = "memoryRssPercentage" - MDM_NODE_MEMORY_WORKING_SET_PERCENTAGE = "memoryWorkingSetPercentage" + INSIGHTSMETRICS_TAGS_ORIGIN = "container.azm.ms" + INSIGHTSMETRICS_TAGS_CLUSTERID = "container.azm.ms/clusterId" + INSIGHTSMETRICS_TAGS_CLUSTERNAME = "container.azm.ms/clusterName" + INSIGHTSMETRICS_TAGS_GPU_VENDOR = "gpuVendor" + INSIGHTSMETRICS_TAGS_GPU_NAMESPACE = "container.azm.ms/gpu" + INSIGHTSMETRICS_TAGS_GPU_MODEL = "gpuModel" + INSIGHTSMETRICS_TAGS_GPU_ID = "gpuId" + INSIGHTSMETRICS_TAGS_CONTAINER_NAME = "containerName" + INSIGHTSMETRICS_TAGS_CONTAINER_ID = "containerName" + INSIGHTSMETRICS_TAGS_K8SNAMESPACE = "k8sNamespace" + INSIGHTSMETRICS_TAGS_CONTROLLER_NAME = "controllerName" + INSIGHTSMETRICS_TAGS_CONTROLLER_KIND = "controllerKind" + INSIGHTSMETRICS_TAGS_POD_UID = "podUid" + INSIGTHTSMETRICS_TAGS_PV_NAMESPACE = "container.azm.ms/pv" + INSIGHTSMETRICS_TAGS_PVC_NAME = "pvcName" + INSIGHTSMETRICS_TAGS_PVC_NAMESPACE = "pvcNamespace" + INSIGHTSMETRICS_TAGS_POD_NAME = "podName" + INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES = "pvCapacityBytes" + INSIGHTSMETRICS_TAGS_VOLUME_NAME = "volumeName" + INSIGHTSMETRICS_FLUENT_TAG = "oms.api.InsightsMetrics" + REASON_OOM_KILLED = "oomkilled" + #Kubestate (common) + INSIGHTSMETRICS_TAGS_KUBESTATE_NAMESPACE = "container.azm.ms/kubestate" + INSIGHTSMETRICS_TAGS_KUBE_STATE_CREATIONTIME = "creationTime" + #Kubestate (deployments) + INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_DEPLOYMENT_STATE = "kube_deployment_status_replicas_ready" + INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_NAME = "deployment" + INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_CREATIONTIME = "creationTime" + INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STRATEGY = "deploymentStrategy" + INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_SPEC_REPLICAS = "spec_replicas" + INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STATUS_REPLICAS_UPDATED = "status_replicas_updated" + INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STATUS_REPLICAS_AVAILABLE = "status_replicas_available" + #Kubestate (HPA) + INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_HPA_STATE = "kube_hpa_status_current_replicas" + INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_NAME = "hpa" + INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_MAX_REPLICAS = 
"spec_max_replicas" + INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_MIN_REPLICAS = "spec_min_replicas" + INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_SCALE_TARGET_KIND = "targetKind" + INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_SCALE_TARGET_NAME = "targetName" + INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_STATUS_DESIRED_REPLICAS = "status_desired_replicas" + + INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_STATUS_LAST_SCALE_TIME = "lastScaleTime" + # MDM Metric names + MDM_OOM_KILLED_CONTAINER_COUNT = "oomKilledContainerCount" + MDM_CONTAINER_RESTART_COUNT = "restartingContainerCount" + MDM_POD_READY_PERCENTAGE = "podReadyPercentage" + MDM_STALE_COMPLETED_JOB_COUNT = "completedJobsCount" + MDM_DISK_USED_PERCENTAGE = "diskUsedPercentage" + MDM_CONTAINER_CPU_UTILIZATION_METRIC = "cpuExceededPercentage" + MDM_CONTAINER_MEMORY_RSS_UTILIZATION_METRIC = "memoryRssExceededPercentage" + MDM_CONTAINER_MEMORY_WORKING_SET_UTILIZATION_METRIC = "memoryWorkingSetExceededPercentage" + MDM_PV_UTILIZATION_METRIC = "pvUsageExceededPercentage" + MDM_NODE_CPU_USAGE_PERCENTAGE = "cpuUsagePercentage" + MDM_NODE_MEMORY_RSS_PERCENTAGE = "memoryRssPercentage" + MDM_NODE_MEMORY_WORKING_SET_PERCENTAGE = "memoryWorkingSetPercentage" CONTAINER_TERMINATED_RECENTLY_IN_MINUTES = 5 OBJECT_NAME_K8S_CONTAINER = "K8SContainer" @@ -88,6 +88,8 @@ class Constants KUBE_STATE_TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 15 ZERO_FILL_METRICS_INTERVAL_IN_MINUTES = 30 MDM_TIME_SERIES_FLUSHED_IN_LAST_HOUR = "MdmTimeSeriesFlushedInLastHour" + MDM_EXCEPTION_TELEMETRY_METRIC = "AKSCustomMetricsMdmExceptions" + MDM_EXCEPTIONS_METRIC_FLUSH_INTERVAL = 30 #Pod Statuses POD_STATUS_TERMINATING = "Terminating" diff --git a/source/plugins/ruby/out_mdm.rb b/source/plugins/ruby/out_mdm.rb index 1c805255a..6238eb51a 100644 --- a/source/plugins/ruby/out_mdm.rb +++ b/source/plugins/ruby/out_mdm.rb @@ -50,6 +50,10 @@ def initialize @cluster_identity = nil @isArcK8sCluster = false @get_access_token_backoff_expiry = Time.now + + @mdm_exceptions_hash = {} + @mdm_exceptions_count = 0 + @mdm_exception_telemetry_time_tracker = DateTime.now.to_time.to_i end def configure(conf) @@ -221,10 +225,49 @@ def format(tag, time, record) end end + def exception_aggregator(error) + begin + errorStr = error.to_s + if (@mdm_exceptions_hash[errorStr].nil?) 
+ @mdm_exceptions_hash[errorStr] = 1 + else + @mdm_exceptions_hash[errorStr] += 1 + end + #Keeping track of all exceptions to send the total in the last flush interval as a metric + @mdm_exceptions_count += 1 + rescue => error + @log.info "Error in MDM exception_aggregator method: #{error}" + ApplicationInsightsUtility.sendExceptionTelemetry(error) + end + end + + def flush_mdm_exception_telemetry + begin + #Flush out exception telemetry as a metric for the last 30 minutes + timeDifference = (DateTime.now.to_time.to_i - @mdm_exception_telemetry_time_tracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if (timeDifferenceInMinutes >= Constants::MDM_EXCEPTIONS_METRIC_FLUSH_INTERVAL) + telemetryProperties = {} + telemetryProperties["ExceptionsHashForFlushInterval"] = @mdm_exceptions_hash.to_json + telemetryProperties["FlushInterval"] = Constants::MDM_EXCEPTIONS_METRIC_FLUSH_INTERVAL + ApplicationInsightsUtility.sendMetricTelemetry(Constants::MDM_EXCEPTION_TELEMETRY_METRIC, @mdm_exceptions_count, telemetryProperties) + # Resetting values after flushing + @mdm_exceptions_count = 0 + @mdm_exceptions_hash = {} + @mdm_exception_telemetry_time_tracker = DateTime.now.to_time.to_i + end + rescue => error + @log.info "Error in flush_mdm_exception_telemetry method: #{error}" + ApplicationInsightsUtility.sendExceptionTelemetry(error) + end + end + # This method is called every flush interval. Send the buffer chunk to MDM. # 'chunk' is a buffer chunk that includes multiple formatted records def write(chunk) begin + # Adding this before trying to flush out metrics, since adding after can lead to metrics never being sent + flush_mdm_exception_telemetry if (!@first_post_attempt_made || (Time.now > @last_post_attempt_time + retry_mdm_post_wait_minutes * 60)) && @can_send_data_to_mdm post_body = [] chunk.msgpack_each { |(tag, record)| @@ -247,7 +290,8 @@ def write(chunk) end end rescue Exception => e - ApplicationInsightsUtility.sendExceptionTelemetry(e) + # Adding exceptions to hash to aggregate and send telemetry for all write errors + exception_aggregator(e) @log.info "Exception when writing to MDM: #{e}" raise e end @@ -282,7 +326,6 @@ def send_to_mdm(post_body) else @log.info "Failed to Post Metrics to MDM : #{e} Response: #{response}" end - #@log.info "MDM request : #{post_body}" @log.debug_backtrace(e.backtrace) if !response.code.empty? 
&& response.code == 403.to_s @log.info "Response Code #{response.code} Updating @last_post_attempt_time" @@ -297,15 +340,15 @@ def send_to_mdm(post_body) @log.info "HTTPServerException when POSTing Metrics to MDM #{e} Response: #{response}" raise e end + # Adding exceptions to hash to aggregate and send telemetry for all 400 error codes + exception_aggregator(e) rescue Errno::ETIMEDOUT => e @log.info "Timed out when POSTing Metrics to MDM : #{e} Response: #{response}" @log.debug_backtrace(e.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(e) raise e rescue Exception => e @log.info "Exception POSTing Metrics to MDM : #{e} Response: #{response}" @log.debug_backtrace(e.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(e) raise e end end From 18c27dda3e8af3187502f4ecfc9475dea74f3ce5 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Mon, 23 Nov 2020 08:37:38 -0800 Subject: [PATCH 044/301] grwehner/mdm custom metric regions (#471) Remove custom metrics region check for public cloud --- build/linux/installer/conf/container.conf | 2 -- build/linux/installer/conf/kube.conf | 3 --- .../templates/omsagent-rs-configmap.yaml | 3 --- kubernetes/linux/main.sh | 11 +++++++++++ kubernetes/omsagent.yaml | 3 --- kubernetes/windows/main.ps1 | 6 ++++++ .../preview/health/omsagent-template-aks-engine.yaml | 2 -- scripts/preview/health/omsagent-template.yaml | 2 -- source/plugins/ruby/CustomMetricsUtils.rb | 12 +++--------- source/plugins/ruby/filter_cadvisor2mdm.rb | 3 +-- source/plugins/ruby/filter_inventory2mdm.rb | 3 +-- source/plugins/ruby/filter_telegraf2mdm.rb | 3 +-- source/plugins/ruby/in_kube_podinventory.rb | 3 +-- source/plugins/ruby/podinventory_to_mdm.rb | 4 ++-- 14 files changed, 26 insertions(+), 34 deletions(-) diff --git a/build/linux/installer/conf/container.conf b/build/linux/installer/conf/container.conf index f7e6e1da9..958a85eb6 100644 --- a/build/linux/installer/conf/container.conf +++ b/build/linux/installer/conf/container.conf @@ -45,14 +45,12 @@ #custom_metrics_mdm filter plugin type filter_cadvisor2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,memoryRssBytes,pvUsedBytes log_level info type filter_telegraf2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth log_level debug diff --git a/build/linux/installer/conf/kube.conf b/build/linux/installer/conf/kube.conf index dbb4db0da..121472eba 100644 --- a/build/linux/installer/conf/kube.conf +++ b/build/linux/installer/conf/kube.conf @@ -13,7 +13,6 @@ tag oms.containerinsights.KubePodInventory run_interval 60 log_level debug - custom_metrics_azure_regions 
eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth #Kubernetes events @@ -66,14 +65,12 @@ type filter_inventory2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth log_level info #custom_metrics_mdm filter plugin for perf data from windows nodes type filter_cadvisor2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,pvUsedBytes log_level info diff --git a/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml b/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml index 475b17a46..e1bc969cb 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml @@ -18,7 +18,6 @@ data: tag oms.containerinsights.KubePodInventory run_interval 60 log_level debug - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth #Kubernetes events @@ -70,14 +69,12 @@ data: type filter_inventory2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth log_level info # custom_metrics_mdm filter plugin for perf data from windows nodes type filter_cadvisor2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes log_level info diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index b093eb74b..a2ba6a1d1 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -150,6 +150,17 @@ else echo "LA Onboarding:Workspace Id not mounted, skipping the telemetry check" fi +# Set environment variable for if public cloud by checking the workspace domain. 
+if [ -z $domain ]; then + ClOUD_ENVIRONMENT="unknown" +elif [ $domain == "opinsights.azure.com" ]; then + CLOUD_ENVIRONMENT="public" +else + CLOUD_ENVIRONMENT="national" +fi +export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT +echo "export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT" >> ~/.bashrc + #Parse the configmap to set the right environment variables. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser.rb diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 7d07eafcd..2155361e9 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -64,7 +64,6 @@ data: tag oms.containerinsights.KubePodInventory run_interval 60 log_level debug - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth #Kubernetes events @@ -117,14 +116,12 @@ data: type filter_inventory2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth log_level info #custom_metrics_mdm filter plugin for perf data from windows nodes type filter_cadvisor2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,pvUsedBytes log_level info diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 2e8659601..d32e5068a 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -43,15 +43,21 @@ function Start-FileSystemWatcher { function Set-EnvironmentVariables { $domain = "opinsights.azure.com" + $cloud_environment = "public" if (Test-Path /etc/omsagent-secret/DOMAIN) { # TODO: Change to omsagent-secret before merging $domain = Get-Content /etc/omsagent-secret/DOMAIN + $cloud_environment = "national" } # Set DOMAIN [System.Environment]::SetEnvironmentVariable("DOMAIN", $domain, "Process") [System.Environment]::SetEnvironmentVariable("DOMAIN", $domain, "Machine") + # Set CLOUD_ENVIRONMENT + [System.Environment]::SetEnvironmentVariable("CLOUD_ENVIRONMENT", $cloud_environment, "Process") + [System.Environment]::SetEnvironmentVariable("CLOUD_ENVIRONMENT", $cloud_environment, "Machine") + $wsID = "" if (Test-Path /etc/omsagent-secret/WSID) { # TODO: Change to omsagent-secret before merging diff --git a/scripts/preview/health/omsagent-template-aks-engine.yaml b/scripts/preview/health/omsagent-template-aks-engine.yaml index 5526602c0..5e063fd54 100644 --- a/scripts/preview/health/omsagent-template-aks-engine.yaml +++ b/scripts/preview/health/omsagent-template-aks-engine.yaml @@ -108,14 +108,12 @@ data: type filter_inventory2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westEurope log_level info # custom_metrics_mdm filter plugin for perf data from windows nodes type filter_cadvisor2mdm - 
custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westEurope metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes log_level info diff --git a/scripts/preview/health/omsagent-template.yaml b/scripts/preview/health/omsagent-template.yaml index 6e3a52020..e58e9c33f 100644 --- a/scripts/preview/health/omsagent-template.yaml +++ b/scripts/preview/health/omsagent-template.yaml @@ -108,14 +108,12 @@ data: type filter_inventory2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westEurope log_level info # custom_metrics_mdm filter plugin for perf data from windows nodes type filter_cadvisor2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westEurope metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes log_level info diff --git a/source/plugins/ruby/CustomMetricsUtils.rb b/source/plugins/ruby/CustomMetricsUtils.rb index a19580630..220313e6b 100644 --- a/source/plugins/ruby/CustomMetricsUtils.rb +++ b/source/plugins/ruby/CustomMetricsUtils.rb @@ -6,21 +6,15 @@ def initialize end class << self - def check_custom_metrics_availability(custom_metric_regions) + def check_custom_metrics_availability aks_region = ENV['AKS_REGION'] aks_resource_id = ENV['AKS_RESOURCE_ID'] + aks_cloud_environment = ENV['CLOUD_ENVIRONMENT'] if aks_region.to_s.empty? || aks_resource_id.to_s.empty? return false # This will also take care of AKS-Engine Scenario. AKS_REGION/AKS_RESOURCE_ID is not set for AKS-Engine. Only ACS_RESOURCE_NAME is set end - custom_metrics_regions_arr = custom_metric_regions.split(',') - custom_metrics_regions_hash = custom_metrics_regions_arr.map {|x| [x.downcase,true]}.to_h - - if custom_metrics_regions_hash.key?(aks_region.downcase) - true - else - false - end + return aks_cloud_environment.to_s.downcase == 'public' end end end \ No newline at end of file diff --git a/source/plugins/ruby/filter_cadvisor2mdm.rb b/source/plugins/ruby/filter_cadvisor2mdm.rb index 3bc674ea8..2423ad024 100644 --- a/source/plugins/ruby/filter_cadvisor2mdm.rb +++ b/source/plugins/ruby/filter_cadvisor2mdm.rb @@ -15,7 +15,6 @@ class CAdvisor2MdmFilter < Filter config_param :enable_log, :integer, :default => 0 config_param :log_path, :string, :default => "/var/opt/microsoft/docker-cimprov/log/filter_cadvisor2mdm.log" - config_param :custom_metrics_azure_regions, :string config_param :metrics_to_collect, :string, :default => "Constants::CPU_USAGE_NANO_CORES,Constants::MEMORY_WORKING_SET_BYTES,Constants::MEMORY_RSS_BYTES,Constants::PV_USED_BYTES" @@hostName = (OMS::Common.get_hostname) @@ -42,7 +41,7 @@ def configure(conf) def start super begin - @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability(@custom_metrics_azure_regions) + @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability @metrics_to_collect_hash = build_metrics_hash @log.debug "After check_custom_metrics_availability process_incoming_stream #{@process_incoming_stream}" @@containerResourceUtilTelemetryTimeTracker = DateTime.now.to_time.to_i diff --git a/source/plugins/ruby/filter_inventory2mdm.rb b/source/plugins/ruby/filter_inventory2mdm.rb index b5ef587ff..38ccab885 100644 --- a/source/plugins/ruby/filter_inventory2mdm.rb +++ b/source/plugins/ruby/filter_inventory2mdm.rb @@ -13,7 +13,6 @@ class Inventory2MdmFilter < Filter config_param :enable_log, :integer, :default => 0 config_param :log_path, :string, :default => 
'/var/opt/microsoft/docker-cimprov/log/filter_inventory2mdm.log' - config_param :custom_metrics_azure_regions, :string @@node_count_metric_name = 'nodesCount' @@pod_count_metric_name = 'podCount' @@ -98,7 +97,7 @@ def configure(conf) def start super - @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability(@custom_metrics_azure_regions) + @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability @log.debug "After check_custom_metrics_availability process_incoming_stream #{@process_incoming_stream}" end diff --git a/source/plugins/ruby/filter_telegraf2mdm.rb b/source/plugins/ruby/filter_telegraf2mdm.rb index 98d258ea5..88ae428d1 100644 --- a/source/plugins/ruby/filter_telegraf2mdm.rb +++ b/source/plugins/ruby/filter_telegraf2mdm.rb @@ -15,7 +15,6 @@ class Telegraf2MdmFilter < Filter config_param :enable_log, :integer, :default => 0 config_param :log_path, :string, :default => "/var/opt/microsoft/docker-cimprov/log/filter_telegraf2mdm.log" - config_param :custom_metrics_azure_regions, :string @process_incoming_stream = true @@ -36,7 +35,7 @@ def configure(conf) def start super begin - @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability(@custom_metrics_azure_regions) + @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability @log.debug "After check_custom_metrics_availability process_incoming_stream #{@process_incoming_stream}" rescue => errorStr @log.info "Error initializing plugin #{errorStr}" diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 4880d80e7..bba3e920f 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -36,11 +36,10 @@ def initialize config_param :run_interval, :time, :default => 60 config_param :tag, :string, :default => "oms.containerinsights.KubePodInventory" - config_param :custom_metrics_azure_regions, :string def configure(conf) super - @inventoryToMdmConvertor = Inventory2MdmConvertor.new(@custom_metrics_azure_regions) + @inventoryToMdmConvertor = Inventory2MdmConvertor.new() end def start diff --git a/source/plugins/ruby/podinventory_to_mdm.rb b/source/plugins/ruby/podinventory_to_mdm.rb index 834515969..77370e284 100644 --- a/source/plugins/ruby/podinventory_to_mdm.rb +++ b/source/plugins/ruby/podinventory_to_mdm.rb @@ -80,14 +80,14 @@ class Inventory2MdmConvertor @@pod_phase_values = ["Running", "Pending", "Succeeded", "Failed", "Unknown"] @process_incoming_stream = false - def initialize(custom_metrics_azure_regions) + def initialize() @log_path = "/var/opt/microsoft/docker-cimprov/log/mdm_metrics_generator.log" @log = Logger.new(@log_path, 1, 5000000) @pod_count_hash = {} @no_phase_dim_values_hash = {} @pod_count_by_phase = {} @pod_uids = {} - @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability(custom_metrics_azure_regions) + @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability @log.debug "After check_custom_metrics_availability process_incoming_stream #{@process_incoming_stream}" @log.debug { "Starting podinventory_to_mdm plugin" } end From a5c12e9a5e28dc27b8288d21bc72b5937b93e370 Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Thu, 3 Dec 2020 17:20:51 -0800 Subject: [PATCH 045/301] updaitng rs limit to 1gb (#474) --- charts/azuremonitor-containers/values.yaml | 2 +- kubernetes/omsagent.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 76ea0a26d..e8acda20e 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -133,4 +133,4 @@ omsagent: memory: 250Mi limits: cpu: 1 - memory: 750Mi + memory: 1Gi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 2155361e9..296de02bf 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -504,7 +504,7 @@ spec: resources: limits: cpu: 1 - memory: 750Mi + memory: 1Gi requests: cpu: 150m memory: 250Mi From 7453fd4e3d8a918a70683a5a3a8344bd550a5349 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Thu, 10 Dec 2020 10:45:09 -0800 Subject: [PATCH 046/301] grwehner/pv inventory (#455) Add fluentd plugin to request persistent volume info from the kubernetes api and send to LA --- build/linux/installer/conf/kube.conf | 23 ++ .../installer/datafiles/base_container.data | 1 + kubernetes/omsagent.yaml | 24 ++ source/plugins/ruby/constants.rb | 4 + source/plugins/ruby/in_kube_pvinventory.rb | 253 ++++++++++++++++++ 5 files changed, 305 insertions(+) create mode 100644 source/plugins/ruby/in_kube_pvinventory.rb diff --git a/build/linux/installer/conf/kube.conf b/build/linux/installer/conf/kube.conf index 121472eba..fb566c360 100644 --- a/build/linux/installer/conf/kube.conf +++ b/build/linux/installer/conf/kube.conf @@ -15,6 +15,14 @@ log_level debug + #Kubernetes Persistent Volume inventory + + type kubepvinventory + tag oms.containerinsights.KubePVInventory + run_interval 60 + log_level debug + + #Kubernetes events type kubeevents @@ -95,6 +103,21 @@ max_retry_wait 5m + + type out_oms + log_level debug + num_threads 5 + buffer_chunk_limit 4m + buffer_type file + buffer_path %STATE_DIR_WS%/state/out_oms_kubepv*.buffer + buffer_queue_limit 20 + buffer_queue_full_action drop_oldest_chunk + flush_interval 20s + retry_limit 10 + retry_wait 5s + max_retry_wait 5m + + type out_oms log_level debug diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index ca2538b79..ec42d5967 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -22,6 +22,7 @@ MAINTAINER: 'Microsoft Corporation' /opt/microsoft/omsagent/plugin/filter_container.rb; source/plugins/ruby/filter_container.rb; 644; root; root /opt/microsoft/omsagent/plugin/in_kube_podinventory.rb; source/plugins/ruby/in_kube_podinventory.rb; 644; root; root +/opt/microsoft/omsagent/plugin/in_kube_pvinventory.rb; source/plugins/ruby/in_kube_pvinventory.rb; 644; root; root /opt/microsoft/omsagent/plugin/in_kube_events.rb; source/plugins/ruby/in_kube_events.rb; 644; root; root /opt/microsoft/omsagent/plugin/KubernetesApiClient.rb; source/plugins/ruby/KubernetesApiClient.rb; 644; root; root diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 296de02bf..26c7ae9a0 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -21,6 +21,7 @@ rules: "nodes/proxy", "namespaces", "services", + "persistentvolumes" ] verbs: ["list", "get", "watch"] - apiGroups: ["apps", "extensions", "autoscaling"] @@ -66,6 +67,14 @@ data: log_level debug + #Kubernetes Persistent Volume inventory + + type kubepvinventory + tag oms.containerinsights.KubePVInventory + run_interval 60 + log_level debug + + #Kubernetes events type kubeevents @@ -146,6 +155,21 @@ data: max_retry_wait 5m + + type out_oms + log_level debug + num_threads 5 + 
buffer_chunk_limit 4m + buffer_type file + buffer_path %STATE_DIR_WS%/state/out_oms_kubepv*.buffer + buffer_queue_limit 20 + buffer_queue_full_action drop_oldest_chunk + flush_interval 20s + retry_limit 10 + retry_wait 5s + max_retry_wait 5m + + type out_oms log_level debug diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index 079584c7b..cf41900dc 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -77,6 +77,9 @@ class Constants OMSAGENT_ZERO_FILL = "omsagent" KUBESYSTEM_NAMESPACE_ZERO_FILL = "kube-system" VOLUME_NAME_ZERO_FILL = "-" + PV_TYPES =["awsElasticBlockStore", "azureDisk", "azureFile", "cephfs", "cinder", "csi", "fc", "flexVolume", + "flocker", "gcePersistentDisk", "glusterfs", "hostPath", "iscsi", "local", "nfs", + "photonPersistentDisk", "portworxVolume", "quobyte", "rbd", "scaleIO", "storageos", "vsphereVolume"] #Telemetry constants CONTAINER_METRICS_HEART_BEAT_EVENT = "ContainerMetricsMdmHeartBeatEvent" @@ -84,6 +87,7 @@ class Constants CONTAINER_RESOURCE_UTIL_HEART_BEAT_EVENT = "ContainerResourceUtilMdmHeartBeatEvent" PV_USAGE_HEART_BEAT_EVENT = "PVUsageMdmHeartBeatEvent" PV_KUBE_SYSTEM_METRICS_ENABLED_EVENT = "CollectPVKubeSystemMetricsEnabled" + PV_INVENTORY_HEART_BEAT_EVENT = "KubePVInventoryHeartBeatEvent" TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 10 KUBE_STATE_TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 15 ZERO_FILL_METRICS_INTERVAL_IN_MINUTES = 30 diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb new file mode 100644 index 000000000..b0e09c85b --- /dev/null +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -0,0 +1,253 @@ +module Fluent + class Kube_PVInventory_Input < Input + Plugin.register_input("kubepvinventory", self) + + @@hostName = (OMS::Common.get_hostname) + + def initialize + super + require "yaml" + require "yajl/json_gem" + require "yajl" + require "time" + require_relative "KubernetesApiClient" + require_relative "ApplicationInsightsUtility" + require_relative "oms_common" + require_relative "omslog" + require_relative "constants" + + # Response size is around 1500 bytes per PV + @PV_CHUNK_SIZE = "5000" + @pvTypeToCountHash = {} + end + + config_param :run_interval, :time, :default => 60 + config_param :tag, :string, :default => "oms.containerinsights.KubePVInventory" + + def configure(conf) + super + end + + def start + if @run_interval + @finished = false + @condition = ConditionVariable.new + @mutex = Mutex.new + @thread = Thread.new(&method(:run_periodic)) + @@pvTelemetryTimeTracker = DateTime.now.to_time.to_i + end + end + + def shutdown + if @run_interval + @mutex.synchronize { + @finished = true + @condition.signal + } + @thread.join + end + end + + def enumerate + begin + pvInventory = nil + telemetryFlush = false + @pvTypeToCountHash = {} + currentTime = Time.now + batchTime = currentTime.utc.iso8601 + + continuationToken = nil + $log.info("in_kube_pvinventory::enumerate : Getting PVs from Kube API @ #{Time.now.utc.iso8601}") + continuationToken, pvInventory = KubernetesApiClient.getResourcesAndContinuationToken("persistentvolumes?limit=#{@PV_CHUNK_SIZE}") + $log.info("in_kube_pvinventory::enumerate : Done getting PVs from Kube API @ #{Time.now.utc.iso8601}") + + if (!pvInventory.nil? && !pvInventory.empty? && pvInventory.key?("items") && !pvInventory["items"].nil? && !pvInventory["items"].empty?) 
+ parse_and_emit_records(pvInventory, batchTime) + else + $log.warn "in_kube_pvinventory::enumerate:Received empty pvInventory" + end + + # If we receive a continuation token, make calls, process and flush data until we have processed all data + while (!continuationToken.nil? && !continuationToken.empty?) + continuationToken, pvInventory = KubernetesApiClient.getResourcesAndContinuationToken("persistentvolumes?limit=#{@PV_CHUNK_SIZE}&continue=#{continuationToken}") + if (!pvInventory.nil? && !pvInventory.empty? && pvInventory.key?("items") && !pvInventory["items"].nil? && !pvInventory["items"].empty?) + parse_and_emit_records(pvInventory, batchTime) + else + $log.warn "in_kube_pvinventory::enumerate:Received empty pvInventory" + end + end + + # Setting this to nil so that we dont hold memory until GC kicks in + pvInventory = nil + + # Adding telemetry to send pod telemetry every 10 minutes + timeDifference = (DateTime.now.to_time.to_i - @@pvTelemetryTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + telemetryFlush = true + end + + # Flush AppInsights telemetry once all the processing is done + if telemetryFlush == true + telemetryProperties = {} + telemetryProperties["CountsOfPVTypes"] = @pvTypeToCountHash + ApplicationInsightsUtility.sendCustomEvent(Constants::PV_INVENTORY_HEART_BEAT_EVENT, telemetryProperties) + @@pvTelemetryTimeTracker = DateTime.now.to_time.to_i + end + + rescue => errorStr + $log.warn "in_kube_pvinventory::enumerate:Failed in enumerate: #{errorStr}" + $log.debug_backtrace(errorStr.backtrace) + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + end # end enumerate + + def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) + currentTime = Time.now + emitTime = currentTime.to_f + eventStream = MultiEventStream.new + + begin + records = [] + pvInventory["items"].each do |item| + + # Node, pod, & usage info can be found by joining with pvUsedBytes metric using PVCNamespace/PVCName + record = {} + record["CollectionTime"] = batchTime + record["ClusterId"] = KubernetesApiClient.getClusterId + record["ClusterName"] = KubernetesApiClient.getClusterName + record["PVName"] = item["metadata"]["name"] + record["PVStatus"] = item["status"]["phase"] + record["PVAccessModes"] = item["spec"]["accessModes"].join(', ') + record["PVStorageClassName"] = item["spec"]["storageClassName"] + record["PVCapacityBytes"] = KubernetesApiClient.getMetricNumericValue("memory", item["spec"]["capacity"]["storage"]) + record["PVCreationTimeStamp"] = item["metadata"]["creationTimestamp"] + + # Optional values + pvcNamespace, pvcName = getPVCInfo(item) + type, typeInfo = getTypeInfo(item) + record["PVCNamespace"] = pvcNamespace + record["PVCName"] = pvcName + record["PVType"] = type + record["PVTypeInfo"] = typeInfo + + records.push(record) + + # Record telemetry + if type == nil + type = "empty" + end + if (@pvTypeToCountHash.has_key? type) + @pvTypeToCountHash[type] += 1 + else + @pvTypeToCountHash[type] = 1 + end + end + + records.each do |record| + if !record.nil? 
+ wrapper = { + "DataType" => "KUBE_PV_INVENTORY_BLOB", + "IPName" => "ContainerInsights", + "DataItems" => [record.each { |k, v| record[k] = v }], + } + eventStream.add(emitTime, wrapper) if wrapper + end + end + + router.emit_stream(@tag, eventStream) if eventStream + + rescue => errorStr + $log.warn "Failed in parse_and_emit_record for in_kube_pvinventory: #{errorStr}" + $log.debug_backtrace(errorStr.backtrace) + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + end + + def getPVCInfo(item) + begin + if !item["spec"].nil? && !item["spec"]["claimRef"].nil? + claimRef = item["spec"]["claimRef"] + pvcNamespace = claimRef["namespace"] + pvcName = claimRef["name"] + return pvcNamespace, pvcName + end + rescue => errorStr + $log.warn "Failed in getPVCInfo for in_kube_pvinventory: #{errorStr}" + $log.debug_backtrace(errorStr.backtrace) + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + + # No PVC or an error + return nil, nil + end + + def getTypeInfo(item) + begin + if !item["spec"].nil? + (Constants::PV_TYPES).each do |pvType| + + # PV is this type + if !item["spec"][pvType].nil? + + # Get additional info if azure disk/file + typeInfo = {} + if pvType == "azureDisk" + azureDisk = item["spec"]["azureDisk"] + typeInfo["DiskName"] = azureDisk["diskName"] + typeInfo["DiskUri"] = azureDisk["diskURI"] + elsif pvType == "azureFile" + typeInfo["FileShareName"] = item["spec"]["azureFile"]["shareName"] + end + + # Can only have one type: return right away when found + return pvType, typeInfo + + end + end + end + rescue => errorStr + $log.warn "Failed in getTypeInfo for in_kube_pvinventory: #{errorStr}" + $log.debug_backtrace(errorStr.backtrace) + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + + # No matches from list of types or an error + return nil, {} + end + + + def run_periodic + @mutex.lock + done = @finished + @nextTimeToRun = Time.now + @waitTimeout = @run_interval + until done + @nextTimeToRun = @nextTimeToRun + @run_interval + @now = Time.now + if @nextTimeToRun <= @now + @waitTimeout = 1 + @nextTimeToRun = @now + else + @waitTimeout = @nextTimeToRun - @now + end + @condition.wait(@mutex, @waitTimeout) + done = @finished + @mutex.unlock + if !done + begin + $log.info("in_kube_pvinventory::run_periodic.enumerate.start #{Time.now.utc.iso8601}") + enumerate + $log.info("in_kube_pvinventory::run_periodic.enumerate.end #{Time.now.utc.iso8601}") + rescue => errorStr + $log.warn "in_kube_pvinventory::run_periodic: enumerate Failed to retrieve pod inventory: #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + end + @mutex.lock + end + @mutex.unlock + end + + end # Kube_PVInventory_Input +end # module \ No newline at end of file From 24b709f9e3c3b18779102b491fc98b87a99d1335 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 15 Dec 2020 09:42:52 -0800 Subject: [PATCH 047/301] Gangams/fix for build release pipeline issue (#476) * use isolated cdpx acr * correct comment --- .pipelines/get-aad-app-creds-from-kv.sh | 14 ++++++++++++++ ...ll-from-cdpx-and-push-to-ci-acr-linux-image.sh | 15 ++++++++++++--- ...-from-cdpx-and-push-to-ci-acr-windows-image.sh | 14 +++++++++++--- 3 files changed, 37 insertions(+), 6 deletions(-) diff --git a/.pipelines/get-aad-app-creds-from-kv.sh b/.pipelines/get-aad-app-creds-from-kv.sh index 8ef56cddb..a0ba464cc 100755 --- a/.pipelines/get-aad-app-creds-from-kv.sh +++ b/.pipelines/get-aad-app-creds-from-kv.sh @@ -11,6 +11,8 @@ do KV) KV=$VALUE ;; KVSECRETNAMEAPPID) 
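For reference, the new in_kube_pvinventory plugin shown above emits one record per persistent volume on the oms.containerinsights.KubePVInventory tag. The hash below is a hand-written illustration of such a record: every field name comes from parse_and_emit_records, while all values (names, capacity, timestamps, placeholders in angle brackets) are made up for the example.

  example_pv_record = {
    "CollectionTime"      => "2020-12-10T18:45:00Z",   # batchTime of the enumerate pass
    "ClusterId"           => "/subscriptions/<subscription-id>/resourceGroups/<rg>/providers/Microsoft.ContainerService/managedClusters/<cluster-name>",
    "ClusterName"         => "<cluster-name>",
    "PVName"              => "pvc-0a1b2c3d-example",
    "PVStatus"            => "Bound",
    "PVAccessModes"       => "ReadWriteOnce",
    "PVStorageClassName"  => "managed-premium",
    "PVCapacityBytes"     => 5368709120,                # 5Gi converted by getMetricNumericValue
    "PVCreationTimeStamp" => "2020-12-01T00:00:00Z",
    "PVCNamespace"        => "default",                 # from spec.claimRef when the PV is claimed
    "PVCName"             => "my-app-data",
    "PVType"              => "azureDisk",               # one of Constants::PV_TYPES, or nil
    "PVTypeInfo"          => { "DiskName" => "my-disk", "DiskUri" => "<disk-uri>" },
  }

As the plugin's own comment notes, node and pod usage can then be joined against this record through the PVCNamespace/PVCName pair carried by the pvUsedBytes metric.
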
AppId=$VALUE ;; KVSECRETNAMEAPPSECRET) AppSecret=$VALUE ;; + KVSECRETNAMECDPXAPPID) CdpxAppId=$VALUE ;; + KVSECRETNAMECDPXAPPSECRET) CdpxAppSecret=$VALUE ;; *) esac done @@ -27,4 +29,16 @@ az keyvault secret download --file ~/acrappsecret --vault-name ${KV} --name ${A echo "downloaded the appsecret from KV:${KV} and KV secret:${AppSecret}" +echo "key vault secret name for cdpx appid:${KVSECRETNAMECDPXAPPID}" + +echo "key vault secret name for cdpx appsecret:${KVSECRETNAMECDPXAPPSECRET}" + +az keyvault secret download --file ~/cdpxacrappid --vault-name ${KV} --name ${CdpxAppId} + +echo "downloaded the appid from KV:${KV} and KV secret:${CdpxAppId}" + +az keyvault secret download --file ~/cdpxacrappsecret --vault-name ${KV} --name ${CdpxAppSecret} + +echo "downloaded the appsecret from KV:${KV} and KV secret:${CdpxAppSecret}" + echo "end: get app id and secret from specified key vault" diff --git a/.pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh b/.pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh index 638d3a937..3844ea185 100755 --- a/.pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh +++ b/.pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh @@ -25,12 +25,21 @@ ACR_APP_ID=$(cat ~/acrappid) ACR_APP_SECRET=$(cat ~/acrappsecret) echo "end: read appid and appsecret" +echo "start: read appid and appsecret for cdpx" +CDPX_ACR_APP_ID=$(cat ~/cdpxacrappid) +CDPX_ACR_APP_SECRET=$(cat ~/cdpxacrappsecret) +echo "end: read appid and appsecret which has read access on cdpx acr" + + +# Name of CDPX_ACR should be in this format :Naming convention: 'cdpx' + service tree id without '-' + two digit suffix like'00'/'01 +# suffix 00 primary and 01 secondary, and we only use primary +# This configured via pipeline variable echo "login to cdpxlinux acr:${CDPX_ACR}" -docker login $CDPX_ACR --username $ACR_APP_ID --password $ACR_APP_SECRET +docker login $CDPX_ACR --username $CDPX_ACR_APP_ID --password $CDPX_ACR_APP_SECRET echo "login to cdpxlinux acr completed: ${CDPX_ACR}" echo "pull agent image from cdpxlinux acr: ${CDPX_ACR}" -docker pull ${CDPX_ACR}/artifact/3170cdd2-19f0-4027-912b-1027311691a2/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} +docker pull ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} echo "pull image from cdpxlinux acr completed: ${CDPX_ACR}" echo "CI Release name is:"$CI_RELEASE @@ -41,7 +50,7 @@ echo "CI ACR : ${CI_ACR}" echo "CI AGENT REPOSITORY NAME : ${CI_AGENT_REPO}" echo "tag linux agent image" -docker tag ${CDPX_ACR}/artifact/3170cdd2-19f0-4027-912b-1027311691a2/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} ${CI_ACR}/public/azuremonitor/containerinsights/${CI_AGENT_REPO}:${imagetag} +docker tag ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} ${CI_ACR}/public/azuremonitor/containerinsights/${CI_AGENT_REPO}:${imagetag} echo "login ciprod acr":$CI_ACR docker login $CI_ACR --username $ACR_APP_ID --password $ACR_APP_SECRET diff --git a/.pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh b/.pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh index 066410af5..095a00039 100755 --- a/.pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh +++ b/.pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh @@ -25,12 +25,20 @@ ACR_APP_ID=$(cat ~/acrappid ) ACR_APP_SECRET=$(cat ~/acrappsecret) echo "end: read appid and appsecret" +echo "start: read appid and appsecret for cdpx" +CDPX_ACR_APP_ID=$(cat ~/cdpxacrappid) +CDPX_ACR_APP_SECRET=$(cat ~/cdpxacrappsecret) +echo "end: read appid and 
appsecret which has read access on cdpx acr" + +# Name of CDPX_ACR should be in this format :Naming convention: 'cdpx' + service tree id without '-' + two digit suffix like'00'/'01 +# suffix 00 primary and 01 secondary, and we only use primary +# This configured via pipeline variable echo "login to cdpxwindows acr:${CDPX_ACR}" -docker login $CDPX_ACR --username $ACR_APP_ID --password $ACR_APP_SECRET +docker login $CDPX_ACR --username $CDPX_ACR_APP_ID --password $CDPX_ACR_APP_SECRET echo "login to cdpxwindows acr:${CDPX_ACR} completed" echo "pull image from cdpxwin acr: ${CDPX_ACR}" -docker pull ${CDPX_ACR}/artifact/3170cdd2-19f0-4027-912b-1027311691a2/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} +docker pull ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} echo "pull image from cdpxwin acr completed: ${CDPX_ACR}" echo "CI Release name:"$CI_RELEASE @@ -40,7 +48,7 @@ imagetag="win-"$CI_RELEASE$CI_IMAGE_TAG_SUFFIX echo "agentimagetag="$imagetag echo "tag windows agent image" -docker tag ${CDPX_ACR}/artifact/3170cdd2-19f0-4027-912b-1027311691a2/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} ${CI_ACR}/public/azuremonitor/containerinsights/${CI_AGENT_REPO}:${imagetag} +docker tag ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} ${CI_ACR}/public/azuremonitor/containerinsights/${CI_AGENT_REPO}:${imagetag} echo "login to ${CI_ACR} acr" docker login $CI_ACR --username $ACR_APP_ID --password $ACR_APP_SECRET From 9061201be9b7578057479abf6e612a05ca412778 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Tue, 15 Dec 2020 12:26:25 -0800 Subject: [PATCH 048/301] add pv fluentd plugin config to helm rs config (#477) * add pv fluentd plugin to helm rs config * helm rbac permissions for pv api calls --- .../templates/omsagent-rbac.yaml | 2 +- .../templates/omsagent-rs-configmap.yaml | 23 +++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml index 4f7408e7c..bd4e9baf3 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml @@ -19,7 +19,7 @@ metadata: heritage: {{ .Release.Service }} rules: - apiGroups: [""] - resources: ["pods", "events", "nodes", "nodes/stats", "nodes/metrics", "nodes/spec", "nodes/proxy", "namespaces", "services"] + resources: ["pods", "events", "nodes", "nodes/stats", "nodes/metrics", "nodes/spec", "nodes/proxy", "namespaces", "services", "persistentvolumes"] verbs: ["list", "get", "watch"] - apiGroups: ["apps", "extensions", "autoscaling"] resources: ["replicasets", "deployments", "horizontalpodautoscalers"] diff --git a/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml b/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml index e1bc969cb..baeedf1be 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml @@ -20,6 +20,14 @@ data: log_level debug + #Kubernetes Persistent Volume inventory + + type kubepvinventory + tag oms.containerinsights.KubePVInventory + run_interval 60 + log_level debug + + #Kubernetes events type kubeevents @@ -99,6 +107,21 @@ data: max_retry_wait 5m + + type out_oms + log_level debug + num_threads 5 + buffer_chunk_limit 4m + buffer_type file + buffer_path %STATE_DIR_WS%/state/out_oms_kubepv*.buffer + buffer_queue_limit 20 + buffer_queue_full_action drop_oldest_chunk + 
flush_interval 20s + retry_limit 10 + retry_wait 5s + max_retry_wait 5m + + type out_oms log_level debug From 064bc068f70bacec13af02f6ab74180186a98356 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 16 Dec 2020 15:22:13 -0800 Subject: [PATCH 049/301] Gangams/fix rs ooming (#473) * optimize kpi * optimize kube node inventory * add flags for events, deployments and hpa * have separate function parseNodeLimits * refactor code * fix crash * fix bug with service name * fix bugs related to get service name * update oom fix test agent * debug logs * fix service label issue * update to latest agent and enable ephemeral annotation * change stream size to 200 from 250 * update yaml * adjust chunksizes * add ruby gc env * yaml changes for cioomtest11282020-3 * telemetry to track pods latency * service count telemetry * rename variables * wip * nodes inventory telemetry * configmap changes * add emit streams in configmap * yaml updates * fix copy and paste bug * add todo comments * fix node latency telemetry bug * update yaml with latest test image * fix bug * upping rs memory change * fix mdm bug with final emit stream * update to latest image * fix pr feedback * fix pr feedback * rename health config to agent config * fix max allowed hpa chunk size * update to use 1k pod chunk since validated on 1.18+ * remove debug logs * minor updates * move defaults to common place * chart updates * final oomfix agent * update to use prod image so that can be validated with build pipeline * fix typo in comment --- .../installer/datafiles/base_container.data | 2 +- .../scripts/tomlparser-agent-config.rb | 172 +++++ .../scripts/tomlparser-health-config.rb | 73 -- .../templates/omsagent-rs-configmap.yaml | 32 +- charts/azuremonitor-containers/values.yaml | 9 + kubernetes/linux/Dockerfile | 1 + kubernetes/linux/main.sh | 16 +- kubernetes/omsagent.yaml | 18 +- source/plugins/ruby/KubernetesApiClient.rb | 387 +++++----- source/plugins/ruby/in_kube_events.rb | 18 +- source/plugins/ruby/in_kube_nodes.rb | 410 ++++++---- source/plugins/ruby/in_kube_podinventory.rb | 717 ++++++++++-------- .../plugins/ruby/in_kubestate_deployments.rb | 424 ++++++----- source/plugins/ruby/in_kubestate_hpa.rb | 421 +++++----- 14 files changed, 1534 insertions(+), 1166 deletions(-) create mode 100644 build/linux/installer/scripts/tomlparser-agent-config.rb delete mode 100644 build/linux/installer/scripts/tomlparser-health-config.rb diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index ec42d5967..c680f0eea 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -123,7 +123,7 @@ MAINTAINER: 'Microsoft Corporation' /opt/tomlparser-mdm-metrics-config.rb; build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb; 755; root; root /opt/tomlparser-metric-collection-config.rb; build/linux/installer/scripts/tomlparser-metric-collection-config.rb; 755; root; root -/opt/tomlparser-health-config.rb; build/linux/installer/scripts/tomlparser-health-config.rb; 755; root; root +/opt/tomlparser-agent-config.rb; build/linux/installer/scripts/tomlparser-agent-config.rb; 755; root; root /opt/tomlparser.rb; build/common/installer/scripts/tomlparser.rb; 755; root; root /opt/td-agent-bit-conf-customizer.rb; build/common/installer/scripts/td-agent-bit-conf-customizer.rb; 755; root; root /opt/ConfigParseErrorLogger.rb; build/common/installer/scripts/ConfigParseErrorLogger.rb; 755; root; root diff --git 
a/build/linux/installer/scripts/tomlparser-agent-config.rb b/build/linux/installer/scripts/tomlparser-agent-config.rb new file mode 100644 index 000000000..87c5194ed --- /dev/null +++ b/build/linux/installer/scripts/tomlparser-agent-config.rb @@ -0,0 +1,172 @@ +#!/usr/local/bin/ruby + +#this should be require relative in Linux and require in windows, since it is a gem install on windows +@os_type = ENV["OS_TYPE"] +if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 + require "tomlrb" +else + require_relative "tomlrb" +end + +require_relative "ConfigParseErrorLogger" + +@configMapMountPath = "/etc/config/settings/agent-settings" +@configSchemaVersion = "" +@enable_health_model = false + +# 250 Node items (15KB per node) account to approximately 4MB +@nodesChunkSize = 250 +# 1000 pods (10KB per pod) account to approximately 10MB +@podsChunkSize = 1000 +# 4000 events (1KB per event) account to approximately 4MB +@eventsChunkSize = 4000 +# roughly each deployment is 8k +# 500 deployments account to approximately 4MB +@deploymentsChunkSize = 500 +# roughly each HPA is 3k +# 2000 HPAs account to approximately 6-7MB +@hpaChunkSize = 2000 +# stream batch sizes to avoid large file writes +# too low will consume higher disk iops +@podsEmitStreamBatchSize = 200 +@nodesEmitStreamBatchSize = 100 + +# higher the chunk size rs pod memory consumption higher and lower api latency +# similarly lower the value, helps on the memory consumption but incurrs additional round trip latency +# these needs to be tuned be based on the workload +# nodes +@nodesChunkSizeMin = 100 +@nodesChunkSizeMax = 400 +# pods +@podsChunkSizeMin = 250 +@podsChunkSizeMax = 1500 +# events +@eventsChunkSizeMin = 2000 +@eventsChunkSizeMax = 10000 +# deployments +@deploymentsChunkSizeMin = 500 +@deploymentsChunkSizeMax = 1000 +# hpa +@hpaChunkSizeMin = 500 +@hpaChunkSizeMax = 2000 + +# emit stream sizes to prevent lower values which costs disk i/o +# max will be upto the chunk size +@podsEmitStreamBatchSizeMin = 50 +@nodesEmitStreamBatchSizeMin = 50 + +def is_number?(value) + true if Integer(value) rescue false +end + +# Use parser to parse the configmap toml file to a ruby structure +def parseConfigMap + begin + # Check to see if config map is created + if (File.file?(@configMapMountPath)) + puts "config::configmap container-azm-ms-agentconfig for agent settings mounted, parsing values" + parsedConfig = Tomlrb.load_file(@configMapMountPath, symbolize_keys: true) + puts "config::Successfully parsed mounted config map" + return parsedConfig + else + puts "config::configmap container-azm-ms-agentconfig for agent settings not mounted, using defaults" + return nil + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config map for agent settings : #{errorStr}, using defaults, please check config map for errors") + return nil + end +end + +# Use the ruby structure created after config parsing to set the right values to be used as environment variables +def populateSettingValuesFromConfigMap(parsedConfig) + begin + if !parsedConfig.nil? && !parsedConfig[:agent_settings].nil? + if !parsedConfig[:agent_settings][:health_model].nil? && !parsedConfig[:agent_settings][:health_model][:enabled].nil? + @enable_health_model = parsedConfig[:agent_settings][:health_model][:enabled] + puts "enable_health_model = #{@enable_health_model}" + end + chunk_config = parsedConfig[:agent_settings][:chunk_config] + if !chunk_config.nil? 
+ nodesChunkSize = chunk_config[:NODES_CHUNK_SIZE] + if !nodesChunkSize.nil? && is_number?(nodesChunkSize) && (@nodesChunkSizeMin..@nodesChunkSizeMax) === nodesChunkSize.to_i + @nodesChunkSize = nodesChunkSize.to_i + puts "Using config map value: NODES_CHUNK_SIZE = #{@nodesChunkSize}" + end + + podsChunkSize = chunk_config[:PODS_CHUNK_SIZE] + if !podsChunkSize.nil? && is_number?(podsChunkSize) && (@podsChunkSizeMin..@podsChunkSizeMax) === podsChunkSize.to_i + @podsChunkSize = podsChunkSize.to_i + puts "Using config map value: PODS_CHUNK_SIZE = #{@podsChunkSize}" + end + + eventsChunkSize = chunk_config[:EVENTS_CHUNK_SIZE] + if !eventsChunkSize.nil? && is_number?(eventsChunkSize) && (@eventsChunkSizeMin..@eventsChunkSizeMax) === eventsChunkSize.to_i + @eventsChunkSize = eventsChunkSize.to_i + puts "Using config map value: EVENTS_CHUNK_SIZE = #{@eventsChunkSize}" + end + + deploymentsChunkSize = chunk_config[:DEPLOYMENTS_CHUNK_SIZE] + if !deploymentsChunkSize.nil? && is_number?(deploymentsChunkSize) && (@deploymentsChunkSizeMin..@deploymentsChunkSizeMax) === deploymentsChunkSize.to_i + @deploymentsChunkSize = deploymentsChunkSize.to_i + puts "Using config map value: DEPLOYMENTS_CHUNK_SIZE = #{@deploymentsChunkSize}" + end + + hpaChunkSize = chunk_config[:HPA_CHUNK_SIZE] + if !hpaChunkSize.nil? && is_number?(hpaChunkSize) && (@hpaChunkSizeMin..@hpaChunkSizeMax) === hpaChunkSize.to_i + @hpaChunkSize = hpaChunkSize.to_i + puts "Using config map value: HPA_CHUNK_SIZE = #{@hpaChunkSize}" + end + + podsEmitStreamBatchSize = chunk_config[:PODS_EMIT_STREAM_BATCH_SIZE] + if !podsEmitStreamBatchSize.nil? && is_number?(podsEmitStreamBatchSize) && + podsEmitStreamBatchSize.to_i <= @podsChunkSize && podsEmitStreamBatchSize.to_i >= @podsEmitStreamBatchSizeMin + @podsEmitStreamBatchSize = podsEmitStreamBatchSize.to_i + puts "Using config map value: PODS_EMIT_STREAM_BATCH_SIZE = #{@podsEmitStreamBatchSize}" + end + nodesEmitStreamBatchSize = chunk_config[:NODES_EMIT_STREAM_BATCH_SIZE] + if !nodesEmitStreamBatchSize.nil? && is_number?(nodesEmitStreamBatchSize) && + nodesEmitStreamBatchSize.to_i <= @nodesChunkSize && nodesEmitStreamBatchSize.to_i >= @nodesEmitStreamBatchSizeMin + @nodesEmitStreamBatchSize = nodesEmitStreamBatchSize.to_i + puts "Using config map value: NODES_EMIT_STREAM_BATCH_SIZE = #{@nodesEmitStreamBatchSize}" + end + end + end + rescue => errorStr + puts "config::error:Exception while reading config settings for agent configuration setting - #{errorStr}, using defaults" + @enable_health_model = false + end +end + +@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] +puts "****************Start Config Processing********************" +if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it + configMapSettings = parseConfigMap + if !configMapSettings.nil? + populateSettingValuesFromConfigMap(configMapSettings) + end +else + if (File.file?(@configMapMountPath)) + ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported schema version") + end + @enable_health_model = false +end + +# Write the settings to file, so that they can be set as environment variables +file = File.open("agent_config_env_var", "w") + +if !file.nil? 
+ file.write("export AZMON_CLUSTER_ENABLE_HEALTH_MODEL=#{@enable_health_model}\n") + file.write("export NODES_CHUNK_SIZE=#{@nodesChunkSize}\n") + file.write("export PODS_CHUNK_SIZE=#{@podsChunkSize}\n") + file.write("export EVENTS_CHUNK_SIZE=#{@eventsChunkSize}\n") + file.write("export DEPLOYMENTS_CHUNK_SIZE=#{@deploymentsChunkSize}\n") + file.write("export HPA_CHUNK_SIZE=#{@hpaChunkSize}\n") + file.write("export PODS_EMIT_STREAM_BATCH_SIZE=#{@podsEmitStreamBatchSize}\n") + file.write("export NODES_EMIT_STREAM_BATCH_SIZE=#{@nodesEmitStreamBatchSize}\n") + # Close file after writing all environment variables + file.close +else + puts "Exception while opening file for writing config environment variables" + puts "****************End Config Processing********************" +end diff --git a/build/linux/installer/scripts/tomlparser-health-config.rb b/build/linux/installer/scripts/tomlparser-health-config.rb deleted file mode 100644 index 14c8bdb44..000000000 --- a/build/linux/installer/scripts/tomlparser-health-config.rb +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/local/bin/ruby - -#this should be require relative in Linux and require in windows, since it is a gem install on windows -@os_type = ENV["OS_TYPE"] -if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 - require "tomlrb" -else - require_relative "tomlrb" -end - -require_relative "ConfigParseErrorLogger" - -@configMapMountPath = "/etc/config/settings/agent-settings" -@configSchemaVersion = "" -@enable_health_model = false - -# Use parser to parse the configmap toml file to a ruby structure -def parseConfigMap - begin - # Check to see if config map is created - if (File.file?(@configMapMountPath)) - puts "config::configmap container-azm-ms-agentconfig for agent health settings mounted, parsing values" - parsedConfig = Tomlrb.load_file(@configMapMountPath, symbolize_keys: true) - puts "config::Successfully parsed mounted config map" - return parsedConfig - else - puts "config::configmap container-azm-ms-agentconfig for agent health settings not mounted, using defaults" - return nil - end - rescue => errorStr - ConfigParseErrorLogger.logError("Exception while parsing config map for enabling health: #{errorStr}, using defaults, please check config map for errors") - return nil - end -end - -# Use the ruby structure created after config parsing to set the right values to be used as environment variables -def populateSettingValuesFromConfigMap(parsedConfig) - begin - if !parsedConfig.nil? && !parsedConfig[:agent_settings].nil? && !parsedConfig[:agent_settings][:health_model].nil? && !parsedConfig[:agent_settings][:health_model][:enabled].nil? - @enable_health_model = parsedConfig[:agent_settings][:health_model][:enabled] - puts "enable_health_model = #{@enable_health_model}" - end - rescue => errorStr - puts "config::error:Exception while reading config settings for health_model enabled setting - #{errorStr}, using defaults" - @enable_health_model = false - end -end - -@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] -puts "****************Start Config Processing********************" -if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it - configMapSettings = parseConfigMap - if !configMapSettings.nil? 
- populateSettingValuesFromConfigMap(configMapSettings) - end -else - if (File.file?(@configMapMountPath)) - ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported schema version") - end - @enable_health_model = false -end - -# Write the settings to file, so that they can be set as environment variables -file = File.open("health_config_env_var", "w") - -if !file.nil? - file.write("export AZMON_CLUSTER_ENABLE_HEALTH_MODEL=#{@enable_health_model}\n") - # Close file after writing all environment variables - file.close -else - puts "Exception while opening file for writing config environment variables" - puts "****************End Config Processing********************" -end \ No newline at end of file diff --git a/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml b/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml index baeedf1be..fc7c471f8 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml @@ -95,7 +95,7 @@ data: type out_oms log_level debug - num_threads 5 + num_threads 2 buffer_chunk_limit 4m buffer_type file buffer_path %STATE_DIR_WS%/out_oms_kubepods*.buffer @@ -108,24 +108,24 @@ data: - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 4m - buffer_type file - buffer_path %STATE_DIR_WS%/state/out_oms_kubepv*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m + type out_oms + log_level debug + num_threads 5 + buffer_chunk_limit 4m + buffer_type file + buffer_path %STATE_DIR_WS%/state/out_oms_kubepv*.buffer + buffer_queue_limit 20 + buffer_queue_full_action drop_oldest_chunk + flush_interval 20s + retry_limit 10 + retry_wait 5s + max_retry_wait 5m type out_oms log_level debug - num_threads 5 + num_threads 2 buffer_chunk_limit 4m buffer_type file buffer_path %STATE_DIR_WS%/out_oms_kubeevents*.buffer @@ -155,7 +155,7 @@ data: type out_oms log_level debug - num_threads 5 + num_threads 2 buffer_chunk_limit 4m buffer_type file buffer_path %STATE_DIR_WS%/state/out_oms_kubenodes*.buffer @@ -184,7 +184,7 @@ data: type out_oms log_level debug - num_threads 5 + num_threads 2 buffer_chunk_limit 4m buffer_type file buffer_path %STATE_DIR_WS%/out_oms_kubeperf*.buffer diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index e8acda20e..907e315d1 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -81,6 +81,15 @@ omsagent: deployment: affinity: nodeAffinity: + # affinity to schedule on to ephemeral os node if its available + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: storageprofile + operator: NotIn + values: + - managed requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - labelSelector: diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index d04e86128..34ab133da 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -15,6 +15,7 @@ ENV HOST_VAR /hostfs/var ENV AZMON_COLLECT_ENV False ENV KUBE_CLIENT_BACKOFF_BASE 1 ENV KUBE_CLIENT_BACKOFF_DURATION 0 +ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python-ctypes init-system-helpers net-tools rsyslog cron 
vim dmidecode apt-transport-https gnupg && rm -rf /var/lib/apt/lists/* COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs mdsd.xml envmdsd $tmpdir/ WORKDIR ${tmpdir} diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index a2ba6a1d1..ed16d3e32 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -171,14 +171,14 @@ done source config_env_var -#Parse the configmap to set the right environment variables for health feature. -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-health-config.rb +#Parse the configmap to set the right environment variables for agent config. +/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-agent-config.rb -cat health_config_env_var | while read line; do +cat agent_config_env_var | while read line; do #echo $line echo $line >> ~/.bashrc done -source health_config_env_var +source agent_config_env_var #Parse the configmap to set the right environment variables for network policy manager (npm) integration. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-npm-config.rb @@ -429,7 +429,7 @@ echo "export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" >> ~/.bashrc #region check to auto-activate oneagent, to route container logs, #Intent is to activate one agent routing for all managed clusters with region in the regionllist, unless overridden by configmap -# AZMON_CONTAINER_LOGS_ROUTE will have route (if any) specified in the config map +# AZMON_CONTAINER_LOGS_ROUTE will have route (if any) specified in the config map # AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE will have the final route that we compute & set, based on our region list logic echo "************start oneagent log routing checks************" # by default, use configmap route for safer side @@ -462,9 +462,9 @@ else echo "current region is not in oneagent regions..." fi -if [ "$isoneagentregion" = true ]; then +if [ "$isoneagentregion" = true ]; then #if configmap has a routing for logs, but current region is in the oneagent region list, take the configmap route - if [ ! -z $AZMON_CONTAINER_LOGS_ROUTE ]; then + if [ ! -z $AZMON_CONTAINER_LOGS_ROUTE ]; then AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$AZMON_CONTAINER_LOGS_ROUTE echo "oneagent region is true for current region:$currentregion and config map logs route is not empty. so using config map logs route as effective route:$AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" else #there is no configmap route, so route thru oneagent @@ -511,7 +511,7 @@ if [ ! -e "/etc/config/kube.conf" ]; then echo "starting mdsd ..." 
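One small but relevant detail in the Dockerfile change above is RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR=0.9. In MRI this factor scales how far the old-generation object limit may grow between major collections (the default is 2.0), so lowering it makes full GCs fire sooner and keeps the replicaset pod's resident memory flatter, which fits the OOM fixes in this patch. The lines below are only a way to observe the setting from a Ruby console inside the container, not something the agent itself runs.

  # Run e.g. in irb inside the omsagent container to confirm the tuning is in effect.
  puts ENV["RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR"]   # expected: "0.9"
  puts GC.stat(:old_objects)                        # current old-generation object count
  puts GC.stat(:old_objects_limit)                  # threshold that triggers the next major GC
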
mdsd -l -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & - + touch /opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2 fi fi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 26c7ae9a0..013e2a6c0 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -143,7 +143,7 @@ data: type out_oms log_level debug - num_threads 5 + num_threads 2 buffer_chunk_limit 4m buffer_type file buffer_path %STATE_DIR_WS%/out_oms_kubepods*.buffer @@ -173,7 +173,7 @@ data: type out_oms log_level debug - num_threads 5 + num_threads 2 buffer_chunk_limit 4m buffer_type file buffer_path %STATE_DIR_WS%/out_oms_kubeevents*.buffer @@ -203,7 +203,7 @@ data: type out_oms log_level debug - num_threads 5 + num_threads 2 buffer_chunk_limit 4m buffer_type file buffer_path %STATE_DIR_WS%/state/out_oms_kubenodes*.buffer @@ -232,7 +232,7 @@ data: type out_oms log_level debug - num_threads 5 + num_threads 2 buffer_chunk_limit 4m buffer_type file buffer_path %STATE_DIR_WS%/out_oms_kubeperf*.buffer @@ -533,7 +533,6 @@ spec: cpu: 150m memory: 250Mi env: - # azure devops pipeline uses AKS_RESOURCE_ID and AKS_REGION hence ensure to uncomment these - name: AKS_RESOURCE_ID value: "VALUE_AKS_RESOURCE_ID_VALUE" - name: AKS_REGION @@ -588,6 +587,15 @@ spec: periodSeconds: 60 affinity: nodeAffinity: + # affinity to schedule on to ephemeral os node if its available + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: storageprofile + operator: NotIn + values: + - managed requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - labelSelector: diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 073eb0417..aca2142a0 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -172,6 +172,10 @@ def isAROV3Cluster return @@IsAROV3Cluster end + def isAROv3MasterOrInfraPod(nodeName) + return isAROV3Cluster() && (!nodeName.nil? && (nodeName.downcase.start_with?("infra-") || nodeName.downcase.start_with?("master-"))) + end + def isNodeMaster return @@IsNodeMaster if !@@IsNodeMaster.nil? @@IsNodeMaster = false @@ -276,7 +280,8 @@ def getPods(namespace) def getWindowsNodes winNodes = [] begin - resourceUri = getNodesResourceUri("nodes") + # get only windows nodes + resourceUri = getNodesResourceUri("nodes?labelSelector=kubernetes.io%2Fos%3Dwindows") nodeInventory = JSON.parse(getKubeResourceInfo(resourceUri).body) @Log.info "KubernetesAPIClient::getWindowsNodes : Got nodes from kube api" # Resetting the windows node cache @@ -396,42 +401,67 @@ def getPodUid(podNameSpace, podMetadata) return podUid end - def getContainerResourceRequestsAndLimits(metricJSON, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) + def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) metricItems = [] begin clusterId = getClusterId - metricInfo = metricJSON - metricInfo["items"].each do |pod| - podNameSpace = pod["metadata"]["namespace"] - podUid = getPodUid(podNameSpace, pod["metadata"]) - if podUid.nil? - next - end - - # For ARO, skip the pods scheduled on to master or infra nodes to ingest - if isAROV3Cluster() && !pod["spec"].nil? && !pod["spec"]["nodeName"].nil? 
&& - (pod["spec"]["nodeName"].downcase.start_with?("infra-") || - pod["spec"]["nodeName"].downcase.start_with?("master-")) - next - end + podNameSpace = pod["metadata"]["namespace"] + podUid = getPodUid(podNameSpace, pod["metadata"]) + if podUid.nil? + return metricItems + end - podContainers = [] - if !pod["spec"]["containers"].nil? && !pod["spec"]["containers"].empty? - podContainers = podContainers + pod["spec"]["containers"] - end - # Adding init containers to the record list as well. - if !pod["spec"]["initContainers"].nil? && !pod["spec"]["initContainers"].empty? - podContainers = podContainers + pod["spec"]["initContainers"] - end + nodeName = "" + #for unscheduled (non-started) pods nodeName does NOT exist + if !pod["spec"]["nodeName"].nil? + nodeName = pod["spec"]["nodeName"] + end + # For ARO, skip the pods scheduled on to master or infra nodes to ingest + if isAROv3MasterOrInfraPod(nodeName) + return metricItems + end - if (!podContainers.nil? && !podContainers.empty? && !pod["spec"]["nodeName"].nil?) - nodeName = pod["spec"]["nodeName"] - podContainers.each do |container| - containerName = container["name"] - #metricTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z - if (!container["resources"].nil? && !container["resources"].empty? && !container["resources"][metricCategory].nil? && !container["resources"][metricCategory][metricNameToCollect].nil?) - metricValue = getMetricNumericValue(metricNameToCollect, container["resources"][metricCategory][metricNameToCollect]) + podContainers = [] + if !pod["spec"]["containers"].nil? && !pod["spec"]["containers"].empty? + podContainers = podContainers + pod["spec"]["containers"] + end + # Adding init containers to the record list as well. + if !pod["spec"]["initContainers"].nil? && !pod["spec"]["initContainers"].empty? + podContainers = podContainers + pod["spec"]["initContainers"] + end + if (!podContainers.nil? && !podContainers.empty? && !pod["spec"]["nodeName"].nil?) + podContainers.each do |container| + containerName = container["name"] + #metricTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z + if (!container["resources"].nil? && !container["resources"].empty? && !container["resources"][metricCategory].nil? && !container["resources"][metricCategory][metricNameToCollect].nil?) 
+ metricValue = getMetricNumericValue(metricNameToCollect, container["resources"][metricCategory][metricNameToCollect]) + + metricItem = {} + metricItem["DataItems"] = [] + + metricProps = {} + metricProps["Timestamp"] = metricTime + metricProps["Host"] = nodeName + # Adding this so that it is not set by base omsagent since it was not set earlier and being set by base omsagent + metricProps["Computer"] = nodeName + metricProps["ObjectName"] = "K8SContainer" + metricProps["InstanceName"] = clusterId + "/" + podUid + "/" + containerName + + metricProps["Collections"] = [] + metricCollections = {} + metricCollections["CounterName"] = metricNametoReturn + metricCollections["Value"] = metricValue + + metricProps["Collections"].push(metricCollections) + metricItem["DataItems"].push(metricProps) + metricItems.push(metricItem) + #No container level limit for the given metric, so default to node level limit + else + nodeMetricsHashKey = clusterId + "/" + nodeName + "_" + "allocatable" + "_" + metricNameToCollect + if (metricCategory == "limits" && @@NodeMetrics.has_key?(nodeMetricsHashKey)) + metricValue = @@NodeMetrics[nodeMetricsHashKey] + #@Log.info("Limits not set for container #{clusterId + "/" + podUid + "/" + containerName} using node level limits: #{nodeMetricsHashKey}=#{metricValue} ") metricItem = {} metricItem["DataItems"] = [] @@ -451,32 +481,6 @@ def getContainerResourceRequestsAndLimits(metricJSON, metricCategory, metricName metricProps["Collections"].push(metricCollections) metricItem["DataItems"].push(metricProps) metricItems.push(metricItem) - #No container level limit for the given metric, so default to node level limit - else - nodeMetricsHashKey = clusterId + "/" + nodeName + "_" + "allocatable" + "_" + metricNameToCollect - if (metricCategory == "limits" && @@NodeMetrics.has_key?(nodeMetricsHashKey)) - metricValue = @@NodeMetrics[nodeMetricsHashKey] - #@Log.info("Limits not set for container #{clusterId + "/" + podUid + "/" + containerName} using node level limits: #{nodeMetricsHashKey}=#{metricValue} ") - metricItem = {} - metricItem["DataItems"] = [] - - metricProps = {} - metricProps["Timestamp"] = metricTime - metricProps["Host"] = nodeName - # Adding this so that it is not set by base omsagent since it was not set earlier and being set by base omsagent - metricProps["Computer"] = nodeName - metricProps["ObjectName"] = "K8SContainer" - metricProps["InstanceName"] = clusterId + "/" + podUid + "/" + containerName - - metricProps["Collections"] = [] - metricCollections = {} - metricCollections["CounterName"] = metricNametoReturn - metricCollections["Value"] = metricValue - - metricProps["Collections"].push(metricCollections) - metricItem["DataItems"].push(metricProps) - metricItems.push(metricItem) - end end end end @@ -488,78 +492,74 @@ def getContainerResourceRequestsAndLimits(metricJSON, metricCategory, metricName return metricItems end #getContainerResourceRequestAndLimits - def getContainerResourceRequestsAndLimitsAsInsightsMetrics(metricJSON, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) + def getContainerResourceRequestsAndLimitsAsInsightsMetrics(pod, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) metricItems = [] begin clusterId = getClusterId clusterName = getClusterName - - metricInfo = metricJSON - metricInfo["items"].each do |pod| - podNameSpace = pod["metadata"]["namespace"] - if podNameSpace.eql?("kube-system") && !pod["metadata"].key?("ownerReferences") - # The above case 
seems to be the only case where you have horizontal scaling of pods - # but no controller, in which case cAdvisor picks up kubernetes.io/config.hash - # instead of the actual poduid. Since this uid is not being surface into the UX - # its ok to use this. - # Use kubernetes.io/config.hash to be able to correlate with cadvisor data - if pod["metadata"]["annotations"].nil? - next - else - podUid = pod["metadata"]["annotations"]["kubernetes.io/config.hash"] - end + podNameSpace = pod["metadata"]["namespace"] + if podNameSpace.eql?("kube-system") && !pod["metadata"].key?("ownerReferences") + # The above case seems to be the only case where you have horizontal scaling of pods + # but no controller, in which case cAdvisor picks up kubernetes.io/config.hash + # instead of the actual poduid. Since this uid is not being surface into the UX + # its ok to use this. + # Use kubernetes.io/config.hash to be able to correlate with cadvisor data + if pod["metadata"]["annotations"].nil? + return metricItems else - podUid = pod["metadata"]["uid"] + podUid = pod["metadata"]["annotations"]["kubernetes.io/config.hash"] end + else + podUid = pod["metadata"]["uid"] + end - podContainers = [] - if !pod["spec"]["containers"].nil? && !pod["spec"]["containers"].empty? - podContainers = podContainers + pod["spec"]["containers"] - end - # Adding init containers to the record list as well. - if !pod["spec"]["initContainers"].nil? && !pod["spec"]["initContainers"].empty? - podContainers = podContainers + pod["spec"]["initContainers"] - end + podContainers = [] + if !pod["spec"]["containers"].nil? && !pod["spec"]["containers"].empty? + podContainers = podContainers + pod["spec"]["containers"] + end + # Adding init containers to the record list as well. + if !pod["spec"]["initContainers"].nil? && !pod["spec"]["initContainers"].empty? + podContainers = podContainers + pod["spec"]["initContainers"] + end - if (!podContainers.nil? && !podContainers.empty?) - if (!pod["spec"]["nodeName"].nil?) - nodeName = pod["spec"]["nodeName"] + if (!podContainers.nil? && !podContainers.empty?) + if (!pod["spec"]["nodeName"].nil?) + nodeName = pod["spec"]["nodeName"] + else + nodeName = "" #unscheduled pod. We still want to collect limits & requests for GPU + end + podContainers.each do |container| + metricValue = nil + containerName = container["name"] + #metricTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z + if (!container["resources"].nil? && !container["resources"].empty? && !container["resources"][metricCategory].nil? && !container["resources"][metricCategory][metricNameToCollect].nil?) + metricValue = getMetricNumericValue(metricNameToCollect, container["resources"][metricCategory][metricNameToCollect]) else - nodeName = "" #unscheduled pod. We still want to collect limits & requests for GPU - end - podContainers.each do |container| - metricValue = nil - containerName = container["name"] - #metricTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z - if (!container["resources"].nil? && !container["resources"].empty? && !container["resources"][metricCategory].nil? && !container["resources"][metricCategory][metricNameToCollect].nil?) 
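The KubernetesApiClient changes above turn getContainerResourceRequestsAndLimits and getContainerResourceRequestsAndLimitsAsInsightsMetrics into per-pod helpers, so callers can stream one pod at a time instead of materializing metrics for the whole pod list, which is a large part of the memory reduction this patch targets. The loop below is a hand-written sketch of the calling pattern, not the actual in_kube_podinventory code; the counter name is an example, and podInventory, batchTime, emitTime and eventStream are assumed to exist in the caller's enumerate/emit path.

  podInventory["items"].each do |pod|
    # one small array of metric items per pod, emitted and discarded immediately
    metric_items = KubernetesApiClient.getContainerResourceRequestsAndLimits(
      pod, "requests", "cpu", "cpuRequestNanoCores", batchTime)
    metric_items.each { |item| eventStream.add(emitTime, item) }
  end
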
- metricValue = getMetricNumericValue(metricNameToCollect, container["resources"][metricCategory][metricNameToCollect]) - else - #No container level limit for the given metric, so default to node level limit for non-gpu metrics - if (metricNameToCollect.downcase != "nvidia.com/gpu") && (metricNameToCollect.downcase != "amd.com/gpu") - nodeMetricsHashKey = clusterId + "/" + nodeName + "_" + "allocatable" + "_" + metricNameToCollect - metricValue = @@NodeMetrics[nodeMetricsHashKey] - end - end - if (!metricValue.nil?) - metricItem = {} - metricItem["CollectionTime"] = metricTime - metricItem["Computer"] = nodeName - metricItem["Name"] = metricNametoReturn - metricItem["Value"] = metricValue - metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN - metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_GPU_NAMESPACE - - metricTags = {} - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = clusterId - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = clusterName - metricTags[Constants::INSIGHTSMETRICS_TAGS_CONTAINER_NAME] = podUid + "/" + containerName - #metricTags[Constants::INSIGHTSMETRICS_TAGS_K8SNAMESPACE] = podNameSpace - - metricItem["Tags"] = metricTags - - metricItems.push(metricItem) + #No container level limit for the given metric, so default to node level limit for non-gpu metrics + if (metricNameToCollect.downcase != "nvidia.com/gpu") && (metricNameToCollect.downcase != "amd.com/gpu") + nodeMetricsHashKey = clusterId + "/" + nodeName + "_" + "allocatable" + "_" + metricNameToCollect + metricValue = @@NodeMetrics[nodeMetricsHashKey] end end + if (!metricValue.nil?) + metricItem = {} + metricItem["CollectionTime"] = metricTime + metricItem["Computer"] = nodeName + metricItem["Name"] = metricNametoReturn + metricItem["Value"] = metricValue + metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN + metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_GPU_NAMESPACE + + metricTags = {} + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = clusterId + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = clusterName + metricTags[Constants::INSIGHTSMETRICS_TAGS_CONTAINER_NAME] = podUid + "/" + containerName + #metricTags[Constants::INSIGHTSMETRICS_TAGS_K8SNAMESPACE] = podNameSpace + + metricItem["Tags"] = metricTags + + metricItems.push(metricItem) + end end end rescue => error @@ -578,32 +578,9 @@ def parseNodeLimits(metricJSON, metricCategory, metricNameToCollect, metricNamet #if we are coming up with the time it should be same for all nodes #metricTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z metricInfo["items"].each do |node| - if (!node["status"][metricCategory].nil?) 
- - # metricCategory can be "capacity" or "allocatable" and metricNameToCollect can be "cpu" or "memory" - metricValue = getMetricNumericValue(metricNameToCollect, node["status"][metricCategory][metricNameToCollect]) - - metricItem = {} - metricItem["DataItems"] = [] - metricProps = {} - metricProps["Timestamp"] = metricTime - metricProps["Host"] = node["metadata"]["name"] - # Adding this so that it is not set by base omsagent since it was not set earlier and being set by base omsagent - metricProps["Computer"] = node["metadata"]["name"] - metricProps["ObjectName"] = "K8SNode" - metricProps["InstanceName"] = clusterId + "/" + node["metadata"]["name"] - metricProps["Collections"] = [] - metricCollections = {} - metricCollections["CounterName"] = metricNametoReturn - metricCollections["Value"] = metricValue - - metricProps["Collections"].push(metricCollections) - metricItem["DataItems"].push(metricProps) + metricItem = parseNodeLimitsFromNodeItem(node, metricCategory, metricNameToCollect, metricNametoReturn, metricTime) + if !metricItem.nil? && !metricItem.empty? metricItems.push(metricItem) - #push node level metrics to a inmem hash so that we can use it looking up at container level. - #Currently if container level cpu & memory limits are not defined we default to node level limits - @@NodeMetrics[clusterId + "/" + node["metadata"]["name"] + "_" + metricCategory + "_" + metricNameToCollect] = metricValue - #@Log.info ("Node metric hash: #{@@NodeMetrics}") end end rescue => error @@ -612,49 +589,82 @@ def parseNodeLimits(metricJSON, metricCategory, metricNameToCollect, metricNamet return metricItems end #parseNodeLimits - def parseNodeLimitsAsInsightsMetrics(metricJSON, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) - metricItems = [] + def parseNodeLimitsFromNodeItem(node, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) + metricItem = {} begin - metricInfo = metricJSON clusterId = getClusterId - clusterName = getClusterName #Since we are getting all node data at the same time and kubernetes doesnt specify a timestamp for the capacity and allocation metrics, #if we are coming up with the time it should be same for all nodes #metricTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z - metricInfo["items"].each do |node| - if (!node["status"][metricCategory].nil?) && (!node["status"][metricCategory][metricNameToCollect].nil?) - - # metricCategory can be "capacity" or "allocatable" and metricNameToCollect can be "cpu" or "memory" or "amd.com/gpu" or "nvidia.com/gpu" - metricValue = getMetricNumericValue(metricNameToCollect, node["status"][metricCategory][metricNameToCollect]) - - metricItem = {} - metricItem["CollectionTime"] = metricTime - metricItem["Computer"] = node["metadata"]["name"] - metricItem["Name"] = metricNametoReturn - metricItem["Value"] = metricValue - metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN - metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_GPU_NAMESPACE - - metricTags = {} - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = clusterId - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = clusterName - metricTags[Constants::INSIGHTSMETRICS_TAGS_GPU_VENDOR] = metricNameToCollect - - metricItem["Tags"] = metricTags + if (!node["status"][metricCategory].nil?) && (!node["status"][metricCategory][metricNameToCollect].nil?) 
+ # metricCategory can be "capacity" or "allocatable" and metricNameToCollect can be "cpu" or "memory" + metricValue = getMetricNumericValue(metricNameToCollect, node["status"][metricCategory][metricNameToCollect]) + + metricItem["DataItems"] = [] + metricProps = {} + metricProps["Timestamp"] = metricTime + metricProps["Host"] = node["metadata"]["name"] + # Adding this so that it is not set by base omsagent since it was not set earlier and being set by base omsagent + metricProps["Computer"] = node["metadata"]["name"] + metricProps["ObjectName"] = "K8SNode" + metricProps["InstanceName"] = clusterId + "/" + node["metadata"]["name"] + metricProps["Collections"] = [] + metricCollections = {} + metricCollections["CounterName"] = metricNametoReturn + metricCollections["Value"] = metricValue + + metricProps["Collections"].push(metricCollections) + metricItem["DataItems"].push(metricProps) + + #push node level metrics to a inmem hash so that we can use it looking up at container level. + #Currently if container level cpu & memory limits are not defined we default to node level limits + @@NodeMetrics[clusterId + "/" + node["metadata"]["name"] + "_" + metricCategory + "_" + metricNameToCollect] = metricValue + #@Log.info ("Node metric hash: #{@@NodeMetrics}") + end + rescue => error + @Log.warn("parseNodeLimitsFromNodeItem failed: #{error} for metric #{metricCategory} #{metricNameToCollect}") + end + return metricItem + end #parseNodeLimitsFromNodeItem - metricItems.push(metricItem) - #push node level metrics (except gpu ones) to a inmem hash so that we can use it looking up at container level. - #Currently if container level cpu & memory limits are not defined we default to node level limits - if (metricNameToCollect.downcase != "nvidia.com/gpu") && (metricNameToCollect.downcase != "amd.com/gpu") - @@NodeMetrics[clusterId + "/" + node["metadata"]["name"] + "_" + metricCategory + "_" + metricNameToCollect] = metricValue - #@Log.info ("Node metric hash: #{@@NodeMetrics}") - end + def parseNodeLimitsAsInsightsMetrics(node, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) + metricItem = {} + begin + #Since we are getting all node data at the same time and kubernetes doesnt specify a timestamp for the capacity and allocation metrics, + #if we are coming up with the time it should be same for all nodes + #metricTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z + if (!node["status"][metricCategory].nil?) && (!node["status"][metricCategory][metricNameToCollect].nil?) 
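parseNodeLimits is split along the same lines, so parseNodeLimitsFromNodeItem and parseNodeLimitsAsInsightsMetrics each take a single node object and the node-level values are still cached in @@NodeMetrics for container-level fallbacks. A caller-side sketch, with the same caveats as the pod example earlier (assumed locals, illustrative counter name), would look like this:

  nodeInventory["items"].each do |node|
    metric_item = KubernetesApiClient.parseNodeLimitsFromNodeItem(
      node, "allocatable", "cpu", "cpuAllocatableNanoCores", batchTime)
    eventStream.add(emitTime, metric_item) unless metric_item.nil? || metric_item.empty?
  end
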
+ clusterId = getClusterId + clusterName = getClusterName + + # metricCategory can be "capacity" or "allocatable" and metricNameToCollect can be "cpu" or "memory" or "amd.com/gpu" or "nvidia.com/gpu" + metricValue = getMetricNumericValue(metricNameToCollect, node["status"][metricCategory][metricNameToCollect]) + + metricItem["CollectionTime"] = metricTime + metricItem["Computer"] = node["metadata"]["name"] + metricItem["Name"] = metricNametoReturn + metricItem["Value"] = metricValue + metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN + metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_GPU_NAMESPACE + + metricTags = {} + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = clusterId + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = clusterName + metricTags[Constants::INSIGHTSMETRICS_TAGS_GPU_VENDOR] = metricNameToCollect + + metricItem["Tags"] = metricTags + + #push node level metrics (except gpu ones) to a inmem hash so that we can use it looking up at container level. + #Currently if container level cpu & memory limits are not defined we default to node level limits + if (metricNameToCollect.downcase != "nvidia.com/gpu") && (metricNameToCollect.downcase != "amd.com/gpu") + @@NodeMetrics[clusterId + "/" + node["metadata"]["name"] + "_" + metricCategory + "_" + metricNameToCollect] = metricValue + #@Log.info ("Node metric hash: #{@@NodeMetrics}") end end rescue => error @Log.warn("parseNodeLimitsAsInsightsMetrics failed: #{error} for metric #{metricCategory} #{metricNameToCollect}") end - return metricItems + return metricItem end def getMetricNumericValue(metricName, metricVal) @@ -777,5 +787,32 @@ def getKubeAPIServerUrl end return apiServerUrl end + + def getKubeServicesInventoryRecords(serviceList, batchTime = Time.utc.iso8601) + kubeServiceRecords = [] + begin + if (!serviceList.nil? && !serviceList.empty?) 
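+ # Build trimmed service inventory records (name, namespace, selector labels, clusterIP, type)
+ # so the caller can release the full serviceList early; ClusterId/ClusterName are stamped on
+ # each record just before emit to keep the memory footprint small.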
+ servicesCount = serviceList["items"].length + @Log.info("KubernetesApiClient::getKubeServicesInventoryRecords : number of services in serviceList #{servicesCount} @ #{Time.now.utc.iso8601}") + serviceList["items"].each do |item| + kubeServiceRecord = {} + kubeServiceRecord["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated + kubeServiceRecord["ServiceName"] = item["metadata"]["name"] + kubeServiceRecord["Namespace"] = item["metadata"]["namespace"] + kubeServiceRecord["SelectorLabels"] = [item["spec"]["selector"]] + # added these before emit to avoid memory foot print + # kubeServiceRecord["ClusterId"] = KubernetesApiClient.getClusterId + # kubeServiceRecord["ClusterName"] = KubernetesApiClient.getClusterName + kubeServiceRecord["ClusterIP"] = item["spec"]["clusterIP"] + kubeServiceRecord["ServiceType"] = item["spec"]["type"] + kubeServiceRecords.push(kubeServiceRecord.dup) + end + end + rescue => errorStr + @Log.warn "KubernetesApiClient::getKubeServicesInventoryRecords:Failed with an error : #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + return kubeServiceRecords + end end end diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index 6f59a3fc1..4f6017cc5 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -17,8 +17,9 @@ def initialize require_relative "omslog" require_relative "ApplicationInsightsUtility" - # 30000 events account to approximately 5MB - @EVENTS_CHUNK_SIZE = 30000 + # refer tomlparser-agent-config for defaults + # this configurable via configmap + @EVENTS_CHUNK_SIZE = 0 # Initializing events count for telemetry @eventsCount = 0 @@ -36,6 +37,15 @@ def configure(conf) def start if @run_interval + if !ENV["EVENTS_CHUNK_SIZE"].nil? && !ENV["EVENTS_CHUNK_SIZE"].empty? && ENV["EVENTS_CHUNK_SIZE"].to_i > 0 + @EVENTS_CHUNK_SIZE = ENV["EVENTS_CHUNK_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kube_events::start: setting to default value since got EVENTS_CHUNK_SIZE nil or empty") + @EVENTS_CHUNK_SIZE = 4000 + end + $log.info("in_kube_events::start : EVENTS_CHUNK_SIZE @ #{@EVENTS_CHUNK_SIZE}") + @finished = false @condition = ConditionVariable.new @mutex = Mutex.new @@ -82,6 +92,8 @@ def enumerate end $log.info("in_kube_events::enumerate : Done getting events from Kube API @ #{Time.now.utc.iso8601}") if (!eventList.nil? && !eventList.empty? && eventList.key?("items") && !eventList["items"].nil? && !eventList["items"].empty?) + eventsCount = eventList["items"].length + $log.info "in_kube_events::enumerate:Received number of events in eventList is #{eventsCount} @ #{Time.now.utc.iso8601}" newEventQueryState = parse_and_emit_records(eventList, eventQueryState, newEventQueryState, batchTime) else $log.warn "in_kube_events::enumerate:Received empty eventList" @@ -91,6 +103,8 @@ def enumerate while (!continuationToken.nil? && !continuationToken.empty?) continuationToken, eventList = KubernetesApiClient.getResourcesAndContinuationToken("events?fieldSelector=type!=Normal&limit=#{@EVENTS_CHUNK_SIZE}&continue=#{continuationToken}") if (!eventList.nil? && !eventList.empty? && eventList.key?("items") && !eventList["items"].nil? && !eventList["items"].empty?) 
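+ # Log how many events came back in this chunk; useful when tuning EVENTS_CHUNK_SIZE.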
+ eventsCount = eventList["items"].length + $log.info "in_kube_events::enumerate:Received number of events in eventList is #{eventsCount} @ #{Time.now.utc.iso8601}" newEventQueryState = parse_and_emit_records(eventList, eventQueryState, newEventQueryState, batchTime) else $log.warn "in_kube_events::enumerate:Received empty eventList" diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index 4d58382f5..e7c5060a5 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -32,7 +32,12 @@ def initialize require_relative "ApplicationInsightsUtility" require_relative "oms_common" require_relative "omslog" - @NODES_CHUNK_SIZE = "400" + # refer tomlparser-agent-config for the defaults + @NODES_CHUNK_SIZE = 0 + @NODES_EMIT_STREAM_BATCH_SIZE = 0 + + @nodeInventoryE2EProcessingLatencyMs = 0 + @nodesAPIE2ELatencyMs = 0 require_relative "constants" end @@ -45,11 +50,30 @@ def configure(conf) def start if @run_interval + if !ENV["NODES_CHUNK_SIZE"].nil? && !ENV["NODES_CHUNK_SIZE"].empty? && ENV["NODES_CHUNK_SIZE"].to_i > 0 + @NODES_CHUNK_SIZE = ENV["NODES_CHUNK_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kube_nodes::start: setting to default value since got NODES_CHUNK_SIZE nil or empty") + @NODES_CHUNK_SIZE = 250 + end + $log.info("in_kube_nodes::start : NODES_CHUNK_SIZE @ #{@NODES_CHUNK_SIZE}") + + if !ENV["NODES_EMIT_STREAM_BATCH_SIZE"].nil? && !ENV["NODES_EMIT_STREAM_BATCH_SIZE"].empty? && ENV["NODES_EMIT_STREAM_BATCH_SIZE"].to_i > 0 + @NODES_EMIT_STREAM_BATCH_SIZE = ENV["NODES_EMIT_STREAM_BATCH_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kube_nodes::start: setting to default value since got NODES_EMIT_STREAM_BATCH_SIZE nil or empty") + @NODES_EMIT_STREAM_BATCH_SIZE = 100 + end + $log.info("in_kube_nodes::start : NODES_EMIT_STREAM_BATCH_SIZE @ #{@NODES_EMIT_STREAM_BATCH_SIZE}") + @finished = false @condition = ConditionVariable.new @mutex = Mutex.new @thread = Thread.new(&method(:run_periodic)) @@nodeTelemetryTimeTracker = DateTime.now.to_time.to_i + @@nodeInventoryLatencyTelemetryTimeTracker = DateTime.now.to_time.to_i end end @@ -69,14 +93,20 @@ def enumerate currentTime = Time.now batchTime = currentTime.utc.iso8601 + @nodesAPIE2ELatencyMs = 0 + @nodeInventoryE2EProcessingLatencyMs = 0 + nodeInventoryStartTime = (Time.now.to_f * 1000).to_i + nodesAPIChunkStartTime = (Time.now.to_f * 1000).to_i # Initializing continuation token to nil continuationToken = nil $log.info("in_kube_nodes::enumerate : Getting nodes from Kube API @ #{Time.now.utc.iso8601}") resourceUri = KubernetesApiClient.getNodesResourceUri("nodes?limit=#{@NODES_CHUNK_SIZE}") continuationToken, nodeInventory = KubernetesApiClient.getResourcesAndContinuationToken(resourceUri) - $log.info("in_kube_nodes::enumerate : Done getting nodes from Kube API @ #{Time.now.utc.iso8601}") + nodesAPIChunkEndTime = (Time.now.to_f * 1000).to_i + @nodesAPIE2ELatencyMs = (nodesAPIChunkEndTime - nodesAPIChunkStartTime) if (!nodeInventory.nil? && !nodeInventory.empty? && nodeInventory.key?("items") && !nodeInventory["items"].nil? && !nodeInventory["items"].empty?) 
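+ # Log the node count returned for this chunk; useful when tuning NODES_CHUNK_SIZE.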
+ $log.info("in_kube_nodes::enumerate : number of node items :#{nodeInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") parse_and_emit_records(nodeInventory, batchTime) else $log.warn "in_kube_nodes::enumerate:Received empty nodeInventory" @@ -84,14 +114,26 @@ def enumerate #If we receive a continuation token, make calls, process and flush data until we have processed all data while (!continuationToken.nil? && !continuationToken.empty?) + nodesAPIChunkStartTime = (Time.now.to_f * 1000).to_i continuationToken, nodeInventory = KubernetesApiClient.getResourcesAndContinuationToken(resourceUri + "&continue=#{continuationToken}") + nodesAPIChunkEndTime = (Time.now.to_f * 1000).to_i + @nodesAPIE2ELatencyMs = @nodesAPIE2ELatencyMs + (nodesAPIChunkEndTime - nodesAPIChunkStartTime) if (!nodeInventory.nil? && !nodeInventory.empty? && nodeInventory.key?("items") && !nodeInventory["items"].nil? && !nodeInventory["items"].empty?) + $log.info("in_kube_nodes::enumerate : number of node items :#{nodeInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") parse_and_emit_records(nodeInventory, batchTime) else $log.warn "in_kube_nodes::enumerate:Received empty nodeInventory" end end + @nodeInventoryE2EProcessingLatencyMs = ((Time.now.to_f * 1000).to_i - nodeInventoryStartTime) + timeDifference = (DateTime.now.to_time.to_i - @@nodeInventoryLatencyTelemetryTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + ApplicationInsightsUtility.sendMetricTelemetry("NodeInventoryE2EProcessingLatencyMs", @nodeInventoryE2EProcessingLatencyMs, {}) + ApplicationInsightsUtility.sendMetricTelemetry("NodesAPIE2ELatencyMs", @nodesAPIE2ELatencyMs, {}) + @@nodeInventoryLatencyTelemetryTimeTracker = DateTime.now.to_time.to_i + end # Setting this to nil so that we dont hold memory until GC kicks in nodeInventory = nil rescue => errorStr @@ -109,77 +151,32 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) eventStream = MultiEventStream.new containerNodeInventoryEventStream = MultiEventStream.new insightsMetricsEventStream = MultiEventStream.new + kubePerfEventStream = MultiEventStream.new @@istestvar = ENV["ISTEST"] #get node inventory - nodeInventory["items"].each do |items| - record = {} - # Sending records for ContainerNodeInventory - containerNodeInventoryRecord = {} - containerNodeInventoryRecord["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated - containerNodeInventoryRecord["Computer"] = items["metadata"]["name"] - - record["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated - record["Computer"] = items["metadata"]["name"] - record["ClusterName"] = KubernetesApiClient.getClusterName - record["ClusterId"] = KubernetesApiClient.getClusterId - record["CreationTimeStamp"] = items["metadata"]["creationTimestamp"] - record["Labels"] = [items["metadata"]["labels"]] - record["Status"] = "" - - if !items["spec"]["providerID"].nil? && !items["spec"]["providerID"].empty? 
- if File.file?(@@AzStackCloudFileName) # existence of this file indicates agent running on azstack - record["KubernetesProviderID"] = "azurestack" - else - #Multicluster kusto query is filtering after splitting by ":" to the left, so do the same here - #https://msazure.visualstudio.com/One/_git/AzureUX-Monitoring?path=%2Fsrc%2FMonitoringExtension%2FClient%2FInfraInsights%2FData%2FQueryTemplates%2FMultiClusterKustoQueryTemplate.ts&_a=contents&version=GBdev - provider = items["spec"]["providerID"].split(":")[0] - if !provider.nil? && !provider.empty? - record["KubernetesProviderID"] = provider - else - record["KubernetesProviderID"] = items["spec"]["providerID"] - end - end - else - record["KubernetesProviderID"] = "onprem" - end - - # Refer to https://kubernetes.io/docs/concepts/architecture/nodes/#condition for possible node conditions. - # We check the status of each condition e.g. {"type": "OutOfDisk","status": "False"} . Based on this we - # populate the KubeNodeInventory Status field. A possible value for this field could be "Ready OutofDisk" - # implying that the node is ready for hosting pods, however its out of disk. - - if items["status"].key?("conditions") && !items["status"]["conditions"].empty? - allNodeConditions = "" - items["status"]["conditions"].each do |condition| - if condition["status"] == "True" - if !allNodeConditions.empty? - allNodeConditions = allNodeConditions + "," + condition["type"] - else - allNodeConditions = condition["type"] - end - end - #collect last transition to/from ready (no matter ready is true/false) - if condition["type"] == "Ready" && !condition["lastTransitionTime"].nil? - record["LastTransitionTimeReady"] = condition["lastTransitionTime"] - end - end - if !allNodeConditions.empty? - record["Status"] = allNodeConditions + nodeInventory["items"].each do |item| + # node inventory + nodeInventoryRecord = getNodeInventoryRecord(item, batchTime) + wrapper = { + "DataType" => "KUBE_NODE_INVENTORY_BLOB", + "IPName" => "ContainerInsights", + "DataItems" => [nodeInventoryRecord.each { |k, v| nodeInventoryRecord[k] = v }], + } + eventStream.add(emitTime, wrapper) if wrapper + if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && eventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_node::parse_and_emit_records: number of node inventory records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + router.emit_stream(@tag, eventStream) if eventStream + $log.info("in_kube_node::parse_and_emit_records: number of mdm node inventory records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@MDMKubeNodeInventoryTag, eventStream) if eventStream + + if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) + $log.info("kubeNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") end + eventStream = MultiEventStream.new end - nodeInfo = items["status"]["nodeInfo"] - record["KubeletVersion"] = nodeInfo["kubeletVersion"] - record["KubeProxyVersion"] = nodeInfo["kubeProxyVersion"] - containerNodeInventoryRecord["OperatingSystem"] = nodeInfo["osImage"] - containerRuntimeVersion = nodeInfo["containerRuntimeVersion"] - if containerRuntimeVersion.downcase.start_with?("docker://") - containerNodeInventoryRecord["DockerVersion"] = containerRuntimeVersion.split("//")[1] - else - # using containerRuntimeVersion as DockerVersion as is for non docker runtimes - containerNodeInventoryRecord["DockerVersion"] = containerRuntimeVersion - end - # ContainerNodeInventory data for docker version and operating system. + # container node inventory + containerNodeInventoryRecord = getContainerNodeInventoryRecord(item, batchTime) containerNodeInventoryWrapper = { "DataType" => "CONTAINER_NODE_INVENTORY_BLOB", "IPName" => "ContainerInsights", @@ -187,33 +184,81 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) } containerNodeInventoryEventStream.add(emitTime, containerNodeInventoryWrapper) if containerNodeInventoryWrapper - wrapper = { - "DataType" => "KUBE_NODE_INVENTORY_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [record.each { |k, v| record[k] = v }], - } - eventStream.add(emitTime, wrapper) if wrapper + if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && containerNodeInventoryEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_node::parse_and_emit_records: number of container node inventory records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@ContainerNodeInventoryTag, containerNodeInventoryEventStream) if containerNodeInventoryEventStream + containerNodeInventoryEventStream = MultiEventStream.new + end + + # node metrics records + nodeMetricRecords = [] + nodeMetricRecord = KubernetesApiClient.parseNodeLimitsFromNodeItem(item, "allocatable", "cpu", "cpuAllocatableNanoCores", batchTime) + if !nodeMetricRecord.nil? && !nodeMetricRecord.empty? + nodeMetricRecords.push(nodeMetricRecord) + end + nodeMetricRecord = KubernetesApiClient.parseNodeLimitsFromNodeItem(item, "allocatable", "memory", "memoryAllocatableBytes", batchTime) + if !nodeMetricRecord.nil? && !nodeMetricRecord.empty? + nodeMetricRecords.push(nodeMetricRecord) + end + nodeMetricRecord = KubernetesApiClient.parseNodeLimitsFromNodeItem(item, "capacity", "cpu", "cpuCapacityNanoCores", batchTime) + if !nodeMetricRecord.nil? && !nodeMetricRecord.empty? + nodeMetricRecords.push(nodeMetricRecord) + end + nodeMetricRecord = KubernetesApiClient.parseNodeLimitsFromNodeItem(item, "capacity", "memory", "memoryCapacityBytes", batchTime) + if !nodeMetricRecord.nil? && !nodeMetricRecord.empty? 
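+ # parseNodeLimitsFromNodeItem returns an empty hash when the node does not report this
+ # metric (or on error), so only non-empty records are pushed.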
+ nodeMetricRecords.push(nodeMetricRecord) + end + nodeMetricRecords.each do |metricRecord| + metricRecord["DataType"] = "LINUX_PERF_BLOB" + metricRecord["IPName"] = "LogManagement" + kubePerfEventStream.add(emitTime, metricRecord) if metricRecord + end + if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && kubePerfEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_nodes::parse_and_emit_records: number of node perf metric records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream + kubePerfEventStream = MultiEventStream.new + end + + # node GPU metrics record + nodeGPUInsightsMetricsRecords = [] + insightsMetricsRecord = KubernetesApiClient.parseNodeLimitsAsInsightsMetrics(item, "allocatable", "nvidia.com/gpu", "nodeGpuAllocatable", batchTime) + if !insightsMetricsRecord.nil? && !insightsMetricsRecord.empty? + nodeGPUInsightsMetricsRecords.push(insightsMetricsRecord) + end + insightsMetricsRecord = KubernetesApiClient.parseNodeLimitsAsInsightsMetrics(item, "capacity", "nvidia.com/gpu", "nodeGpuCapacity", batchTime) + if !insightsMetricsRecord.nil? && !insightsMetricsRecord.empty? + nodeGPUInsightsMetricsRecords.push(insightsMetricsRecord) + end + insightsMetricsRecord = KubernetesApiClient.parseNodeLimitsAsInsightsMetrics(item, "allocatable", "amd.com/gpu", "nodeGpuAllocatable", batchTime) + if !insightsMetricsRecord.nil? && !insightsMetricsRecord.empty? + nodeGPUInsightsMetricsRecords.push(insightsMetricsRecord) + end + insightsMetricsRecord = KubernetesApiClient.parseNodeLimitsAsInsightsMetrics(item, "capacity", "amd.com/gpu", "nodeGpuCapacity", batchTime) + if !insightsMetricsRecord.nil? && !insightsMetricsRecord.empty? + nodeGPUInsightsMetricsRecords.push(insightsMetricsRecord) + end + nodeGPUInsightsMetricsRecords.each do |insightsMetricsRecord| + wrapper = { + "DataType" => "INSIGHTS_METRICS_BLOB", + "IPName" => "ContainerInsights", + "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], + } + insightsMetricsEventStream.add(emitTime, wrapper) if wrapper + end + if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && insightsMetricsEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_nodes::parse_and_emit_records: number of GPU node perf metric records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + insightsMetricsEventStream = MultiEventStream.new + end # Adding telemetry to send node telemetry every 10 minutes timeDifference = (DateTime.now.to_time.to_i - @@nodeTelemetryTimeTracker).abs timeDifferenceInMinutes = timeDifference / 60 if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) - properties = {} - properties["Computer"] = record["Computer"] - properties["KubeletVersion"] = record["KubeletVersion"] - properties["OperatingSystem"] = nodeInfo["operatingSystem"] - # DockerVersion field holds docker version if runtime is docker/moby else :// - if containerRuntimeVersion.downcase.start_with?("docker://") - properties["DockerVersion"] = containerRuntimeVersion.split("//")[1] - else - properties["DockerVersion"] = containerRuntimeVersion - end - properties["KubernetesProviderID"] = record["KubernetesProviderID"] - properties["KernelVersion"] = nodeInfo["kernelVersion"] - properties["OSImage"] = nodeInfo["osImage"] + properties = getNodeTelemetryProps(item) + 
properties["KubernetesProviderID"] = nodeInventoryRecord["KubernetesProviderID"] + capacityInfo = item["status"]["capacity"] - capacityInfo = items["status"]["capacity"] ApplicationInsightsUtility.sendMetricTelemetry("NodeMemory", capacityInfo["memory"], properties) - begin if (!capacityInfo["nvidia.com/gpu"].nil?) && (!capacityInfo["nvidia.com/gpu"].empty?) properties["nvigpus"] = capacityInfo["nvidia.com/gpu"] @@ -247,72 +292,32 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) telemetrySent = true end end - router.emit_stream(@tag, eventStream) if eventStream - router.emit_stream(@@MDMKubeNodeInventoryTag, eventStream) if eventStream - router.emit_stream(@@ContainerNodeInventoryTag, containerNodeInventoryEventStream) if containerNodeInventoryEventStream if telemetrySent == true @@nodeTelemetryTimeTracker = DateTime.now.to_time.to_i end - - if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && eventStream.count > 0) - $log.info("kubeNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + if eventStream.count > 0 + $log.info("in_kube_node::parse_and_emit_records: number of node inventory records emitted #{eventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(@tag, eventStream) if eventStream + $log.info("in_kube_node::parse_and_emit_records: number of mdm node inventory records emitted #{eventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@MDMKubeNodeInventoryTag, eventStream) if eventStream + eventStream = nil end - #:optimize:kubeperf merge - begin - #if(!nodeInventory.empty?) - nodeMetricDataItems = [] - #allocatable metrics @ node level - nodeMetricDataItems.concat(KubernetesApiClient.parseNodeLimits(nodeInventory, "allocatable", "cpu", "cpuAllocatableNanoCores", batchTime)) - nodeMetricDataItems.concat(KubernetesApiClient.parseNodeLimits(nodeInventory, "allocatable", "memory", "memoryAllocatableBytes", batchTime)) - #capacity metrics @ node level - nodeMetricDataItems.concat(KubernetesApiClient.parseNodeLimits(nodeInventory, "capacity", "cpu", "cpuCapacityNanoCores", batchTime)) - nodeMetricDataItems.concat(KubernetesApiClient.parseNodeLimits(nodeInventory, "capacity", "memory", "memoryCapacityBytes", batchTime)) - - kubePerfEventStream = MultiEventStream.new - - nodeMetricDataItems.each do |record| - record["DataType"] = "LINUX_PERF_BLOB" - record["IPName"] = "LogManagement" - kubePerfEventStream.add(emitTime, record) if record - end - #end - router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream - - #start GPU InsightsMetrics items - begin - nodeGPUInsightsMetricsDataItems = [] - nodeGPUInsightsMetricsDataItems.concat(KubernetesApiClient.parseNodeLimitsAsInsightsMetrics(nodeInventory, "allocatable", "nvidia.com/gpu", "nodeGpuAllocatable", batchTime)) - nodeGPUInsightsMetricsDataItems.concat(KubernetesApiClient.parseNodeLimitsAsInsightsMetrics(nodeInventory, "capacity", "nvidia.com/gpu", "nodeGpuCapacity", batchTime)) - - nodeGPUInsightsMetricsDataItems.concat(KubernetesApiClient.parseNodeLimitsAsInsightsMetrics(nodeInventory, "allocatable", "amd.com/gpu", "nodeGpuAllocatable", batchTime)) - nodeGPUInsightsMetricsDataItems.concat(KubernetesApiClient.parseNodeLimitsAsInsightsMetrics(nodeInventory, "capacity", "amd.com/gpu", "nodeGpuCapacity", batchTime)) - - nodeGPUInsightsMetricsDataItems.each do |insightsMetricsRecord| - wrapper = { - "DataType" => "INSIGHTS_METRICS_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [insightsMetricsRecord.each { |k, v| 
insightsMetricsRecord[k] = v }], - } - insightsMetricsEventStream.add(emitTime, wrapper) if wrapper - end - - router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream - if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) - $log.info("kubeNodeInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") - end - rescue => errorStr - $log.warn "Failed when processing GPU metrics in_kube_nodes : #{errorStr}" - $log.debug_backtrace(errorStr.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) - end - #end GPU InsightsMetrics items - rescue => errorStr - $log.warn "Failed in enumerate for KubePerf from in_kube_nodes : #{errorStr}" - $log.debug_backtrace(errorStr.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + if containerNodeInventoryEventStream.count > 0 + $log.info("in_kube_node::parse_and_emit_records: number of container node inventory records emitted #{containerNodeInventoryEventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@ContainerNodeInventoryTag, containerNodeInventoryEventStream) if containerNodeInventoryEventStream + containerNodeInventoryEventStream = nil end - #:optimize:end kubeperf merge + if kubePerfEventStream.count > 0 + $log.info("in_kube_nodes::parse_and_emit_records: number of node perf metric records emitted #{kubePerfEventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream + kubePerfEventStream = nil + end + if insightsMetricsEventStream.count > 0 + $log.info("in_kube_nodes::parse_and_emit_records: number of GPU node perf metric records emitted #{insightsMetricsEventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + insightsMetricsEventStream = nil + end rescue => errorStr $log.warn "Failed to retrieve node inventory: #{errorStr}" $log.debug_backtrace(errorStr.backtrace) @@ -352,5 +357,112 @@ def run_periodic end @mutex.unlock end + + # TODO - move this method to KubernetesClient or helper class + def getNodeInventoryRecord(item, batchTime = Time.utc.iso8601) + record = {} + begin + record["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated + record["Computer"] = item["metadata"]["name"] + record["ClusterName"] = KubernetesApiClient.getClusterName + record["ClusterId"] = KubernetesApiClient.getClusterId + record["CreationTimeStamp"] = item["metadata"]["creationTimestamp"] + record["Labels"] = [item["metadata"]["labels"]] + record["Status"] = "" + + if !item["spec"]["providerID"].nil? && !item["spec"]["providerID"].empty? + if File.file?(@@AzStackCloudFileName) # existence of this file indicates agent running on azstack + record["KubernetesProviderID"] = "azurestack" + else + #Multicluster kusto query is filtering after splitting by ":" to the left, so do the same here + #https://msazure.visualstudio.com/One/_git/AzureUX-Monitoring?path=%2Fsrc%2FMonitoringExtension%2FClient%2FInfraInsights%2FData%2FQueryTemplates%2FMultiClusterKustoQueryTemplate.ts&_a=contents&version=GBdev + provider = item["spec"]["providerID"].split(":")[0] + if !provider.nil? && !provider.empty? 
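+ # providerID is typically of the form "azure:///subscriptions/...", so keeping only the
+ # scheme portion matches the multicluster Kusto query's split-by-":" behavior.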
+ record["KubernetesProviderID"] = provider + else + record["KubernetesProviderID"] = item["spec"]["providerID"] + end + end + else + record["KubernetesProviderID"] = "onprem" + end + + # Refer to https://kubernetes.io/docs/concepts/architecture/nodes/#condition for possible node conditions. + # We check the status of each condition e.g. {"type": "OutOfDisk","status": "False"} . Based on this we + # populate the KubeNodeInventory Status field. A possible value for this field could be "Ready OutofDisk" + # implying that the node is ready for hosting pods, however its out of disk. + if item["status"].key?("conditions") && !item["status"]["conditions"].empty? + allNodeConditions = "" + item["status"]["conditions"].each do |condition| + if condition["status"] == "True" + if !allNodeConditions.empty? + allNodeConditions = allNodeConditions + "," + condition["type"] + else + allNodeConditions = condition["type"] + end + end + #collect last transition to/from ready (no matter ready is true/false) + if condition["type"] == "Ready" && !condition["lastTransitionTime"].nil? + record["LastTransitionTimeReady"] = condition["lastTransitionTime"] + end + end + if !allNodeConditions.empty? + record["Status"] = allNodeConditions + end + end + nodeInfo = item["status"]["nodeInfo"] + record["KubeletVersion"] = nodeInfo["kubeletVersion"] + record["KubeProxyVersion"] = nodeInfo["kubeProxyVersion"] + rescue => errorStr + $log.warn "in_kube_nodes::getNodeInventoryRecord:Failed: #{errorStr}" + end + return record + end + + # TODO - move this method to KubernetesClient or helper class + def getContainerNodeInventoryRecord(item, batchTime = Time.utc.iso8601) + containerNodeInventoryRecord = {} + begin + containerNodeInventoryRecord["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated + containerNodeInventoryRecord["Computer"] = item["metadata"]["name"] + nodeInfo = item["status"]["nodeInfo"] + containerNodeInventoryRecord["OperatingSystem"] = nodeInfo["osImage"] + containerRuntimeVersion = nodeInfo["containerRuntimeVersion"] + if containerRuntimeVersion.downcase.start_with?("docker://") + containerNodeInventoryRecord["DockerVersion"] = containerRuntimeVersion.split("//")[1] + else + # using containerRuntimeVersion as DockerVersion as is for non docker runtimes + containerNodeInventoryRecord["DockerVersion"] = containerRuntimeVersion + end + rescue => errorStr + $log.warn "in_kube_nodes::getContainerNodeInventoryRecord:Failed: #{errorStr}" + end + return containerNodeInventoryRecord + end + + # TODO - move this method to KubernetesClient or helper class + def getNodeTelemetryProps(item) + properties = {} + begin + properties["Computer"] = item["metadata"]["name"] + nodeInfo = item["status"]["nodeInfo"] + properties["KubeletVersion"] = nodeInfo["kubeletVersion"] + properties["OperatingSystem"] = nodeInfo["osImage"] + properties["KernelVersion"] = nodeInfo["kernelVersion"] + properties["OSImage"] = nodeInfo["osImage"] + containerRuntimeVersion = nodeInfo["containerRuntimeVersion"] + if containerRuntimeVersion.downcase.start_with?("docker://") + properties["DockerVersion"] = containerRuntimeVersion.split("//")[1] + else + # using containerRuntimeVersion as DockerVersion as is for non docker runtimes + properties["DockerVersion"] = containerRuntimeVersion + end + properties["NODES_CHUNK_SIZE"] = @NODES_CHUNK_SIZE + properties["NODES_EMIT_STREAM_BATCH_SIZE"] = @NODES_EMIT_STREAM_BATCH_SIZE + rescue => errorStr + $log.warn 
"in_kube_nodes::getContainerNodeIngetNodeTelemetryPropsventoryRecord:Failed: #{errorStr}" + end + return properties + end end # Kube_Node_Input end # module diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index bba3e920f..0cff2eefe 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -2,7 +2,7 @@ # frozen_string_literal: true module Fluent - require_relative "podinventory_to_mdm" + require_relative "podinventory_to_mdm" class Kube_PodInventory_Input < Input Plugin.register_input("kubepodinventory", self) @@ -19,7 +19,7 @@ def initialize require "yajl" require "set" require "time" - + require_relative "kubernetes_container_inventory" require_relative "KubernetesApiClient" require_relative "ApplicationInsightsUtility" @@ -27,11 +27,18 @@ def initialize require_relative "omslog" require_relative "constants" - @PODS_CHUNK_SIZE = "1500" + # refer tomlparser-agent-config for updating defaults + # this configurable via configmap + @PODS_CHUNK_SIZE = 0 + @PODS_EMIT_STREAM_BATCH_SIZE = 0 + @podCount = 0 + @serviceCount = 0 @controllerSet = Set.new [] @winContainerCount = 0 @controllerData = {} + @podInventoryE2EProcessingLatencyMs = 0 + @podsAPIE2ELatencyMs = 0 end config_param :run_interval, :time, :default => 60 @@ -44,6 +51,24 @@ def configure(conf) def start if @run_interval + if !ENV["PODS_CHUNK_SIZE"].nil? && !ENV["PODS_CHUNK_SIZE"].empty? && ENV["PODS_CHUNK_SIZE"].to_i > 0 + @PODS_CHUNK_SIZE = ENV["PODS_CHUNK_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kube_podinventory::start: setting to default value since got PODS_CHUNK_SIZE nil or empty") + @PODS_CHUNK_SIZE = 1000 + end + $log.info("in_kube_podinventory::start : PODS_CHUNK_SIZE @ #{@PODS_CHUNK_SIZE}") + + if !ENV["PODS_EMIT_STREAM_BATCH_SIZE"].nil? && !ENV["PODS_EMIT_STREAM_BATCH_SIZE"].empty? 
&& ENV["PODS_EMIT_STREAM_BATCH_SIZE"].to_i > 0 + @PODS_EMIT_STREAM_BATCH_SIZE = ENV["PODS_EMIT_STREAM_BATCH_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kube_podinventory::start: setting to default value since got PODS_EMIT_STREAM_BATCH_SIZE nil or empty") + @PODS_EMIT_STREAM_BATCH_SIZE = 200 + end + $log.info("in_kube_podinventory::start : PODS_EMIT_STREAM_BATCH_SIZE @ #{@PODS_EMIT_STREAM_BATCH_SIZE}") + @finished = false @condition = ConditionVariable.new @mutex = Mutex.new @@ -67,12 +92,15 @@ def enumerate(podList = nil) podInventory = podList telemetryFlush = false @podCount = 0 + @serviceCount = 0 @controllerSet = Set.new [] @winContainerCount = 0 @controllerData = {} currentTime = Time.now batchTime = currentTime.utc.iso8601 - + serviceRecords = [] + @podInventoryE2EProcessingLatencyMs = 0 + podInventoryStartTime = (Time.now.to_f * 1000).to_i # Get services first so that we dont need to make a call for very chunk $log.info("in_kube_podinventory::enumerate : Getting services from Kube API @ #{Time.now.utc.iso8601}") serviceInfo = KubernetesApiClient.getKubeResourceInfo("services") @@ -84,32 +112,48 @@ def enumerate(podList = nil) serviceList = Yajl::Parser.parse(StringIO.new(serviceInfo.body)) $log.info("in_kube_podinventory::enumerate:End:Parsing services data using yajl @ #{Time.now.utc.iso8601}") serviceInfo = nil + # service inventory records much smaller and fixed size compared to serviceList + serviceRecords = KubernetesApiClient.getKubeServicesInventoryRecords(serviceList, batchTime) + # updating for telemetry + @serviceCount += serviceRecords.length + serviceList = nil end + # to track e2e processing latency + @podsAPIE2ELatencyMs = 0 + podsAPIChunkStartTime = (Time.now.to_f * 1000).to_i # Initializing continuation token to nil continuationToken = nil $log.info("in_kube_podinventory::enumerate : Getting pods from Kube API @ #{Time.now.utc.iso8601}") continuationToken, podInventory = KubernetesApiClient.getResourcesAndContinuationToken("pods?limit=#{@PODS_CHUNK_SIZE}") $log.info("in_kube_podinventory::enumerate : Done getting pods from Kube API @ #{Time.now.utc.iso8601}") + podsAPIChunkEndTime = (Time.now.to_f * 1000).to_i + @podsAPIE2ELatencyMs = (podsAPIChunkEndTime - podsAPIChunkStartTime) if (!podInventory.nil? && !podInventory.empty? && podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) - parse_and_emit_records(podInventory, serviceList, continuationToken, batchTime) + $log.info("in_kube_podinventory::enumerate : number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") + parse_and_emit_records(podInventory, serviceRecords, continuationToken, batchTime) else $log.warn "in_kube_podinventory::enumerate:Received empty podInventory" end #If we receive a continuation token, make calls, process and flush data until we have processed all data while (!continuationToken.nil? && !continuationToken.empty?) + podsAPIChunkStartTime = (Time.now.to_f * 1000).to_i continuationToken, podInventory = KubernetesApiClient.getResourcesAndContinuationToken("pods?limit=#{@PODS_CHUNK_SIZE}&continue=#{continuationToken}") + podsAPIChunkEndTime = (Time.now.to_f * 1000).to_i + @podsAPIE2ELatencyMs = @podsAPIE2ELatencyMs + (podsAPIChunkEndTime - podsAPIChunkStartTime) if (!podInventory.nil? && !podInventory.empty? && podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) 
- parse_and_emit_records(podInventory, serviceList, continuationToken, batchTime) + $log.info("in_kube_podinventory::enumerate : number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") + parse_and_emit_records(podInventory, serviceRecords, continuationToken, batchTime) else $log.warn "in_kube_podinventory::enumerate:Received empty podInventory" end end + @podInventoryE2EProcessingLatencyMs = ((Time.now.to_f * 1000).to_i - podInventoryStartTime) # Setting these to nil so that we dont hold memory until GC kicks in podInventory = nil - serviceList = nil + serviceRecords = nil # Adding telemetry to send pod telemetry every 5 minutes timeDifference = (DateTime.now.to_time.to_i - @@podTelemetryTimeTracker).abs @@ -122,14 +166,19 @@ def enumerate(podList = nil) if telemetryFlush == true telemetryProperties = {} telemetryProperties["Computer"] = @@hostName + telemetryProperties["PODS_CHUNK_SIZE"] = @PODS_CHUNK_SIZE + telemetryProperties["PODS_EMIT_STREAM_BATCH_SIZE"] = @PODS_EMIT_STREAM_BATCH_SIZE ApplicationInsightsUtility.sendCustomEvent("KubePodInventoryHeartBeatEvent", telemetryProperties) ApplicationInsightsUtility.sendMetricTelemetry("PodCount", @podCount, {}) + ApplicationInsightsUtility.sendMetricTelemetry("ServiceCount", @serviceCount, {}) telemetryProperties["ControllerData"] = @controllerData.to_json ApplicationInsightsUtility.sendMetricTelemetry("ControllerCount", @controllerSet.length, telemetryProperties) if @winContainerCount > 0 telemetryProperties["ClusterWideWindowsContainersCount"] = @winContainerCount ApplicationInsightsUtility.sendCustomEvent("WindowsContainerInventoryEvent", telemetryProperties) end + ApplicationInsightsUtility.sendMetricTelemetry("PodInventoryE2EProcessingLatencyMs", @podInventoryE2EProcessingLatencyMs, telemetryProperties) + ApplicationInsightsUtility.sendMetricTelemetry("PodsAPIE2ELatencyMs", @podsAPIE2ELatencyMs, telemetryProperties) @@podTelemetryTimeTracker = DateTime.now.to_time.to_i end rescue => errorStr @@ -137,260 +186,138 @@ def enumerate(podList = nil) $log.debug_backtrace(errorStr.backtrace) ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) end - end + end - def parse_and_emit_records(podInventory, serviceList, continuationToken, batchTime = Time.utc.iso8601) + def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batchTime = Time.utc.iso8601) currentTime = Time.now emitTime = currentTime.to_f #batchTime = currentTime.utc.iso8601 eventStream = MultiEventStream.new + kubePerfEventStream = MultiEventStream.new + insightsMetricsEventStream = MultiEventStream.new @@istestvar = ENV["ISTEST"] begin #begin block start # Getting windows nodes from kubeapi winNodes = KubernetesApiClient.getWindowsNodesArray - - podInventory["items"].each do |items| #podInventory block start - containerInventoryRecords = [] - records = [] - record = {} - record["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated - record["Name"] = items["metadata"]["name"] - podNameSpace = items["metadata"]["namespace"] - - # For ARO v3 cluster, skip the pods scheduled on to master or infra nodes - if KubernetesApiClient.isAROV3Cluster && !items["spec"].nil? && !items["spec"]["nodeName"].nil? && - (items["spec"]["nodeName"].downcase.start_with?("infra-") || - items["spec"]["nodeName"].downcase.start_with?("master-")) - next - end - - podUid = KubernetesApiClient.getPodUid(podNameSpace, items["metadata"]) - if podUid.nil? 
- next - end - record["PodUid"] = podUid - record["PodLabel"] = [items["metadata"]["labels"]] - record["Namespace"] = podNameSpace - record["PodCreationTimeStamp"] = items["metadata"]["creationTimestamp"] - #for unscheduled (non-started) pods startTime does NOT exist - if !items["status"]["startTime"].nil? - record["PodStartTime"] = items["status"]["startTime"] - else - record["PodStartTime"] = "" - end - #podStatus - # the below is for accounting 'NodeLost' scenario, where-in the pod(s) in the lost node is still being reported as running - podReadyCondition = true - if !items["status"]["reason"].nil? && items["status"]["reason"] == "NodeLost" && !items["status"]["conditions"].nil? - items["status"]["conditions"].each do |condition| - if condition["type"] == "Ready" && condition["status"] == "False" - podReadyCondition = false - break - end + podInventory["items"].each do |item| #podInventory block start + # pod inventory records + podInventoryRecords = getPodInventoryRecords(item, serviceRecords, batchTime) + podInventoryRecords.each do |record| + if !record.nil? + wrapper = { + "DataType" => "KUBE_POD_INVENTORY_BLOB", + "IPName" => "ContainerInsights", + "DataItems" => [record.each { |k, v| record[k] = v }], + } + eventStream.add(emitTime, wrapper) if wrapper + @inventoryToMdmConvertor.process_pod_inventory_record(wrapper) end end - - if podReadyCondition == false - record["PodStatus"] = "Unknown" - # ICM - https://portal.microsofticm.com/imp/v3/incidents/details/187091803/home - elsif !items["metadata"]["deletionTimestamp"].nil? && !items["metadata"]["deletionTimestamp"].empty? - record["PodStatus"] = Constants::POD_STATUS_TERMINATING - else - record["PodStatus"] = items["status"]["phase"] - end - #for unscheduled (non-started) pods podIP does NOT exist - if !items["status"]["podIP"].nil? - record["PodIp"] = items["status"]["podIP"] - else - record["PodIp"] = "" - end - #for unscheduled (non-started) pods nodeName does NOT exist - if !items["spec"]["nodeName"].nil? - record["Computer"] = items["spec"]["nodeName"] - else - record["Computer"] = "" - end - # Setting this flag to true so that we can send ContainerInventory records for containers # on windows nodes and parse environment variables for these containers if winNodes.length > 0 - if (!record["Computer"].empty? && (winNodes.include? record["Computer"])) + nodeName = "" + if !item["spec"]["nodeName"].nil? + nodeName = item["spec"]["nodeName"] + end + if (!nodeName.empty? && (winNodes.include? nodeName)) clusterCollectEnvironmentVar = ENV["AZMON_CLUSTER_COLLECT_ENV_VAR"] #Generate ContainerInventory records for windows nodes so that we can get image and image tag in property panel - containerInventoryRecordsInPodItem = KubernetesContainerInventory.getContainerInventoryRecords(items, batchTime, clusterCollectEnvironmentVar, true) - containerInventoryRecordsInPodItem.each do |containerRecord| - containerInventoryRecords.push(containerRecord) - end + containerInventoryRecords = KubernetesContainerInventory.getContainerInventoryRecords(item, batchTime, clusterCollectEnvironmentVar, true) + # Send container inventory records for containers on windows nodes + @winContainerCount += containerInventoryRecords.length + containerInventoryRecords.each do |cirecord| + if !cirecord.nil? 
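+ # Wrap each windows container inventory record as a CONTAINER_INVENTORY_BLOB item; these
+ # ride on the same event stream (and tag) as the pod inventory records.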
+ ciwrapper = { + "DataType" => "CONTAINER_INVENTORY_BLOB", + "IPName" => "ContainerInsights", + "DataItems" => [cirecord.each { |k, v| cirecord[k] = v }], + } + eventStream.add(emitTime, ciwrapper) if ciwrapper + end + end end end - record["ClusterId"] = KubernetesApiClient.getClusterId - record["ClusterName"] = KubernetesApiClient.getClusterName - record["ServiceName"] = getServiceNameFromLabels(items["metadata"]["namespace"], items["metadata"]["labels"], serviceList) - - if !items["metadata"]["ownerReferences"].nil? - record["ControllerKind"] = items["metadata"]["ownerReferences"][0]["kind"] - record["ControllerName"] = items["metadata"]["ownerReferences"][0]["name"] - @controllerSet.add(record["ControllerKind"] + record["ControllerName"]) - #Adding controller kind to telemetry ro information about customer workload - if (@controllerData[record["ControllerKind"]].nil?) - @controllerData[record["ControllerKind"]] = 1 - else - controllerValue = @controllerData[record["ControllerKind"]] - @controllerData[record["ControllerKind"]] += 1 + if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && eventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_podinventory::parse_and_emit_records: number of pod inventory records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubePodInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") end + router.emit_stream(@tag, eventStream) if eventStream + eventStream = MultiEventStream.new end - podRestartCount = 0 - record["PodRestartCount"] = 0 - #Invoke the helper method to compute ready/not ready mdm metric - @inventoryToMdmConvertor.process_record_for_pods_ready_metric(record["ControllerName"], record["Namespace"], items["status"]["conditions"]) + #container perf records + containerMetricDataItems = [] + containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "requests", "cpu", "cpuRequestNanoCores", batchTime)) + containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "requests", "memory", "memoryRequestBytes", batchTime)) + containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "limits", "cpu", "cpuLimitNanoCores", batchTime)) + containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "limits", "memory", "memoryLimitBytes", batchTime)) - podContainers = [] - if items["status"].key?("containerStatuses") && !items["status"]["containerStatuses"].empty? - podContainers = podContainers + items["status"]["containerStatuses"] - end - # Adding init containers to the record list as well. - if items["status"].key?("initContainerStatuses") && !items["status"]["initContainerStatuses"].empty? - podContainers = podContainers + items["status"]["initContainerStatuses"] + containerMetricDataItems.each do |record| + record["DataType"] = "LINUX_PERF_BLOB" + record["IPName"] = "LogManagement" + kubePerfEventStream.add(emitTime, record) if record end - # if items["status"].key?("containerStatuses") && !items["status"]["containerStatuses"].empty? #container status block start - if !podContainers.empty? 
#container status block start - podContainers.each do |container| - containerRestartCount = 0 - lastFinishedTime = nil - # Need this flag to determine if we need to process container data for mdm metrics like oomkilled and container restart - #container Id is of the form - #docker://dfd9da983f1fd27432fb2c1fe3049c0a1d25b1c697b2dc1a530c986e58b16527 - if !container["containerID"].nil? - record["ContainerID"] = container["containerID"].split("//")[1] - else - # for containers that have image issues (like invalid image/tag etc..) this will be empty. do not make it all 0 - record["ContainerID"] = "" - end - #keeping this as which is same as InstanceName in perf table - if podUid.nil? || container["name"].nil? - next - else - record["ContainerName"] = podUid + "/" + container["name"] - end - #Pod restart count is a sumtotal of restart counts of individual containers - #within the pod. The restart count of a container is maintained by kubernetes - #itself in the form of a container label. - containerRestartCount = container["restartCount"] - record["ContainerRestartCount"] = containerRestartCount - - containerStatus = container["state"] - record["ContainerStatusReason"] = "" - # state is of the following form , so just picking up the first key name - # "state": { - # "waiting": { - # "reason": "CrashLoopBackOff", - # "message": "Back-off 5m0s restarting failed container=metrics-server pod=metrics-server-2011498749-3g453_kube-system(5953be5f-fcae-11e7-a356-000d3ae0e432)" - # } - # }, - # the below is for accounting 'NodeLost' scenario, where-in the containers in the lost node/pod(s) is still being reported as running - if podReadyCondition == false - record["ContainerStatus"] = "Unknown" - else - record["ContainerStatus"] = containerStatus.keys[0] - end - #TODO : Remove ContainerCreationTimeStamp from here since we are sending it as a metric - #Picking up both container and node start time from cAdvisor to be consistent - if containerStatus.keys[0] == "running" - record["ContainerCreationTimeStamp"] = container["state"]["running"]["startedAt"] - else - if !containerStatus[containerStatus.keys[0]]["reason"].nil? && !containerStatus[containerStatus.keys[0]]["reason"].empty? - record["ContainerStatusReason"] = containerStatus[containerStatus.keys[0]]["reason"] - end - # Process the record to see if job was completed 6 hours ago. If so, send metric to mdm - if !record["ControllerKind"].nil? && record["ControllerKind"].downcase == Constants::CONTROLLER_KIND_JOB - @inventoryToMdmConvertor.process_record_for_terminated_job_metric(record["ControllerName"], record["Namespace"], containerStatus) - end - end - - # Record the last state of the container. This may have information on why a container was killed. - begin - if !container["lastState"].nil? && container["lastState"].keys.length == 1 - lastStateName = container["lastState"].keys[0] - lastStateObject = container["lastState"][lastStateName] - if !lastStateObject.is_a?(Hash) - raise "expected a hash object. 
This could signify a bug or a kubernetes API change" - end - - if lastStateObject.key?("reason") && lastStateObject.key?("startedAt") && lastStateObject.key?("finishedAt") - newRecord = Hash.new - newRecord["lastState"] = lastStateName # get the name of the last state (ex: terminated) - lastStateReason = lastStateObject["reason"] - # newRecord["reason"] = lastStateObject["reason"] # (ex: OOMKilled) - newRecord["reason"] = lastStateReason # (ex: OOMKilled) - newRecord["startedAt"] = lastStateObject["startedAt"] # (ex: 2019-07-02T14:58:51Z) - lastFinishedTime = lastStateObject["finishedAt"] - newRecord["finishedAt"] = lastFinishedTime # (ex: 2019-07-02T14:58:52Z) - - # only write to the output field if everything previously ran without error - record["ContainerLastStatus"] = newRecord - - #Populate mdm metric for OOMKilled container count if lastStateReason is OOMKilled - if lastStateReason.downcase == Constants::REASON_OOM_KILLED - @inventoryToMdmConvertor.process_record_for_oom_killed_metric(record["ControllerName"], record["Namespace"], lastFinishedTime) - end - lastStateReason = nil - else - record["ContainerLastStatus"] = Hash.new - end - else - record["ContainerLastStatus"] = Hash.new - end - - #Populate mdm metric for container restart count if greater than 0 - if (!containerRestartCount.nil? && (containerRestartCount.is_a? Integer) && containerRestartCount > 0) - @inventoryToMdmConvertor.process_record_for_container_restarts_metric(record["ControllerName"], record["Namespace"], lastFinishedTime) - end - rescue => errorStr - $log.warn "Failed in parse_and_emit_record pod inventory while processing ContainerLastStatus: #{errorStr}" - $log.debug_backtrace(errorStr.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) - record["ContainerLastStatus"] = Hash.new - end + if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && kubePerfEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_podinventory::parse_and_emit_records: number of container perf records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream + kubePerfEventStream = MultiEventStream.new + end - podRestartCount += containerRestartCount - records.push(record.dup) - end - else # for unscheduled pods there are no status.containerStatuses, in this case we still want the pod - records.push(record) - end #container status block end - records.each do |record| - if !record.nil? 
- record["PodRestartCount"] = podRestartCount - wrapper = { - "DataType" => "KUBE_POD_INVENTORY_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [record.each { |k, v| record[k] = v }], - } - eventStream.add(emitTime, wrapper) if wrapper - @inventoryToMdmConvertor.process_pod_inventory_record(wrapper) - end + # container GPU records + containerGPUInsightsMetricsDataItems = [] + containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "requests", "nvidia.com/gpu", "containerGpuRequests", batchTime)) + containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "limits", "nvidia.com/gpu", "containerGpuLimits", batchTime)) + containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "requests", "amd.com/gpu", "containerGpuRequests", batchTime)) + containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "limits", "amd.com/gpu", "containerGpuLimits", batchTime)) + containerGPUInsightsMetricsDataItems.each do |insightsMetricsRecord| + wrapper = { + "DataType" => "INSIGHTS_METRICS_BLOB", + "IPName" => "ContainerInsights", + "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], + } + insightsMetricsEventStream.add(emitTime, wrapper) if wrapper end - # Send container inventory records for containers on windows nodes - @winContainerCount += containerInventoryRecords.length - containerInventoryRecords.each do |cirecord| - if !cirecord.nil? - ciwrapper = { - "DataType" => "CONTAINER_INVENTORY_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [cirecord.each { |k, v| cirecord[k] = v }], - } - eventStream.add(emitTime, ciwrapper) if ciwrapper + + if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && insightsMetricsEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_podinventory::parse_and_emit_records: number of GPU insights metrics records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubePodInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") end + router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + insightsMetricsEventStream = MultiEventStream.new end end #podInventory block end - router.emit_stream(@tag, eventStream) if eventStream + if eventStream.count > 0 + $log.info("in_kube_podinventory::parse_and_emit_records: number of pod inventory records emitted #{eventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(@tag, eventStream) if eventStream + if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) + $log.info("kubePodInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end + eventStream = nil + end + + if kubePerfEventStream.count > 0 + $log.info("in_kube_podinventory::parse_and_emit_records: number of perf records emitted #{kubePerfEventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream + kubePerfEventStream = nil + end + + if insightsMetricsEventStream.count > 0 + $log.info("in_kube_podinventory::parse_and_emit_records: number of insights metrics records emitted #{insightsMetricsEventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubePodInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end + insightsMetricsEventStream = nil + end - if continuationToken.nil? #no more chunks in this batch to be sent, get all pod inventory records to send + if continuationToken.nil? #no more chunks in this batch to be sent, get all mdm pod inventory records to send @log.info "Sending pod inventory mdm records to out_mdm" pod_inventory_mdm_records = @inventoryToMdmConvertor.get_pod_inventory_mdm_records(batchTime) @log.info "pod_inventory_mdm_records.size #{pod_inventory_mdm_records.size}" @@ -401,101 +328,36 @@ def parse_and_emit_records(podInventory, serviceList, continuationToken, batchTi router.emit_stream(@@MDMKubePodInventoryTag, mdm_pod_inventory_es) if mdm_pod_inventory_es end - #:optimize:kubeperf merge - begin - #if(!podInventory.empty?) - containerMetricDataItems = [] - #hostName = (OMS::Common.get_hostname) - containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(podInventory, "requests", "cpu", "cpuRequestNanoCores", batchTime)) - containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(podInventory, "requests", "memory", "memoryRequestBytes", batchTime)) - containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(podInventory, "limits", "cpu", "cpuLimitNanoCores", batchTime)) - containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(podInventory, "limits", "memory", "memoryLimitBytes", batchTime)) - - kubePerfEventStream = MultiEventStream.new - insightsMetricsEventStream = MultiEventStream.new - - containerMetricDataItems.each do |record| - record["DataType"] = "LINUX_PERF_BLOB" - record["IPName"] = "LogManagement" - kubePerfEventStream.add(emitTime, record) if record - end - #end - router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream - - begin - #start GPU InsightsMetrics items - - containerGPUInsightsMetricsDataItems = [] - containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(podInventory, "requests", "nvidia.com/gpu", "containerGpuRequests", batchTime)) - containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(podInventory, "limits", "nvidia.com/gpu", "containerGpuLimits", batchTime)) - - containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(podInventory, "requests", "amd.com/gpu", "containerGpuRequests", batchTime)) - 
containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(podInventory, "limits", "amd.com/gpu", "containerGpuLimits", batchTime)) - - containerGPUInsightsMetricsDataItems.each do |insightsMetricsRecord| - wrapper = { - "DataType" => "INSIGHTS_METRICS_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], - } - insightsMetricsEventStream.add(emitTime, wrapper) if wrapper - - if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) - $log.info("kubePodInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") - end - end - - router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream - #end GPU InsightsMetrics items - rescue => errorStr - $log.warn "Failed when processing GPU metrics in_kube_podinventory : #{errorStr}" - $log.debug_backtrace(errorStr.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) - end - rescue => errorStr - $log.warn "Failed in parse_and_emit_record for KubePerf from in_kube_podinventory : #{errorStr}" - $log.debug_backtrace(errorStr.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) - end - #:optimize:end kubeperf merge - - #:optimize:start kubeservices merge - begin - if (!serviceList.nil? && !serviceList.empty?) - kubeServicesEventStream = MultiEventStream.new - serviceList["items"].each do |items| - kubeServiceRecord = {} - kubeServiceRecord["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated - kubeServiceRecord["ServiceName"] = items["metadata"]["name"] - kubeServiceRecord["Namespace"] = items["metadata"]["namespace"] - kubeServiceRecord["SelectorLabels"] = [items["spec"]["selector"]] + if continuationToken.nil? # sending kube services inventory records + kubeServicesEventStream = MultiEventStream.new + serviceRecords.each do |kubeServiceRecord| + if !kubeServiceRecord.nil? 
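The emission changes above (and the service-record loop that continues below) move the plugin to batched flushing: records accumulate in a MultiEventStream, the stream is emitted and replaced as soon as it reaches @PODS_EMIT_STREAM_BATCH_SIZE, and any remainder is flushed at the end of the batch. A minimal sketch of that pattern; the BatchEmitter class, the fake router, the tag, and the batch size of 2 are illustrative assumptions, not plugin code.

require "fluent/event"   # provides Fluent::MultiEventStream (part of the fluentd gem)

class BatchEmitter
  def initialize(router, tag, batch_size)
    @router, @tag, @batch_size = router, tag, batch_size
    @stream = Fluent::MultiEventStream.new
  end

  # Buffer a record; flush as soon as the stream reaches the configured batch size.
  def add(time, record)
    @stream.add(time, record)
    flush if @batch_size > 0 && @stream.size >= @batch_size
  end

  # Emit whatever is buffered (also called once at the end for the remainder).
  def flush
    return if @stream.empty?
    @router.emit_stream(@tag, @stream)
    @stream = Fluent::MultiEventStream.new
  end
end

fake_router = Object.new
def fake_router.emit_stream(tag, es); puts "emitted #{es.size} records to #{tag}"; end

emitter = BatchEmitter.new(fake_router, "sample.tag", 2)
3.times { |i| emitter.add(Time.now.to_f, { "Name" => "pod-#{i}" }) }   # flushes once at 2 records
emitter.flush                                                          # flushes the remaining record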
+ # adding before emit to reduce memory foot print kubeServiceRecord["ClusterId"] = KubernetesApiClient.getClusterId kubeServiceRecord["ClusterName"] = KubernetesApiClient.getClusterName - kubeServiceRecord["ClusterIP"] = items["spec"]["clusterIP"] - kubeServiceRecord["ServiceType"] = items["spec"]["type"] - # : Add ports and status fields kubeServicewrapper = { "DataType" => "KUBE_SERVICES_BLOB", "IPName" => "ContainerInsights", "DataItems" => [kubeServiceRecord.each { |k, v| kubeServiceRecord[k] = v }], } kubeServicesEventStream.add(emitTime, kubeServicewrapper) if kubeServicewrapper + if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && kubeServicesEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_podinventory::parse_and_emit_records: number of service records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@kubeservicesTag, kubeServicesEventStream) if kubeServicesEventStream + kubeServicesEventStream = MultiEventStream.new + end end + end + + if kubeServicesEventStream.count > 0 + $log.info("in_kube_podinventory::parse_and_emit_records : number of service records emitted #{kubeServicesEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@@kubeservicesTag, kubeServicesEventStream) if kubeServicesEventStream end - rescue => errorStr - $log.warn "Failed in parse_and_emit_record for KubeServices from in_kube_podinventory : #{errorStr}" - $log.debug_backtrace(errorStr.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + kubeServicesEventStream = nil end - #:optimize:end kubeservices merge #Updating value for AppInsights telemetry @podCount += podInventory["items"].length - - if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && eventStream.count > 0) - $log.info("kubePodInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") - end rescue => errorStr $log.warn "Failed in parse_and_emit_record pod inventory: #{errorStr}" $log.debug_backtrace(errorStr.backtrace) @@ -535,25 +397,238 @@ def run_periodic @mutex.unlock end - def getServiceNameFromLabels(namespace, labels, serviceList) + # TODO - move this method to KubernetesClient or helper class + def getPodInventoryRecords(item, serviceRecords, batchTime = Time.utc.iso8601) + records = [] + record = {} + + begin + record["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated + record["Name"] = item["metadata"]["name"] + podNameSpace = item["metadata"]["namespace"] + podUid = KubernetesApiClient.getPodUid(podNameSpace, item["metadata"]) + if podUid.nil? + return records + end + + nodeName = "" + #for unscheduled (non-started) pods nodeName does NOT exist + if !item["spec"]["nodeName"].nil? + nodeName = item["spec"]["nodeName"] + end + # For ARO v3 cluster, skip the pods scheduled on to master or infra nodes + if KubernetesApiClient.isAROv3MasterOrInfraPod(nodeName) + return records + end + + record["PodUid"] = podUid + record["PodLabel"] = [item["metadata"]["labels"]] + record["Namespace"] = podNameSpace + record["PodCreationTimeStamp"] = item["metadata"]["creationTimestamp"] + #for unscheduled (non-started) pods startTime does NOT exist + if !item["status"]["startTime"].nil? + record["PodStartTime"] = item["status"]["startTime"] + else + record["PodStartTime"] = "" + end + #podStatus + # the below is for accounting 'NodeLost' scenario, where-in the pod(s) in the lost node is still being reported as running + podReadyCondition = true + if !item["status"]["reason"].nil? 
&& item["status"]["reason"] == "NodeLost" && !item["status"]["conditions"].nil? + item["status"]["conditions"].each do |condition| + if condition["type"] == "Ready" && condition["status"] == "False" + podReadyCondition = false + break + end + end + end + if podReadyCondition == false + record["PodStatus"] = "Unknown" + # ICM - https://portal.microsofticm.com/imp/v3/incidents/details/187091803/home + elsif !item["metadata"]["deletionTimestamp"].nil? && !item["metadata"]["deletionTimestamp"].empty? + record["PodStatus"] = Constants::POD_STATUS_TERMINATING + else + record["PodStatus"] = item["status"]["phase"] + end + #for unscheduled (non-started) pods podIP does NOT exist + if !item["status"]["podIP"].nil? + record["PodIp"] = item["status"]["podIP"] + else + record["PodIp"] = "" + end + + record["Computer"] = nodeName + record["ClusterId"] = KubernetesApiClient.getClusterId + record["ClusterName"] = KubernetesApiClient.getClusterName + record["ServiceName"] = getServiceNameFromLabels(item["metadata"]["namespace"], item["metadata"]["labels"], serviceRecords) + + if !item["metadata"]["ownerReferences"].nil? + record["ControllerKind"] = item["metadata"]["ownerReferences"][0]["kind"] + record["ControllerName"] = item["metadata"]["ownerReferences"][0]["name"] + @controllerSet.add(record["ControllerKind"] + record["ControllerName"]) + #Adding controller kind to telemetry ro information about customer workload + if (@controllerData[record["ControllerKind"]].nil?) + @controllerData[record["ControllerKind"]] = 1 + else + controllerValue = @controllerData[record["ControllerKind"]] + @controllerData[record["ControllerKind"]] += 1 + end + end + podRestartCount = 0 + record["PodRestartCount"] = 0 + + #Invoke the helper method to compute ready/not ready mdm metric + @inventoryToMdmConvertor.process_record_for_pods_ready_metric(record["ControllerName"], record["Namespace"], item["status"]["conditions"]) + + podContainers = [] + if item["status"].key?("containerStatuses") && !item["status"]["containerStatuses"].empty? + podContainers = podContainers + item["status"]["containerStatuses"] + end + # Adding init containers to the record list as well. + if item["status"].key?("initContainerStatuses") && !item["status"]["initContainerStatuses"].empty? + podContainers = podContainers + item["status"]["initContainerStatuses"] + end + # if items["status"].key?("containerStatuses") && !items["status"]["containerStatuses"].empty? #container status block start + if !podContainers.empty? #container status block start + podContainers.each do |container| + containerRestartCount = 0 + lastFinishedTime = nil + # Need this flag to determine if we need to process container data for mdm metrics like oomkilled and container restart + #container Id is of the form + #docker://dfd9da983f1fd27432fb2c1fe3049c0a1d25b1c697b2dc1a530c986e58b16527 + if !container["containerID"].nil? + record["ContainerID"] = container["containerID"].split("//")[1] + else + # for containers that have image issues (like invalid image/tag etc..) this will be empty. do not make it all 0 + record["ContainerID"] = "" + end + #keeping this as which is same as InstanceName in perf table + if podUid.nil? || container["name"].nil? + next + else + record["ContainerName"] = podUid + "/" + container["name"] + end + #Pod restart count is a sumtotal of restart counts of individual containers + #within the pod. The restart count of a container is maintained by kubernetes + #itself in the form of a container label. 
+ containerRestartCount = container["restartCount"] + record["ContainerRestartCount"] = containerRestartCount + + containerStatus = container["state"] + record["ContainerStatusReason"] = "" + # state is of the following form , so just picking up the first key name + # "state": { + # "waiting": { + # "reason": "CrashLoopBackOff", + # "message": "Back-off 5m0s restarting failed container=metrics-server pod=metrics-server-2011498749-3g453_kube-system(5953be5f-fcae-11e7-a356-000d3ae0e432)" + # } + # }, + # the below is for accounting 'NodeLost' scenario, where-in the containers in the lost node/pod(s) is still being reported as running + if podReadyCondition == false + record["ContainerStatus"] = "Unknown" + else + record["ContainerStatus"] = containerStatus.keys[0] + end + #TODO : Remove ContainerCreationTimeStamp from here since we are sending it as a metric + #Picking up both container and node start time from cAdvisor to be consistent + if containerStatus.keys[0] == "running" + record["ContainerCreationTimeStamp"] = container["state"]["running"]["startedAt"] + else + if !containerStatus[containerStatus.keys[0]]["reason"].nil? && !containerStatus[containerStatus.keys[0]]["reason"].empty? + record["ContainerStatusReason"] = containerStatus[containerStatus.keys[0]]["reason"] + end + # Process the record to see if job was completed 6 hours ago. If so, send metric to mdm + if !record["ControllerKind"].nil? && record["ControllerKind"].downcase == Constants::CONTROLLER_KIND_JOB + @inventoryToMdmConvertor.process_record_for_terminated_job_metric(record["ControllerName"], record["Namespace"], containerStatus) + end + end + + # Record the last state of the container. This may have information on why a container was killed. + begin + if !container["lastState"].nil? && container["lastState"].keys.length == 1 + lastStateName = container["lastState"].keys[0] + lastStateObject = container["lastState"][lastStateName] + if !lastStateObject.is_a?(Hash) + raise "expected a hash object. This could signify a bug or a kubernetes API change" + end + + if lastStateObject.key?("reason") && lastStateObject.key?("startedAt") && lastStateObject.key?("finishedAt") + newRecord = Hash.new + newRecord["lastState"] = lastStateName # get the name of the last state (ex: terminated) + lastStateReason = lastStateObject["reason"] + # newRecord["reason"] = lastStateObject["reason"] # (ex: OOMKilled) + newRecord["reason"] = lastStateReason # (ex: OOMKilled) + newRecord["startedAt"] = lastStateObject["startedAt"] # (ex: 2019-07-02T14:58:51Z) + lastFinishedTime = lastStateObject["finishedAt"] + newRecord["finishedAt"] = lastFinishedTime # (ex: 2019-07-02T14:58:52Z) + + # only write to the output field if everything previously ran without error + record["ContainerLastStatus"] = newRecord + + #Populate mdm metric for OOMKilled container count if lastStateReason is OOMKilled + if lastStateReason.downcase == Constants::REASON_OOM_KILLED + @inventoryToMdmConvertor.process_record_for_oom_killed_metric(record["ControllerName"], record["Namespace"], lastFinishedTime) + end + lastStateReason = nil + else + record["ContainerLastStatus"] = Hash.new + end + else + record["ContainerLastStatus"] = Hash.new + end + + #Populate mdm metric for container restart count if greater than 0 + if (!containerRestartCount.nil? && (containerRestartCount.is_a? 
Integer) && containerRestartCount > 0) + @inventoryToMdmConvertor.process_record_for_container_restarts_metric(record["ControllerName"], record["Namespace"], lastFinishedTime) + end + rescue => errorStr + $log.warn "Failed in parse_and_emit_record pod inventory while processing ContainerLastStatus: #{errorStr}" + $log.debug_backtrace(errorStr.backtrace) + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + record["ContainerLastStatus"] = Hash.new + end + + podRestartCount += containerRestartCount + records.push(record.dup) + end + else # for unscheduled pods there are no status.containerStatuses, in this case we still want the pod + records.push(record) + end #container status block end + + records.each do |record| + if !record.nil? + record["PodRestartCount"] = podRestartCount + end + end + rescue => error + $log.warn("getPodInventoryRecords failed: #{error}") + end + return records + end + + # TODO - move this method to KubernetesClient or helper class + def getServiceNameFromLabels(namespace, labels, serviceRecords) serviceName = "" begin if !labels.nil? && !labels.empty? - if (!serviceList.nil? && !serviceList.empty? && serviceList.key?("items") && !serviceList["items"].empty?) - serviceList["items"].each do |item| - found = 0 - if !item["spec"].nil? && !item["spec"]["selector"].nil? && item["metadata"]["namespace"] == namespace - selectorLabels = item["spec"]["selector"] - if !selectorLabels.empty? - selectorLabels.each do |key, value| - if !(labels.select { |k, v| k == key && v == value }.length > 0) - break - end - found = found + 1 + serviceRecords.each do |kubeServiceRecord| + found = 0 + if kubeServiceRecord["Namespace"] == namespace + selectorLabels = {} + # selector labels wrapped in array in kube service records so unwrapping here + if !kubeServiceRecord["SelectorLabels"].nil? && kubeServiceRecord["SelectorLabels"].length > 0 + selectorLabels = kubeServiceRecord["SelectorLabels"][0] + end + if !selectorLabels.nil? && !selectorLabels.empty? 
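getServiceNameFromLabels, which begins just above and concludes below, resolves a pod's ServiceName by unwrapping each service record's SelectorLabels and checking, within the pod's namespace, whether every selector key/value pair is present in the pod's labels; services with no selector never match. A standalone sketch of that rule; the helper name and sample data are assumptions, not the plugin method.

# A service matches when it is in the pod's namespace, has a non-empty selector, and every
# selector key/value pair also appears in the pod's labels.
def service_name_for(namespace, pod_labels, service_records)
  return "" if pod_labels.nil? || pod_labels.empty?
  service_records.each do |svc|
    next unless svc["Namespace"] == namespace
    selector = (svc["SelectorLabels"] || [{}])[0] || {}   # selector labels are wrapped in an array
    next if selector.empty?                               # services without selectors never match
    return svc["ServiceName"] if selector.all? { |k, v| pod_labels[k] == v }
  end
  ""
end

services = [{ "ServiceName" => "web", "Namespace" => "default", "SelectorLabels" => [{ "app" => "web" }] }]
puts service_name_for("default", { "app" => "web", "tier" => "frontend" }, services)   # => "web"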
+ selectorLabels.each do |key, value| + if !(labels.select { |k, v| k == key && v == value }.length > 0) + break end + found = found + 1 end + # service can have no selectors if found == selectorLabels.length - return item["metadata"]["name"] + return kubeServiceRecord["ServiceName"] end end end diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index bcf397150..27e4709a2 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -2,230 +2,238 @@ # frozen_string_literal: true module Fluent - class Kube_Kubestate_Deployments_Input < Input - Plugin.register_input("kubestatedeployments", self) - @@istestvar = ENV["ISTEST"] - # telemetry - To keep telemetry cost reasonable, we keep track of the max deployments over a period of 15m - @@deploymentsCount = 0 - - - - def initialize - super - require "yajl/json_gem" - require "yajl" - require "date" - require "time" - - require_relative "KubernetesApiClient" - require_relative "oms_common" - require_relative "omslog" - require_relative "ApplicationInsightsUtility" - require_relative "constants" - - # roughly each deployment is 8k - # 1000 deployments account to approximately 8MB - @DEPLOYMENTS_CHUNK_SIZE = 1000 - @DEPLOYMENTS_API_GROUP = "apps" - @@telemetryLastSentTime = DateTime.now.to_time.to_i - - - @deploymentsRunningTotal = 0 - - @NodeName = OMS::Common.get_hostname - @ClusterId = KubernetesApiClient.getClusterId - @ClusterName = KubernetesApiClient.getClusterName - end - - config_param :run_interval, :time, :default => 60 - config_param :tag, :string, :default => Constants::INSIGHTSMETRICS_FLUENT_TAG - - def configure(conf) - super - end - - def start - if @run_interval - @finished = false - @condition = ConditionVariable.new - @mutex = Mutex.new - @thread = Thread.new(&method(:run_periodic)) + class Kube_Kubestate_Deployments_Input < Input + Plugin.register_input("kubestatedeployments", self) + @@istestvar = ENV["ISTEST"] + # telemetry - To keep telemetry cost reasonable, we keep track of the max deployments over a period of 15m + @@deploymentsCount = 0 + + def initialize + super + require "yajl/json_gem" + require "yajl" + require "date" + require "time" + + require_relative "KubernetesApiClient" + require_relative "oms_common" + require_relative "omslog" + require_relative "ApplicationInsightsUtility" + require_relative "constants" + + # refer tomlparser-agent-config for defaults + # this configurable via configmap + @DEPLOYMENTS_CHUNK_SIZE = 0 + + @DEPLOYMENTS_API_GROUP = "apps" + @@telemetryLastSentTime = DateTime.now.to_time.to_i + + @deploymentsRunningTotal = 0 + + @NodeName = OMS::Common.get_hostname + @ClusterId = KubernetesApiClient.getClusterId + @ClusterName = KubernetesApiClient.getClusterName + end + + config_param :run_interval, :time, :default => 60 + config_param :tag, :string, :default => Constants::INSIGHTSMETRICS_FLUENT_TAG + + def configure(conf) + super + end + + def start + if @run_interval + if !ENV["DEPLOYMENTS_CHUNK_SIZE"].nil? && !ENV["DEPLOYMENTS_CHUNK_SIZE"].empty? 
&& ENV["DEPLOYMENTS_CHUNK_SIZE"].to_i > 0 + @DEPLOYMENTS_CHUNK_SIZE = ENV["DEPLOYMENTS_CHUNK_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kubestate_deployments::start: setting to default value since got DEPLOYMENTS_CHUNK_SIZE nil or empty") + @DEPLOYMENTS_CHUNK_SIZE = 500 end + $log.info("in_kubestate_deployments::start : DEPLOYMENTS_CHUNK_SIZE @ #{@DEPLOYMENTS_CHUNK_SIZE}") + + @finished = false + @condition = ConditionVariable.new + @mutex = Mutex.new + @thread = Thread.new(&method(:run_periodic)) end - - def shutdown - if @run_interval - @mutex.synchronize { - @finished = true - @condition.signal - } - @thread.join - end + end + + def shutdown + if @run_interval + @mutex.synchronize { + @finished = true + @condition.signal + } + @thread.join end - - def enumerate - begin - deploymentList = nil - currentTime = Time.now - batchTime = currentTime.utc.iso8601 - - #set the running total for this batch to 0 - @deploymentsRunningTotal = 0 - - # Initializing continuation token to nil - continuationToken = nil - $log.info("in_kubestate_deployments::enumerate : Getting deployments from Kube API @ #{Time.now.utc.iso8601}") - continuationToken, deploymentList = KubernetesApiClient.getResourcesAndContinuationToken("deployments?limit=#{@DEPLOYMENTS_CHUNK_SIZE}", api_group: @DEPLOYMENTS_API_GROUP) - $log.info("in_kubestate_deployments::enumerate : Done getting deployments from Kube API @ #{Time.now.utc.iso8601}") + end + + def enumerate + begin + deploymentList = nil + currentTime = Time.now + batchTime = currentTime.utc.iso8601 + + #set the running total for this batch to 0 + @deploymentsRunningTotal = 0 + + # Initializing continuation token to nil + continuationToken = nil + $log.info("in_kubestate_deployments::enumerate : Getting deployments from Kube API @ #{Time.now.utc.iso8601}") + continuationToken, deploymentList = KubernetesApiClient.getResourcesAndContinuationToken("deployments?limit=#{@DEPLOYMENTS_CHUNK_SIZE}", api_group: @DEPLOYMENTS_API_GROUP) + $log.info("in_kubestate_deployments::enumerate : Done getting deployments from Kube API @ #{Time.now.utc.iso8601}") + if (!deploymentList.nil? && !deploymentList.empty? && deploymentList.key?("items") && !deploymentList["items"].nil? && !deploymentList["items"].empty?) + $log.info("in_kubestate_deployments::enumerate : number of deployment items :#{deploymentList["items"].length} from Kube API @ #{Time.now.utc.iso8601}") + parse_and_emit_records(deploymentList, batchTime) + else + $log.warn "in_kubestate_deployments::enumerate:Received empty deploymentList" + end + + #If we receive a continuation token, make calls, process and flush data until we have processed all data + while (!continuationToken.nil? && !continuationToken.empty?) + continuationToken, deploymentList = KubernetesApiClient.getResourcesAndContinuationToken("deployments?limit=#{@DEPLOYMENTS_CHUNK_SIZE}&continue=#{continuationToken}", api_group: @DEPLOYMENTS_API_GROUP) if (!deploymentList.nil? && !deploymentList.empty? && deploymentList.key?("items") && !deploymentList["items"].nil? && !deploymentList["items"].empty?) 
+ $log.info("in_kubestate_deployments::enumerate : number of deployment items :#{deploymentList["items"].length} from Kube API @ #{Time.now.utc.iso8601}") parse_and_emit_records(deploymentList, batchTime) else $log.warn "in_kubestate_deployments::enumerate:Received empty deploymentList" end - - #If we receive a continuation token, make calls, process and flush data until we have processed all data - while (!continuationToken.nil? && !continuationToken.empty?) - continuationToken, deploymentList = KubernetesApiClient.getResourcesAndContinuationToken("deployments?limit=#{@DEPLOYMENTS_CHUNK_SIZE}&continue=#{continuationToken}", api_group: @DEPLOYMENTS_API_GROUP) - if (!deploymentList.nil? && !deploymentList.empty? && deploymentList.key?("items") && !deploymentList["items"].nil? && !deploymentList["items"].empty?) - parse_and_emit_records(deploymentList, batchTime) - else - $log.warn "in_kubestate_deployments::enumerate:Received empty deploymentList" - end + end + + # Setting this to nil so that we dont hold memory until GC kicks in + deploymentList = nil + + $log.info("successfully emitted a total of #{@deploymentsRunningTotal} kube_state_deployment metrics") + # Flush AppInsights telemetry once all the processing is done, only if the number of events flushed is greater than 0 + if (@deploymentsRunningTotal > @@deploymentsCount) + @@deploymentsCount = @deploymentsRunningTotal + end + if (((DateTime.now.to_time.to_i - @@telemetryLastSentTime).abs) / 60) >= Constants::KUBE_STATE_TELEMETRY_FLUSH_INTERVAL_IN_MINUTES + #send telemetry + $log.info "sending deployemt telemetry..." + ApplicationInsightsUtility.sendMetricTelemetry("MaxDeploymentCount", @@deploymentsCount, {}) + #reset last sent value & time + @@deploymentsCount = 0 + @@telemetryLastSentTime = DateTime.now.to_time.to_i + end + rescue => errorStr + $log.warn "in_kubestate_deployments::enumerate:Failed in enumerate: #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_deployments::enumerate:Failed in enumerate: #{errorStr}") + end + end # end enumerate + + def parse_and_emit_records(deployments, batchTime = Time.utc.iso8601) + metricItems = [] + insightsMetricsEventStream = MultiEventStream.new + begin + metricInfo = deployments + metricInfo["items"].each do |deployment| + deploymentName = deployment["metadata"]["name"] + deploymentNameSpace = deployment["metadata"]["namespace"] + deploymentCreatedTime = "" + if !deployment["metadata"]["creationTimestamp"].nil? + deploymentCreatedTime = deployment["metadata"]["creationTimestamp"] + end + deploymentStrategy = "RollingUpdate" #default when not specified as per spec + if !deployment["spec"]["strategy"].nil? && !deployment["spec"]["strategy"]["type"].nil? + deploymentStrategy = deployment["spec"]["strategy"]["type"] end - - # Setting this to nil so that we dont hold memory until GC kicks in - deploymentList = nil - - $log.info("successfully emitted a total of #{@deploymentsRunningTotal} kube_state_deployment metrics") - # Flush AppInsights telemetry once all the processing is done, only if the number of events flushed is greater than 0 - if (@deploymentsRunningTotal > @@deploymentsCount) - @@deploymentsCount = @deploymentsRunningTotal + deploymentSpecReplicas = 1 #default is 1 as per k8s spec + if !deployment["spec"]["replicas"].nil? 
+ deploymentSpecReplicas = deployment["spec"]["replicas"] end - if (((DateTime.now.to_time.to_i - @@telemetryLastSentTime).abs)/60 ) >= Constants::KUBE_STATE_TELEMETRY_FLUSH_INTERVAL_IN_MINUTES - #send telemetry - $log.info "sending deployemt telemetry..." - ApplicationInsightsUtility.sendMetricTelemetry("MaxDeploymentCount", @@deploymentsCount, {}) - #reset last sent value & time - @@deploymentsCount = 0 - @@telemetryLastSentTime = DateTime.now.to_time.to_i + deploymentStatusReadyReplicas = 0 + if !deployment["status"]["readyReplicas"].nil? + deploymentStatusReadyReplicas = deployment["status"]["readyReplicas"] end - rescue => errorStr - $log.warn "in_kubestate_deployments::enumerate:Failed in enumerate: #{errorStr}" - ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_deployments::enumerate:Failed in enumerate: #{errorStr}") + deploymentStatusUpToDateReplicas = 0 + if !deployment["status"]["updatedReplicas"].nil? + deploymentStatusUpToDateReplicas = deployment["status"]["updatedReplicas"] + end + deploymentStatusAvailableReplicas = 0 + if !deployment["status"]["availableReplicas"].nil? + deploymentStatusAvailableReplicas = deployment["status"]["availableReplicas"] + end + + metricItem = {} + metricItem["CollectionTime"] = batchTime + metricItem["Computer"] = @NodeName + metricItem["Name"] = Constants::INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_DEPLOYMENT_STATE + metricItem["Value"] = deploymentStatusReadyReplicas + metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN + metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_KUBESTATE_NAMESPACE + + metricTags = {} + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = @ClusterId + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = @ClusterName + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_NAME] = deploymentName + metricTags[Constants::INSIGHTSMETRICS_TAGS_K8SNAMESPACE] = deploymentNameSpace + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STRATEGY] = deploymentStrategy + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_CREATIONTIME] = deploymentCreatedTime + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_SPEC_REPLICAS] = deploymentSpecReplicas + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STATUS_REPLICAS_UPDATED] = deploymentStatusUpToDateReplicas + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STATUS_REPLICAS_AVAILABLE] = deploymentStatusAvailableReplicas + + metricItem["Tags"] = metricTags + + metricItems.push(metricItem) end - end # end enumerate - - def parse_and_emit_records(deployments, batchTime = Time.utc.iso8601) - metricItems = [] - insightsMetricsEventStream = MultiEventStream.new - begin - metricInfo = deployments - metricInfo["items"].each do |deployment| - deploymentName = deployment["metadata"]["name"] - deploymentNameSpace = deployment["metadata"]["namespace"] - deploymentCreatedTime = "" - if !deployment["metadata"]["creationTimestamp"].nil? - deploymentCreatedTime = deployment["metadata"]["creationTimestamp"] - end - deploymentStrategy = "RollingUpdate" #default when not specified as per spec - if !deployment["spec"]["strategy"].nil? && !deployment["spec"]["strategy"]["type"].nil? - deploymentStrategy = deployment["spec"]["strategy"]["type"] - end - deploymentSpecReplicas = 1 #default is 1 as per k8s spec - if !deployment["spec"]["replicas"].nil? 
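Each deployment above becomes one InsightsMetrics record: a flat hash whose Value is the ready-replica count, with cluster and deployment details carried in a Tags hash, wrapped in an INSIGHTS_METRICS_BLOB envelope before it is added to the event stream. A shape-only sketch; every string below is a placeholder, since the real values come from the deployment object and the Constants module.

require "json"
require "time"

metric_item = {
  "CollectionTime" => Time.now.utc.iso8601,
  "Computer"       => "sample-node",
  "Name"           => "kube_state_deployment_state_placeholder",   # Constants::INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_DEPLOYMENT_STATE in the plugin
  "Value"          => 3,                                           # status.readyReplicas
  "Origin"         => "origin_placeholder",
  "Namespace"      => "kubestate_namespace_placeholder",
  "Tags"           => {
    "deployment"   => "nginx",
    "k8sNamespace" => "default",
    "strategy"     => "RollingUpdate",
    "specReplicas" => 3,
  },
}

# Envelope added to the MultiEventStream; the plugin's "record.each { ... }" pass returns the
# same hash, so DataItems is effectively just [metric_item].
wrapper = {
  "DataType"  => "INSIGHTS_METRICS_BLOB",
  "IPName"    => "ContainerInsights",
  "DataItems" => [metric_item],
}
puts JSON.pretty_generate(wrapper)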
- deploymentSpecReplicas = deployment["spec"]["replicas"] - end - deploymentStatusReadyReplicas = 0 - if !deployment["status"]["readyReplicas"].nil? - deploymentStatusReadyReplicas = deployment["status"]["readyReplicas"] - end - deploymentStatusUpToDateReplicas = 0 - if !deployment["status"]["updatedReplicas"].nil? - deploymentStatusUpToDateReplicas = deployment["status"]["updatedReplicas"] - end - deploymentStatusAvailableReplicas = 0 - if !deployment["status"]["availableReplicas"].nil? - deploymentStatusAvailableReplicas = deployment["status"]["availableReplicas"] - end - - metricItem = {} - metricItem["CollectionTime"] = batchTime - metricItem["Computer"] = @NodeName - metricItem["Name"] = Constants::INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_DEPLOYMENT_STATE - metricItem["Value"] = deploymentStatusReadyReplicas - metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN - metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_KUBESTATE_NAMESPACE - - metricTags = {} - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = @ClusterId - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = @ClusterName - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_NAME] = deploymentName - metricTags[Constants::INSIGHTSMETRICS_TAGS_K8SNAMESPACE] = deploymentNameSpace - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STRATEGY ] = deploymentStrategy - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_CREATIONTIME] = deploymentCreatedTime - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_SPEC_REPLICAS] = deploymentSpecReplicas - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STATUS_REPLICAS_UPDATED] = deploymentStatusUpToDateReplicas - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STATUS_REPLICAS_AVAILABLE] = deploymentStatusAvailableReplicas - - - metricItem["Tags"] = metricTags - - metricItems.push(metricItem) - end - - time = Time.now.to_f - metricItems.each do |insightsMetricsRecord| - wrapper = { - "DataType" => "INSIGHTS_METRICS_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], - } - insightsMetricsEventStream.add(time, wrapper) if wrapper - end - - router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream - $log.info("successfully emitted #{metricItems.length()} kube_state_deployment metrics") - @deploymentsRunningTotal = @deploymentsRunningTotal + metricItems.length() - if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) - $log.info("kubestatedeploymentsInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") - end - rescue => error - $log.warn("in_kubestate_deployments::parse_and_emit_records failed: #{error} ") - ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_deployments::parse_and_emit_records failed: #{error}") + + time = Time.now.to_f + metricItems.each do |insightsMetricsRecord| + wrapper = { + "DataType" => "INSIGHTS_METRICS_BLOB", + "IPName" => "ContainerInsights", + "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], + } + insightsMetricsEventStream.add(time, wrapper) if wrapper + end + + router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + $log.info("successfully emitted #{metricItems.length()} kube_state_deployment metrics") + + @deploymentsRunningTotal = @deploymentsRunningTotal + metricItems.length() + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) + $log.info("kubestatedeploymentsInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") end - + rescue => error + $log.warn("in_kubestate_deployments::parse_and_emit_records failed: #{error} ") + ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_deployments::parse_and_emit_records failed: #{error}") end - - def run_periodic - @mutex.lock + end + + def run_periodic + @mutex.lock + done = @finished + @nextTimeToRun = Time.now + @waitTimeout = @run_interval + until done + @nextTimeToRun = @nextTimeToRun + @run_interval + @now = Time.now + if @nextTimeToRun <= @now + @waitTimeout = 1 + @nextTimeToRun = @now + else + @waitTimeout = @nextTimeToRun - @now + end + @condition.wait(@mutex, @waitTimeout) done = @finished - @nextTimeToRun = Time.now - @waitTimeout = @run_interval - until done - @nextTimeToRun = @nextTimeToRun + @run_interval - @now = Time.now - if @nextTimeToRun <= @now - @waitTimeout = 1 - @nextTimeToRun = @now - else - @waitTimeout = @nextTimeToRun - @now - end - @condition.wait(@mutex, @waitTimeout) - done = @finished - @mutex.unlock - if !done - begin - $log.info("in_kubestate_deployments::run_periodic.enumerate.start @ #{Time.now.utc.iso8601}") - enumerate - $log.info("in_kubestate_deployments::run_periodic.enumerate.end @ #{Time.now.utc.iso8601}") - rescue => errorStr - $log.warn "in_kubestate_deployments::run_periodic: enumerate Failed to retrieve kube deployments: #{errorStr}" - ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_deployments::run_periodic: enumerate Failed to retrieve kube deployments: #{errorStr}") - end + @mutex.unlock + if !done + begin + $log.info("in_kubestate_deployments::run_periodic.enumerate.start @ #{Time.now.utc.iso8601}") + enumerate + $log.info("in_kubestate_deployments::run_periodic.enumerate.end @ #{Time.now.utc.iso8601}") + rescue => errorStr + $log.warn "in_kubestate_deployments::run_periodic: enumerate Failed to retrieve kube deployments: #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_deployments::run_periodic: enumerate Failed to retrieve kube deployments: #{errorStr}") end - @mutex.lock end - @mutex.unlock + @mutex.lock end + @mutex.unlock end -end \ No newline at end of file + end +end diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 3ce63a75a..afecf8e3b 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ 
b/source/plugins/ruby/in_kubestate_hpa.rb @@ -2,231 +2,236 @@ # frozen_string_literal: true module Fluent - class Kube_Kubestate_HPA_Input < Input - Plugin.register_input("kubestatehpa", self) - @@istestvar = ENV["ISTEST"] - - - def initialize - super - require "yajl/json_gem" - require "yajl" - require "time" - - require_relative "KubernetesApiClient" - require_relative "oms_common" - require_relative "omslog" - require_relative "ApplicationInsightsUtility" - require_relative "constants" - - # roughly each HPA is 3k - # 2000 HPAs account to approximately 6-7MB - @HPA_CHUNK_SIZE = 2000 - @HPA_API_GROUP = "autoscaling" - - # telemetry - @hpaCount = 0 - - @NodeName = OMS::Common.get_hostname - @ClusterId = KubernetesApiClient.getClusterId - @ClusterName = KubernetesApiClient.getClusterName - end - - config_param :run_interval, :time, :default => 60 - config_param :tag, :string, :default => Constants::INSIGHTSMETRICS_FLUENT_TAG - - def configure(conf) - super - end - - def start - if @run_interval - @finished = false - @condition = ConditionVariable.new - @mutex = Mutex.new - @thread = Thread.new(&method(:run_periodic)) + class Kube_Kubestate_HPA_Input < Input + Plugin.register_input("kubestatehpa", self) + @@istestvar = ENV["ISTEST"] + + def initialize + super + require "yajl/json_gem" + require "yajl" + require "time" + + require_relative "KubernetesApiClient" + require_relative "oms_common" + require_relative "omslog" + require_relative "ApplicationInsightsUtility" + require_relative "constants" + + # refer tomlparser-agent-config for defaults + # this configurable via configmap + @HPA_CHUNK_SIZE = 0 + + @HPA_API_GROUP = "autoscaling" + + # telemetry + @hpaCount = 0 + + @NodeName = OMS::Common.get_hostname + @ClusterId = KubernetesApiClient.getClusterId + @ClusterName = KubernetesApiClient.getClusterName + end + + config_param :run_interval, :time, :default => 60 + config_param :tag, :string, :default => Constants::INSIGHTSMETRICS_FLUENT_TAG + + def configure(conf) + super + end + + def start + if @run_interval + if !ENV["HPA_CHUNK_SIZE"].nil? && !ENV["HPA_CHUNK_SIZE"].empty? 
&& ENV["HPA_CHUNK_SIZE"].to_i > 0 + @HPA_CHUNK_SIZE = ENV["HPA_CHUNK_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kubestate_hpa::start: setting to default value since got HPA_CHUNK_SIZE nil or empty") + @HPA_CHUNK_SIZE = 2000 end + $log.info("in_kubestate_hpa::start : HPA_CHUNK_SIZE @ #{@HPA_CHUNK_SIZE}") + + @finished = false + @condition = ConditionVariable.new + @mutex = Mutex.new + @thread = Thread.new(&method(:run_periodic)) end - - def shutdown - if @run_interval - @mutex.synchronize { - @finished = true - @condition.signal - } - @thread.join - end + end + + def shutdown + if @run_interval + @mutex.synchronize { + @finished = true + @condition.signal + } + @thread.join end - - def enumerate - begin - hpaList = nil - currentTime = Time.now - batchTime = currentTime.utc.iso8601 - - @hpaCount = 0 - - # Initializing continuation token to nil - continuationToken = nil - $log.info("in_kubestate_hpa::enumerate : Getting HPAs from Kube API @ #{Time.now.utc.iso8601}") - continuationToken, hpaList = KubernetesApiClient.getResourcesAndContinuationToken("horizontalpodautoscalers?limit=#{@HPA_CHUNK_SIZE}", api_group: @HPA_API_GROUP) - $log.info("in_kubestate_hpa::enumerate : Done getting HPAs from Kube API @ #{Time.now.utc.iso8601}") + end + + def enumerate + begin + hpaList = nil + currentTime = Time.now + batchTime = currentTime.utc.iso8601 + + @hpaCount = 0 + + # Initializing continuation token to nil + continuationToken = nil + $log.info("in_kubestate_hpa::enumerate : Getting HPAs from Kube API @ #{Time.now.utc.iso8601}") + continuationToken, hpaList = KubernetesApiClient.getResourcesAndContinuationToken("horizontalpodautoscalers?limit=#{@HPA_CHUNK_SIZE}", api_group: @HPA_API_GROUP) + $log.info("in_kubestate_hpa::enumerate : Done getting HPAs from Kube API @ #{Time.now.utc.iso8601}") + if (!hpaList.nil? && !hpaList.empty? && hpaList.key?("items") && !hpaList["items"].nil? && !hpaList["items"].empty?) + parse_and_emit_records(hpaList, batchTime) + else + $log.warn "in_kubestate_hpa::enumerate:Received empty hpaList" + end + + #If we receive a continuation token, make calls, process and flush data until we have processed all data + while (!continuationToken.nil? && !continuationToken.empty?) + continuationToken, hpaList = KubernetesApiClient.getResourcesAndContinuationToken("horizontalpodautoscalers?limit=#{@HPA_CHUNK_SIZE}&continue=#{continuationToken}", api_group: @HPA_API_GROUP) if (!hpaList.nil? && !hpaList.empty? && hpaList.key?("items") && !hpaList["items"].nil? && !hpaList["items"].empty?) parse_and_emit_records(hpaList, batchTime) else $log.warn "in_kubestate_hpa::enumerate:Received empty hpaList" end - - #If we receive a continuation token, make calls, process and flush data until we have processed all data - while (!continuationToken.nil? && !continuationToken.empty?) - continuationToken, hpaList = KubernetesApiClient.getResourcesAndContinuationToken("horizontalpodautoscalers?limit=#{@HPA_CHUNK_SIZE}&continue=#{continuationToken}", api_group: @HPA_API_GROUP) - if (!hpaList.nil? && !hpaList.empty? && hpaList.key?("items") && !hpaList["items"].nil? && !hpaList["items"].empty?) 
- parse_and_emit_records(hpaList, batchTime) - else - $log.warn "in_kubestate_hpa::enumerate:Received empty hpaList" + end + + # Setting this to nil so that we dont hold memory until GC kicks in + hpaList = nil + + # Flush AppInsights telemetry once all the processing is done, only if the number of events flushed is greater than 0 + if (@hpaCount > 0) + # this will not be a useful telemetry, as hpa counts will not be huge, just log for now + $log.info("in_kubestate_hpa::hpaCount= #{hpaCount}") + #ApplicationInsightsUtility.sendMetricTelemetry("HPACount", @hpaCount, {}) + end + rescue => errorStr + $log.warn "in_kubestate_hpa::enumerate:Failed in enumerate: #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_hpa::enumerate:Failed in enumerate: #{errorStr}") + end + end # end enumerate + + def parse_and_emit_records(hpas, batchTime = Time.utc.iso8601) + metricItems = [] + insightsMetricsEventStream = MultiEventStream.new + begin + metricInfo = hpas + metricInfo["items"].each do |hpa| + hpaName = hpa["metadata"]["name"] + hpaNameSpace = hpa["metadata"]["namespace"] + hpaCreatedTime = "" + if !hpa["metadata"]["creationTimestamp"].nil? + hpaCreatedTime = hpa["metadata"]["creationTimestamp"] + end + hpaSpecMinReplicas = 1 #default is 1 as per k8s spec + if !hpa["spec"]["minReplicas"].nil? + hpaSpecMinReplicas = hpa["spec"]["minReplicas"] + end + hpaSpecMaxReplicas = 0 + if !hpa["spec"]["maxReplicas"].nil? + hpaSpecMaxReplicas = hpa["spec"]["maxReplicas"] + end + hpaSpecScaleTargetKind = "" + hpaSpecScaleTargetName = "" + if !hpa["spec"]["scaleTargetRef"].nil? + if !hpa["spec"]["scaleTargetRef"]["kind"].nil? + hpaSpecScaleTargetKind = hpa["spec"]["scaleTargetRef"]["kind"] + end + if !hpa["spec"]["scaleTargetRef"]["name"].nil? + hpaSpecScaleTargetName = hpa["spec"]["scaleTargetRef"]["name"] end end - - # Setting this to nil so that we dont hold memory until GC kicks in - hpaList = nil - - # Flush AppInsights telemetry once all the processing is done, only if the number of events flushed is greater than 0 - if (@hpaCount > 0) - # this will not be a useful telemetry, as hpa counts will not be huge, just log for now - $log.info("in_kubestate_hpa::hpaCount= #{hpaCount}") - #ApplicationInsightsUtility.sendMetricTelemetry("HPACount", @hpaCount, {}) + hpaStatusCurrentReplicas = 0 + if !hpa["status"]["currentReplicas"].nil? + hpaStatusCurrentReplicas = hpa["status"]["currentReplicas"] end - rescue => errorStr - $log.warn "in_kubestate_hpa::enumerate:Failed in enumerate: #{errorStr}" - ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_hpa::enumerate:Failed in enumerate: #{errorStr}") + hpaStatusDesiredReplicas = 0 + if !hpa["status"]["desiredReplicas"].nil? + hpaStatusDesiredReplicas = hpa["status"]["desiredReplicas"] + end + + hpaStatuslastScaleTime = "" + if !hpa["status"]["lastScaleTime"].nil? 
+ hpaStatuslastScaleTime = hpa["status"]["lastScaleTime"] + end + + metricItem = {} + metricItem["CollectionTime"] = batchTime + metricItem["Computer"] = @NodeName + metricItem["Name"] = Constants::INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_HPA_STATE + metricItem["Value"] = hpaStatusCurrentReplicas + metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN + metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_KUBESTATE_NAMESPACE + + metricTags = {} + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = @ClusterId + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = @ClusterName + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_NAME] = hpaName + metricTags[Constants::INSIGHTSMETRICS_TAGS_K8SNAMESPACE] = hpaNameSpace + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_CREATIONTIME] = hpaCreatedTime + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_MIN_REPLICAS] = hpaSpecMinReplicas + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_MAX_REPLICAS] = hpaSpecMaxReplicas + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_SCALE_TARGET_KIND] = hpaSpecScaleTargetKind + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_SCALE_TARGET_NAME] = hpaSpecScaleTargetName + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_STATUS_DESIRED_REPLICAS] = hpaStatusDesiredReplicas + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_STATUS_LAST_SCALE_TIME] = hpaStatuslastScaleTime + + metricItem["Tags"] = metricTags + + metricItems.push(metricItem) end - end # end enumerate - - def parse_and_emit_records(hpas, batchTime = Time.utc.iso8601) - metricItems = [] - insightsMetricsEventStream = MultiEventStream.new - begin - metricInfo = hpas - metricInfo["items"].each do |hpa| - hpaName = hpa["metadata"]["name"] - hpaNameSpace = hpa["metadata"]["namespace"] - hpaCreatedTime = "" - if !hpa["metadata"]["creationTimestamp"].nil? - hpaCreatedTime = hpa["metadata"]["creationTimestamp"] - end - hpaSpecMinReplicas = 1 #default is 1 as per k8s spec - if !hpa["spec"]["minReplicas"].nil? - hpaSpecMinReplicas = hpa["spec"]["minReplicas"] - end - hpaSpecMaxReplicas = 0 - if !hpa["spec"]["maxReplicas"].nil? - hpaSpecMaxReplicas = hpa["spec"]["maxReplicas"] - end - hpaSpecScaleTargetKind = "" - hpaSpecScaleTargetName = "" - if !hpa["spec"]["scaleTargetRef"].nil? - if !hpa["spec"]["scaleTargetRef"]["kind"].nil? - hpaSpecScaleTargetKind = hpa["spec"]["scaleTargetRef"]["kind"] - end - if !hpa["spec"]["scaleTargetRef"]["name"].nil? - hpaSpecScaleTargetName = hpa["spec"]["scaleTargetRef"]["name"] - end - - end - hpaStatusCurrentReplicas = 0 - if !hpa["status"]["currentReplicas"].nil? - hpaStatusCurrentReplicas = hpa["status"]["currentReplicas"] - end - hpaStatusDesiredReplicas = 0 - if !hpa["status"]["desiredReplicas"].nil? - hpaStatusDesiredReplicas = hpa["status"]["desiredReplicas"] - end - - hpaStatuslastScaleTime = "" - if !hpa["status"]["lastScaleTime"].nil? 
- hpaStatuslastScaleTime = hpa["status"]["lastScaleTime"] - end - - - metricItem = {} - metricItem["CollectionTime"] = batchTime - metricItem["Computer"] = @NodeName - metricItem["Name"] = Constants::INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_HPA_STATE - metricItem["Value"] = hpaStatusCurrentReplicas - metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN - metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_KUBESTATE_NAMESPACE - - metricTags = {} - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = @ClusterId - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = @ClusterName - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_NAME] = hpaName - metricTags[Constants::INSIGHTSMETRICS_TAGS_K8SNAMESPACE] = hpaNameSpace - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_CREATIONTIME] = hpaCreatedTime - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_MIN_REPLICAS] = hpaSpecMinReplicas - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_MAX_REPLICAS] = hpaSpecMaxReplicas - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_SCALE_TARGET_KIND] = hpaSpecScaleTargetKind - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_SCALE_TARGET_NAME] = hpaSpecScaleTargetName - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_STATUS_DESIRED_REPLICAS] = hpaStatusDesiredReplicas - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_STATUS_LAST_SCALE_TIME] = hpaStatuslastScaleTime - - - metricItem["Tags"] = metricTags - - metricItems.push(metricItem) - end - time = Time.now.to_f - metricItems.each do |insightsMetricsRecord| - wrapper = { - "DataType" => "INSIGHTS_METRICS_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], - } - insightsMetricsEventStream.add(time, wrapper) if wrapper - end - - router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream - $log.info("successfully emitted #{metricItems.length()} kube_state_hpa metrics") - if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) - $log.info("kubestatehpaInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") - end - rescue => error - $log.warn("in_kubestate_hpa::parse_and_emit_records failed: #{error} ") - ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_hpa::parse_and_emit_records failed: #{error}") + time = Time.now.to_f + metricItems.each do |insightsMetricsRecord| + wrapper = { + "DataType" => "INSIGHTS_METRICS_BLOB", + "IPName" => "ContainerInsights", + "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], + } + insightsMetricsEventStream.add(time, wrapper) if wrapper + end + + router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + $log.info("successfully emitted #{metricItems.length()} kube_state_hpa metrics") + if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) + $log.info("kubestatehpaInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") end - + rescue => error + $log.warn("in_kubestate_hpa::parse_and_emit_records failed: #{error} ") + ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_hpa::parse_and_emit_records failed: #{error}") end - - def run_periodic - @mutex.lock + end + + def run_periodic + @mutex.lock + done = @finished + @nextTimeToRun = Time.now + @waitTimeout = @run_interval + until done + @nextTimeToRun = @nextTimeToRun + @run_interval + @now = Time.now + if @nextTimeToRun <= @now + @waitTimeout = 1 + @nextTimeToRun = @now + else + @waitTimeout = @nextTimeToRun - @now + end + @condition.wait(@mutex, @waitTimeout) done = @finished - @nextTimeToRun = Time.now - @waitTimeout = @run_interval - until done - @nextTimeToRun = @nextTimeToRun + @run_interval - @now = Time.now - if @nextTimeToRun <= @now - @waitTimeout = 1 - @nextTimeToRun = @now - else - @waitTimeout = @nextTimeToRun - @now - end - @condition.wait(@mutex, @waitTimeout) - done = @finished - @mutex.unlock - if !done - begin - $log.info("in_kubestate_hpa::run_periodic.enumerate.start @ #{Time.now.utc.iso8601}") - enumerate - $log.info("in_kubestate_hpa::run_periodic.enumerate.end @ #{Time.now.utc.iso8601}") - rescue => errorStr - $log.warn "in_kubestate_hpa::run_periodic: enumerate Failed to retrieve kube hpas: #{errorStr}" - ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_hpa::run_periodic: enumerate Failed to retrieve kube hpas: #{errorStr}") - end + @mutex.unlock + if !done + begin + $log.info("in_kubestate_hpa::run_periodic.enumerate.start @ #{Time.now.utc.iso8601}") + enumerate + $log.info("in_kubestate_hpa::run_periodic.enumerate.end @ #{Time.now.utc.iso8601}") + rescue => errorStr + $log.warn "in_kubestate_hpa::run_periodic: enumerate Failed to retrieve kube hpas: #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_hpa::run_periodic: enumerate Failed to retrieve kube hpas: #{errorStr}") end - @mutex.lock end - @mutex.unlock + @mutex.lock end + @mutex.unlock end -end \ No newline at end of file + end +end From 9cb058c850cbfd8ed88910920cf3055b8066061b Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 18 Dec 2020 14:24:37 -0800 Subject: [PATCH 050/301] Gangams/enable arc onboarding to ff (#478) * wip * updates * trigger login if the ctx cloud not same as specified cloud * add missed commit --- .../onboarding/managed/disable-monitoring.ps1 | 34 ++++++++++++--- .../onboarding/managed/disable-monitoring.sh | 17 ++++++++ .../onboarding/managed/enable-monitoring.ps1 | 43 ++++++++++++++++--- .../onboarding/managed/enable-monitoring.sh | 38 +++++++++++++--- .../onboarding/managed/upgrade-monitoring.sh | 19 +++++++- 5 files changed, 130 insertions(+), 21 deletions(-) diff --git a/scripts/onboarding/managed/disable-monitoring.ps1 b/scripts/onboarding/managed/disable-monitoring.ps1 index 1c011bfff..bcd135dba 100644 --- a/scripts/onboarding/managed/disable-monitoring.ps1 +++ b/scripts/onboarding/managed/disable-monitoring.ps1 @@ -15,6 +15,8 @@ tenantId of the service principal which will be used for the azure login .PARAMETER kubeContext (optional) kube-context of the k8 cluster to install Azure Monitor for containers HELM chart + .PARAMETER azureCloudName (optional) + Name of the Azure cloud name. 
Supported Azure cloud Name is AzureCloud or AzureUSGovernment Pre-requisites: - Azure Managed cluster Resource Id @@ -34,7 +36,9 @@ param( [Parameter(mandatory = $false)] [string]$tenantId, [Parameter(mandatory = $false)] - [string]$kubeContext + [string]$kubeContext, + [Parameter(mandatory = $false)] + [string]$azureCloudName ) $helmChartReleaseName = "azmon-containers-release-1" @@ -46,6 +50,21 @@ $isAksCluster = $false $isAroV4Cluster = $false $isUsingServicePrincipal = $false +if ([string]::IsNullOrEmpty($azureCloudName) -eq $true) { + Write-Host("Azure cloud name parameter not passed in so using default cloud as AzureCloud") + $azureCloudName = "AzureCloud" +} else { + if(($azureCloudName.ToLower() -eq "azurecloud" ) -eq $true) { + Write-Host("Specified Azure Cloud name is : $azureCloudName") + } elseif (($azureCloudName.ToLower() -eq "azureusgovernment" ) -eq $true) { + Write-Host("Specified Azure Cloud name is : $azureCloudName") + } else { + Write-Host("Specified Azure Cloud name is : $azureCloudName") + Write-Host("Only supported Azure clouds are : AzureCloud and AzureUSGovernment") + exit + } +} + # checks the required Powershell modules exist and if not exists, request the user permission to install $azAccountModule = Get-Module -ListAvailable -Name Az.Accounts $azResourcesModule = Get-Module -ListAvailable -Name Az.Resources @@ -226,14 +245,19 @@ Write-Host("Cluster SubscriptionId : '" + $clusterSubscriptionId + "' ") -Foregr if ($isUsingServicePrincipal) { $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret - Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId + Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId -Environment $azureCloudName } try { Write-Host("") Write-Host("Trying to get the current Az login context...") $account = Get-AzContext -ErrorAction Stop - Write-Host("Successfully fetched current AzContext context...") -ForegroundColor Green + $ctxCloud = $account.Environment.Name + if(($azureCloudName.ToLower() -eq $ctxCloud.ToLower() ) -eq $false) { + Write-Host("Specified azure cloud name is not same as current context cloud hence setting account to null to retrigger the login" ) -ForegroundColor Green + $account = $null + } + Write-Host("Successfully fetched current AzContext context and azure cloud name: $azureCloudName" ) -ForegroundColor Green Write-Host("") } catch { @@ -249,10 +273,10 @@ if ($null -eq $account.Account) { if ($isUsingServicePrincipal) { $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret - Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId + Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId -Environment $azureCloudName } else { Write-Host("Please login...") - Connect-AzAccount -subscriptionid $clusterSubscriptionId + Connect-AzAccount -subscriptionid $clusterSubscriptionId -Environment $azureCloudName } } catch { diff --git a/scripts/onboarding/managed/disable-monitoring.sh b/scripts/onboarding/managed/disable-monitoring.sh index c11426f30..d43a79f51 100644 --- 
a/scripts/onboarding/managed/disable-monitoring.sh +++ b/scripts/onboarding/managed/disable-monitoring.sh @@ -280,10 +280,27 @@ done } +validate_and_configure_supported_cloud() { + echo "get active azure cloud name configured to azure cli" + azureCloudName=$(az cloud show --query name -o tsv | tr "[:upper:]" "[:lower:]") + echo "active azure cloud name configured to azure cli: ${azureCloudName}" + if [ "$isArcK8sCluster" = true ]; then + if [ "$azureCloudName" != "azurecloud" -a "$azureCloudName" != "azureusgovernment" ]; then + echo "-e only supported clouds are AzureCloud and AzureUSGovernment for Azure Arc enabled Kubernetes cluster type" + exit 1 + fi + else + # For ARO v4, only supported cloud is public so just configure to public to keep the existing behavior + configure_to_public_cloud + fi +} # parse args parse_args $@ +# validate and configure azure cloud +validate_and_configure_supported_cloud + # parse cluster resource id clusterSubscriptionId="$(echo $clusterResourceId | cut -d'/' -f3 | tr "[:upper:]" "[:lower:]")" clusterResourceGroup="$(echo $clusterResourceId | cut -d'/' -f5)" diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index b052f22c5..7b128b112 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -22,6 +22,8 @@ .PARAMETER proxyEndpoint (optional) Provide Proxy endpoint if you have K8s cluster behind the proxy and would like to route Azure Monitor for containers outbound traffic via proxy. Format of the proxy endpoint should be http(s://:@: + .PARAMETER azureCloudName (optional) + Name of the Azure cloud name. Supported Azure cloud Name is AzureCloud or AzureUSGovernment Pre-requisites: - Azure Managed cluster Resource Id @@ -46,7 +48,9 @@ param( [Parameter(mandatory = $false)] [string]$workspaceResourceId, [Parameter(mandatory = $false)] - [string]$proxyEndpoint + [string]$proxyEndpoint, + [Parameter(mandatory = $false)] + [string]$azureCloudName ) $solutionTemplateUri = "https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_dev/scripts/onboarding/templates/azuremonitor-containerSolution.json" @@ -63,6 +67,24 @@ $mcr = "mcr.microsoft.com" $mcrChartVersion = "2.7.9" $mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" $helmLocalRepoName = "." 
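The PowerShell and bash changes in this patch (above for the disable scripts, continued below for enable-monitoring) apply one rule: only AzureCloud and AzureUSGovernment are accepted, and the omsagent workspace domain switches from opinsights.azure.com to opinsights.azure.us for the US Government cloud. The same rule as a tiny sketch, written in Ruby only for consistency with the plugin code earlier in this series; the helper is illustrative and not part of the scripts.

SUPPORTED_CLOUD_DOMAINS = {
  "azurecloud"        => "opinsights.azure.com",
  "azureusgovernment" => "opinsights.azure.us",
}.freeze

def omsagent_domain_for(azure_cloud_name)
  domain = SUPPORTED_CLOUD_DOMAINS[azure_cloud_name.to_s.downcase]
  raise ArgumentError, "only AzureCloud and AzureUSGovernment are supported" if domain.nil?
  domain
end

puts omsagent_domain_for("AzureUSGovernment")   # => opinsights.azure.us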
+$omsAgentDomainName="opinsights.azure.com" + +if ([string]::IsNullOrEmpty($azureCloudName) -eq $true) { + Write-Host("Azure cloud name parameter not passed in so using default cloud as AzureCloud") + $azureCloudName = "AzureCloud" +} else { + if(($azureCloudName.ToLower() -eq "azurecloud" ) -eq $true) { + Write-Host("Specified Azure Cloud name is : $azureCloudName") + $omsAgentDomainName="opinsights.azure.com" + } elseif (($azureCloudName.ToLower() -eq "azureusgovernment" ) -eq $true) { + Write-Host("Specified Azure Cloud name is : $azureCloudName") + $omsAgentDomainName="opinsights.azure.us" + } else { + Write-Host("Specified Azure Cloud name is : $azureCloudName") + Write-Host("Only supported azure clouds are : AzureCloud and AzureUSGovernment") + exit + } +} # checks the required Powershell modules exist and if not exists, request the user permission to install $azAccountModule = Get-Module -ListAvailable -Name Az.Accounts @@ -244,14 +266,19 @@ Write-Host("Cluster SubscriptionId : '" + $clusterSubscriptionId + "' ") -Foregr if ($isUsingServicePrincipal) { $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId, $spSecret - Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId + Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId -Environment $azureCloudName } try { Write-Host("") Write-Host("Trying to get the current Az login context...") $account = Get-AzContext -ErrorAction Stop - Write-Host("Successfully fetched current AzContext context...") -ForegroundColor Green + $ctxCloud = $account.Environment.Name + if(($azureCloudName.ToLower() -eq $ctxCloud.ToLower() ) -eq $false) { + Write-Host("Specified azure cloud name is not same as current context cloud hence setting account to null to retrigger the login" ) -ForegroundColor Green + $account = $null + } + Write-Host("Successfully fetched current AzContext context and azure cloud name: $azureCloudName" ) -ForegroundColor Green Write-Host("") } catch { @@ -266,11 +293,12 @@ if ($null -eq $account.Account) { if ($isUsingServicePrincipal) { $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId, $spSecret - Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId + + Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId -Environment $azureCloudName } else { Write-Host("Please login...") - Connect-AzAccount -subscriptionid $clusterSubscriptionId + Connect-AzAccount -subscriptionid $clusterSubscriptionId -Environment $azureCloudName } } catch { @@ -380,7 +408,8 @@ if ([string]::IsNullOrEmpty($workspaceResourceId)) { "westeurope" = "westeurope" ; "westindia" = "centralindia" ; "westus" = "westus" ; - "westus2" = "westus2" + "westus2" = "westus2"; + "usgovvirginia" = "usgovvirginia" } $workspaceRegionCode = "EUS" @@ -531,7 +560,7 @@ try { Write-Host("helmChartRepoPath is : ${helmChartRepoPath}") - $helmParameters = "omsagent.secret.wsid=$workspaceGUID,omsagent.secret.key=$workspacePrimarySharedKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion" + $helmParameters = 
"omsagent.domain=$omsAgentDomainName,omsagent.secret.wsid=$workspaceGUID,omsagent.secret.key=$workspacePrimarySharedKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion" if ([string]::IsNullOrEmpty($proxyEndpoint) -eq $false) { Write-Host("using proxy endpoint since its provided") $helmParameters = $helmParameters + ",omsagent.proxy=$proxyEndpoint" diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index bb6974258..85428aff7 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -38,8 +38,10 @@ set -e set -o pipefail -# default to public cloud since only supported cloud is azure public clod +# default to public cloud since only supported cloud is azure public cloud defaultAzureCloud="AzureCloud" +# default domain will be for public cloud +omsAgentDomainName="opinsights.azure.com" # released chart version in mcr mcrChartVersion="2.7.9" @@ -307,6 +309,25 @@ parse_args() { } +validate_and_configure_supported_cloud() { + echo "get active azure cloud name configured to azure cli" + azureCloudName=$(az cloud show --query name -o tsv | tr "[:upper:]" "[:lower:]") + echo "active azure cloud name configured to azure cli: ${azureCloudName}" + if [ "$isArcK8sCluster" = true ]; then + if [ "$azureCloudName" != "azurecloud" -a "$azureCloudName" != "azureusgovernment" ]; then + echo "-e only supported clouds are AzureCloud and AzureUSGovernment for Azure Arc enabled Kubernetes cluster type" + exit 1 + fi + if [ "$azureCloudName" = "azureusgovernment" ]; then + echo "setting omsagent domain as opinsights.azure.us since the azure cloud is azureusgovernment " + omsAgentDomainName="opinsights.azure.us" + fi + else + # For ARO v4, only supported cloud is public so just configure to public to keep the existing behavior + configure_to_public_cloud + fi +} + configure_to_public_cloud() { echo "Set AzureCloud as active cloud for az cli" az cloud set -n $defaultAzureCloud @@ -398,8 +419,10 @@ create_default_log_analytics_workspace() { [westindia]=centralindia [westus]=westus [westus2]=westus2 + [usgovvirginia]=usgovvirginia ) + echo "cluster Region:"$clusterRegion if [ -n "${AzureCloudRegionToOmsRegionMap[$clusterRegion]}" ]; then workspaceRegion=${AzureCloudRegionToOmsRegionMap[$clusterRegion]} fi @@ -433,6 +456,7 @@ create_default_log_analytics_workspace() { workspaceResourceId=$(az resource show -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --query id) workspaceResourceId=$(echo $workspaceResourceId | tr -d '"') + echo "workspace resource Id: ${workspaceResourceId}" } add_container_insights_solution() { @@ -504,18 +528,18 @@ install_helm_chart() { echo "using proxy endpoint since proxy configuration passed in" if [ -z "$kubeconfigContext" ]; then echo "using current kube-context since --kube-context/-k parameter not passed in" - helm upgrade --install $releaseName --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath + helm upgrade --install $releaseName --set omsagent.domain=$omsAgentDomainName,omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath else echo "using --kube-context:${kubeconfigContext} since passed 
in" - helm upgrade --install $releaseName --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath --kube-context ${kubeconfigContext} + helm upgrade --install $releaseName --set omsagent.domain=$omsAgentDomainName,omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath --kube-context ${kubeconfigContext} fi else if [ -z "$kubeconfigContext" ]; then echo "using current kube-context since --kube-context/-k parameter not passed in" - helm upgrade --install $releaseName --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath + helm upgrade --install $releaseName --set omsagent.domain=$omsAgentDomainName,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath else echo "using --kube-context:${kubeconfigContext} since passed in" - helm upgrade --install $releaseName --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath --kube-context ${kubeconfigContext} + helm upgrade --install $releaseName --set omsagent.domain=$omsAgentDomainName,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath --kube-context ${kubeconfigContext} fi fi @@ -560,8 +584,8 @@ enable_aks_monitoring_addon() { # parse and validate args parse_args $@ -# configure azure cli for public cloud -configure_to_public_cloud +# validate and configure azure cli for cloud +validate_and_configure_supported_cloud # parse cluster resource id clusterSubscriptionId="$(echo $clusterResourceId | cut -d'/' -f3 | tr "[:upper:]" "[:lower:]")" diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 11ecf6819..847bf84ea 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -281,11 +281,26 @@ set_azure_subscription() { echo "successfully configured subscription id: ${subscriptionId} as current subscription for the azure cli" } +validate_and_configure_supported_cloud() { + echo "get active azure cloud name configured to azure cli" + azureCloudName=$(az cloud show --query name -o tsv | tr "[:upper:]" "[:lower:]") + echo "active azure cloud name configured to azure cli: ${azureCloudName}" + if [ "$isArcK8sCluster" = true ]; then + if [ "$azureCloudName" != "azurecloud" -a "$azureCloudName" != "azureusgovernment" ]; then + echo "-e only supported clouds are AzureCloud and AzureUSGovernment for Azure Arc enabled Kubernetes cluster type" + exit 1 + fi + else + # For ARO v4, only supported cloud is public so just configure to public to keep the existing behavior + configure_to_public_cloud + fi +} + # parse and validate args parse_args $@ -# configure azure cli for public cloud -configure_to_public_cloud +# configure azure cli for cloud +validate_and_configure_supported_cloud # parse cluster resource id clusterSubscriptionId="$(echo $clusterResourceId | cut -d'/' 
-f3 | tr "[:upper:]" "[:lower:]")" From ef9d726c7053fba0254fc897aff124e5a5a2be34 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Mon, 4 Jan 2021 10:43:44 -0800 Subject: [PATCH 051/301] Convert PV type dictionary to json for telemetry so it shows up in logs (#480) --- source/plugins/ruby/in_kube_pvinventory.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index b0e09c85b..861b3a8e1 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -90,7 +90,7 @@ def enumerate # Flush AppInsights telemetry once all the processing is done if telemetryFlush == true telemetryProperties = {} - telemetryProperties["CountsOfPVTypes"] = @pvTypeToCountHash + telemetryProperties["CountsOfPVTypes"] = @pvTypeToCountHash.to_json ApplicationInsightsUtility.sendCustomEvent(Constants::PV_INVENTORY_HEART_BEAT_EVENT, telemetryProperties) @@pvTelemetryTimeTracker = DateTime.now.to_time.to_i end From 97bdb94ad95234202ec2eca172cf419b5cee82d5 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Wed, 6 Jan 2021 09:59:49 -0800 Subject: [PATCH 052/301] fix 2 windows tasks - 1) Dont log to termination log 2) enable ADX route for containerlogs in windows (for O365) (#482) --- build/common/installer/scripts/tomlparser.rb | 2 +- .../installer/scripts/livenessprobe.cmd | 24 +++++++------------ 2 files changed, 9 insertions(+), 17 deletions(-) diff --git a/build/common/installer/scripts/tomlparser.rb b/build/common/installer/scripts/tomlparser.rb index 7235ee0c3..1d33da124 100644 --- a/build/common/installer/scripts/tomlparser.rb +++ b/build/common/installer/scripts/tomlparser.rb @@ -244,7 +244,7 @@ def get_command_windows(env_variable_name, env_variable_value) file.write(commands) commands = get_command_windows('AZMON_CLUSTER_COLLECT_ALL_KUBE_EVENTS', @collectAllKubeEvents) file.write(commands) - commands = get_command_windows('AZMON_CONTAINER_LOGS_ROUTE', @containerLogsRoute) + commands = get_command_windows('AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE', @containerLogsRoute) file.write(commands) # Close file after writing all environment variables diff --git a/build/windows/installer/scripts/livenessprobe.cmd b/build/windows/installer/scripts/livenessprobe.cmd index 06d577f31..19d0b69d7 100644 --- a/build/windows/installer/scripts/livenessprobe.cmd +++ b/build/windows/installer/scripts/livenessprobe.cmd @@ -1,40 +1,32 @@ -echo "Checking if fluent-bit is running" +REM "Checking if fluent-bit is running" tasklist /fi "imagename eq fluent-bit.exe" /fo "table" | findstr fluent-bit IF ERRORLEVEL 1 ( - echo "Fluent-Bit is not running" > /dev/termination-log + echo "Fluent-Bit is not running" exit /b 1 -) ELSE ( - echo "Fluent-Bit is running" ) -echo "Checking if config map has been updated since agent start" +REM "Checking if config map has been updated since agent start" IF EXIST C:\etc\omsagentwindows\filesystemwatcher.txt ( - echo "Config Map Updated since agent started" > /dev/termination-log + echo "Config Map Updated since agent started" exit /b 1 -) ELSE ( - echo "Config Map not Updated since agent start" ) -echo "Checking if certificate needs to be renewed (aka agent restart required)" +REM "Checking if certificate needs to be renewed (aka agent restart required)" IF EXIST C:\etc\omsagentwindows\renewcertificate.txt ( - echo "Certificate needs to be renewed" > /dev/termination-log + echo "Certificate needs to be renewed" exit /b 1 -) ELSE ( - echo "Certificate does NOT need to be 
renewd" ) -echo "Checking if fluentd service is running" +REM "Checking if fluentd service is running" sc query fluentdwinaks | findstr /i STATE | findstr RUNNING IF ERRORLEVEL 1 ( - echo "Fluentd Service is NOT Running" > /dev/termination-log + echo "Fluentd Service is NOT Running" exit /b 1 -) ELSE ( - echo "Fluentd Service is Running" ) exit /b 0 From 94237beba5671904945a676d156c609118c0b2d7 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 6 Jan 2021 13:58:22 -0800 Subject: [PATCH 053/301] fix ci envvar collection in large pods (#483) --- .../ruby/kubernetes_container_inventory.rb | 43 +++++++++++++------ 1 file changed, 31 insertions(+), 12 deletions(-) diff --git a/source/plugins/ruby/kubernetes_container_inventory.rb b/source/plugins/ruby/kubernetes_container_inventory.rb index 4fe728579..ba6a9af42 100644 --- a/source/plugins/ruby/kubernetes_container_inventory.rb +++ b/source/plugins/ruby/kubernetes_container_inventory.rb @@ -193,25 +193,41 @@ def obtainContainerEnvironmentVars(containerId) $log.info("KubernetesContainerInventory::obtainContainerEnvironmentVars @ #{Time.now.utc.iso8601}") envValueString = "" begin - unless @@containerCGroupCache.has_key?(containerId) + isCGroupPidFetchRequired = false + if !@@containerCGroupCache.has_key?(containerId) + isCGroupPidFetchRequired = true + else + cGroupPid = @@containerCGroupCache[containerId] + if cGroupPid.nil? || cGroupPid.empty? + isCGroupPidFetchRequired = true + @@containerCGroupCache.delete(containerId) + elsif !File.exist?("/hostfs/proc/#{cGroupPid}/environ") + isCGroupPidFetchRequired = true + @@containerCGroupCache.delete(containerId) + end + end + + if isCGroupPidFetchRequired $log.info("KubernetesContainerInventory::obtainContainerEnvironmentVars fetching cGroup parent pid @ #{Time.now.utc.iso8601} for containerId: #{containerId}") Dir["/hostfs/proc/*/cgroup"].each do |filename| begin - if File.file?(filename) && File.foreach(filename).grep(/#{containerId}/).any? + if File.file?(filename) && File.exist?(filename) && File.foreach(filename).grep(/#{containerId}/).any? # file full path is /hostfs/proc//cgroup - cGroupPid = filename.split("/")[3] - if @@containerCGroupCache.has_key?(containerId) - tempCGroupPid = @@containerCGroupCache[containerId] - if tempCGroupPid > cGroupPid + cGroupPid = filename.split("/")[3] + if is_number?(cGroupPid) + if @@containerCGroupCache.has_key?(containerId) + tempCGroupPid = @@containerCGroupCache[containerId] + if tempCGroupPid.to_i > cGroupPid.to_i + @@containerCGroupCache[containerId] = cGroupPid + end + else @@containerCGroupCache[containerId] = cGroupPid - end - else - @@containerCGroupCache[containerId] = cGroupPid + end end end - rescue SystemCallError # ignore Error::ENOENT,Errno::ESRCH which is expected if any of the container gone while we read - end - end + rescue SystemCallError # ignore Error::ENOENT,Errno::ESRCH which is expected if any of the container gone while we read + end + end end cGroupPid = @@containerCGroupCache[containerId] if !cGroupPid.nil? && !cGroupPid.empty? @@ -341,5 +357,8 @@ def deleteCGroupCacheEntryForDeletedContainer(containerId) ApplicationInsightsUtility.sendExceptionTelemetry(error) end end + def is_number?(value) + true if Integer(value) rescue false + end end end From aacd496eeba6350ec0d028334813df7edc806a5e Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Thu, 7 Jan 2021 13:39:17 -0800 Subject: [PATCH 054/301] grwehner/jan agent tasks (#481) - Windows agent fix to use log filtering settings in config map. 
- Error handling for kubelet_utils get_node_capacity in case /metrics/cadvsior endpoint fails. - Remove env variable for workspace key for windows agent --- build/common/installer/scripts/tomlparser.rb | 2 +- .../installer/certificategenerator/Program.cs | 8 +++----- build/windows/installer/conf/fluent.conf | 12 ++++++++++-- kubernetes/windows/main.ps1 | 12 ++---------- source/plugins/ruby/filter_cadvisor2mdm.rb | 12 ++++++++++-- 5 files changed, 26 insertions(+), 20 deletions(-) diff --git a/build/common/installer/scripts/tomlparser.rb b/build/common/installer/scripts/tomlparser.rb index 1d33da124..fe26f639e 100644 --- a/build/common/installer/scripts/tomlparser.rb +++ b/build/common/installer/scripts/tomlparser.rb @@ -228,7 +228,7 @@ def get_command_windows(env_variable_name, env_variable_value) file.write(commands) commands = get_command_windows('AZMON_LOG_TAIL_PATH', @logTailPath) file.write(commands) - commands = get_command_windows('AZMON_LOG_EXCLUSION_REGEX_PATTERN', @stdoutExcludeNamespaces) + commands = get_command_windows('AZMON_LOG_EXCLUSION_REGEX_PATTERN', @logExclusionRegexPattern) file.write(commands) commands = get_command_windows('AZMON_STDOUT_EXCLUDED_NAMESPACES', @stdoutExcludeNamespaces) file.write(commands) diff --git a/build/windows/installer/certificategenerator/Program.cs b/build/windows/installer/certificategenerator/Program.cs index 43063c4be..e24d0e303 100644 --- a/build/windows/installer/certificategenerator/Program.cs +++ b/build/windows/installer/certificategenerator/Program.cs @@ -414,14 +414,12 @@ static void Main(string[] args) try { - if (!String.IsNullOrEmpty(Environment.GetEnvironmentVariable("WSKEY"))) - { - logAnalyticsWorkspaceSharedKey = Environment.GetEnvironmentVariable("WSKEY"); - } + // WSKEY isn't stored as an environment variable + logAnalyticsWorkspaceSharedKey = File.ReadAllText("C:/etc/omsagent-secret/KEY").Trim(); } catch (Exception ex) { - Console.WriteLine("Failed to read env variables (WSKEY)" + ex.Message); + Console.WriteLine("Failed to read secret (WSKEY)" + ex.Message); } try diff --git a/build/windows/installer/conf/fluent.conf b/build/windows/installer/conf/fluent.conf index c96300b1e..d5eb475ca 100644 --- a/build/windows/installer/conf/fluent.conf +++ b/build/windows/installer/conf/fluent.conf @@ -6,7 +6,8 @@ @type tail - path /var/log/containers/*.log + path "#{ENV['AZMON_LOG_TAIL_PATH']}" + exclude_path "#{ENV['AZMON_CLUSTER_LOG_TAIL_EXCLUDE_PATH']}" pos_file /var/opt/microsoft/fluent/fluentd-containers.log.pos tag oms.container.log.la @log_level trace @@ -28,6 +29,14 @@ @include fluent-docker-parser.conf + + @type grep + + key stream + pattern "#{ENV['AZMON_LOG_EXCLUSION_REGEX_PATTERN']}" + + + @type record_transformer # fluent-plugin-record-modifier more light-weight but needs to be installed (dependency worth it?) 
@@ -37,7 +46,6 @@ - @type forward send_timeout 60s diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index d32e5068a..a297e3801 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -64,19 +64,11 @@ function Set-EnvironmentVariables { $wsID = Get-Content /etc/omsagent-secret/WSID } - # Set DOMAIN + # Set WSID [System.Environment]::SetEnvironmentVariable("WSID", $wsID, "Process") [System.Environment]::SetEnvironmentVariable("WSID", $wsID, "Machine") - $wsKey = "" - if (Test-Path /etc/omsagent-secret/KEY) { - # TODO: Change to omsagent-secret before merging - $wsKey = Get-Content /etc/omsagent-secret/KEY - } - - # Set KEY - [System.Environment]::SetEnvironmentVariable("WSKEY", $wsKey, "Process") - [System.Environment]::SetEnvironmentVariable("WSKEY", $wsKey, "Machine") + # Don't store WSKEY as environment variable $proxy = "" if (Test-Path /etc/omsagent-secret/PROXY) { diff --git a/source/plugins/ruby/filter_cadvisor2mdm.rb b/source/plugins/ruby/filter_cadvisor2mdm.rb index 2423ad024..8d7e729c8 100644 --- a/source/plugins/ruby/filter_cadvisor2mdm.rb +++ b/source/plugins/ruby/filter_cadvisor2mdm.rb @@ -308,8 +308,16 @@ def ensure_cpu_memory_capacity_set end elsif controller_type.downcase == "daemonset" capacity_from_kubelet = KubeletUtils.get_node_capacity - @cpu_capacity = capacity_from_kubelet[0] - @memory_capacity = capacity_from_kubelet[1] + + # Error handling in case /metrics/cadvsior endpoint fails + if !capacity_from_kubelet.nil? && capacity_from_kubelet.length > 1 + @cpu_capacity = capacity_from_kubelet[0] + @memory_capacity = capacity_from_kubelet[1] + else + # cpu_capacity and memory_capacity keep initialized value of 0.0 + @log.error "Error getting capacity_from_kubelet: cpu_capacity and memory_capacity" + end + end end From 148d73974a003aba7f77f93389c59aede4679b49 Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Thu, 7 Jan 2021 18:38:06 -0800 Subject: [PATCH 055/301] updating fbit version and cpu limit (#485) --- kubernetes/linux/setup.sh | 2 +- kubernetes/omsagent.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index fb41d4782..88e9da4dd 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -71,7 +71,7 @@ chmod 777 /opt/telegraf wget -qO - https://packages.fluentbit.io/fluentbit.key | sudo apt-key add - sudo echo "deb https://packages.fluentbit.io/ubuntu/xenial xenial main" >> /etc/apt/sources.list sudo apt-get update -sudo apt-get install td-agent-bit=1.4.2 -y +sudo apt-get install td-agent-bit=1.6.9 -y rm -rf $TMPDIR/omsbundle rm -f $TMPDIR/omsagent*.sh diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 013e2a6c0..563955968 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -368,7 +368,7 @@ spec: imagePullPolicy: IfNotPresent resources: limits: - cpu: 250m + cpu: 500m memory: 600Mi requests: cpu: 75m From bd33dd9f23cfc5c569e83d9389b2d0064757f5be Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Fri, 8 Jan 2021 13:47:25 -0800 Subject: [PATCH 056/301] reverting to older version (#487) --- kubernetes/linux/setup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 88e9da4dd..352be06d7 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -71,7 +71,7 @@ chmod 777 /opt/telegraf wget -qO - https://packages.fluentbit.io/fluentbit.key | sudo apt-key add - sudo echo "deb 
https://packages.fluentbit.io/ubuntu/xenial xenial main" >> /etc/apt/sources.list sudo apt-get update -sudo apt-get install td-agent-bit=1.6.9 -y +sudo apt-get install td-agent-bit=1.6.8 -y rm -rf $TMPDIR/omsbundle rm -f $TMPDIR/omsagent*.sh From d5164d235dd2512824f679ddbe30ebafdf8f1a14 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 11 Jan 2021 11:48:50 -0800 Subject: [PATCH 057/301] Gangams/add fbsettings configurable via configmap (#486) * wip * fbit config settings * add config warn message * handle one config provided but not other * fixed pr feedback * fix copy paste error * rename config parameter names * fix typo * fix fbit crash in helm path * fix nil check --- .../scripts/td-agent-bit-conf-customizer.rb | 11 +++-- .../scripts/tomlparser-agent-config.rb | 48 +++++++++++++++++++ kubernetes/linux/main.sh | 1 + 3 files changed, 57 insertions(+), 3 deletions(-) diff --git a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb index fae3acb36..35b71e550 100644 --- a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb +++ b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb @@ -18,12 +18,17 @@ def substituteFluentBitPlaceHolders bufferChunkSize = ENV["FBIT_TAIL_BUFFER_CHUNK_SIZE"] bufferMaxSize = ENV["FBIT_TAIL_BUFFER_MAX_SIZE"] - serviceInterval = (!interval.nil? && is_number?(interval)) ? interval : @default_service_interval + serviceInterval = (!interval.nil? && is_number?(interval) && interval.to_i > 0 ) ? interval : @default_service_interval serviceIntervalSetting = "Flush " + serviceInterval - tailBufferChunkSize = (!bufferChunkSize.nil? && is_number?(bufferChunkSize)) ? bufferChunkSize : nil + tailBufferChunkSize = (!bufferChunkSize.nil? && is_number?(bufferChunkSize) && bufferChunkSize.to_i > 0) ? bufferChunkSize : nil - tailBufferMaxSize = (!bufferMaxSize.nil? && is_number?(bufferMaxSize)) ? bufferMaxSize : nil + tailBufferMaxSize = (!bufferMaxSize.nil? && is_number?(bufferMaxSize) && bufferMaxSize.to_i > 0) ? bufferMaxSize : nil + + if ((!tailBufferChunkSize.nil? && tailBufferMaxSize.nil?) || (!tailBufferChunkSize.nil? && !tailBufferMaxSize.nil? && tailBufferChunkSize.to_i > tailBufferMaxSize.to_i)) + puts "config:warn buffer max size must be greater or equal to chunk size" + tailBufferMaxSize = tailBufferChunkSize + end text = File.read(@td_agent_bit_conf_path) new_contents = text.gsub("${SERVICE_FLUSH_INTERVAL}", serviceIntervalSetting) diff --git a/build/linux/installer/scripts/tomlparser-agent-config.rb b/build/linux/installer/scripts/tomlparser-agent-config.rb index 87c5194ed..e587909e5 100644 --- a/build/linux/installer/scripts/tomlparser-agent-config.rb +++ b/build/linux/installer/scripts/tomlparser-agent-config.rb @@ -55,6 +55,12 @@ @podsEmitStreamBatchSizeMin = 50 @nodesEmitStreamBatchSizeMin = 50 +# configmap settings related fbit config +@fbitFlushIntervalSecs = 0 +@fbitTailBufferChunkSizeMBs = 0 +@fbitTailBufferMaxSizeMBs = 0 + + def is_number?(value) true if Integer(value) rescue false end @@ -131,6 +137,38 @@ def populateSettingValuesFromConfigMap(parsedConfig) puts "Using config map value: NODES_EMIT_STREAM_BATCH_SIZE = #{@nodesEmitStreamBatchSize}" end end + # fbit config settings + fbit_config = parsedConfig[:agent_settings][:fbit_config] + if !fbit_config.nil? + fbitFlushIntervalSecs = fbit_config[:log_flush_interval_secs] + if !fbitFlushIntervalSecs.nil? 
&& is_number?(fbitFlushIntervalSecs) && fbitFlushIntervalSecs.to_i > 0 + @fbitFlushIntervalSecs = fbitFlushIntervalSecs.to_i + puts "Using config map value: log_flush_interval_secs = #{@fbitFlushIntervalSecs}" + end + + fbitTailBufferChunkSizeMBs = fbit_config[:tail_buf_chunksize_megabytes] + if !fbitTailBufferChunkSizeMBs.nil? && is_number?(fbitTailBufferChunkSizeMBs) && fbitTailBufferChunkSizeMBs.to_i > 0 + @fbitTailBufferChunkSizeMBs = fbitTailBufferChunkSizeMBs.to_i + puts "Using config map value: tail_buf_chunksize_megabytes = #{@fbitTailBufferChunkSizeMBs}" + end + + fbitTailBufferMaxSizeMBs = fbit_config[:tail_buf_maxsize_megabytes] + if !fbitTailBufferMaxSizeMBs.nil? && is_number?(fbitTailBufferMaxSizeMBs) && fbitTailBufferMaxSizeMBs.to_i > 0 + if fbitTailBufferMaxSizeMBs.to_i >= @fbitTailBufferChunkSizeMBs + @fbitTailBufferMaxSizeMBs = fbitTailBufferMaxSizeMBs.to_i + puts "Using config map value: tail_buf_maxsize_megabytes = #{@fbitTailBufferMaxSizeMBs}" + else + # tail_buf_maxsize_megabytes has to be greater or equal to tail_buf_chunksize_megabytes + @fbitTailBufferMaxSizeMBs = @fbitTailBufferChunkSizeMBs + puts "config::warn: tail_buf_maxsize_megabytes must be greater or equal to value of tail_buf_chunksize_megabytes. Using tail_buf_maxsize_megabytes = #{@fbitTailBufferMaxSizeMBs} since provided config value not valid" + end + end + # in scenario - tail_buf_chunksize_megabytes provided but not tail_buf_maxsize_megabytes to prevent fbit crash + if @fbitTailBufferChunkSizeMBs > 0 && @fbitTailBufferMaxSizeMBs == 0 + @fbitTailBufferMaxSizeMBs = @fbitTailBufferChunkSizeMBs + puts "config::warn: since tail_buf_maxsize_megabytes not provided hence using tail_buf_maxsize_megabytes=#{@fbitTailBufferMaxSizeMBs} which is same as the value of tail_buf_chunksize_megabytes" + end + end end rescue => errorStr puts "config::error:Exception while reading config settings for agent configuration setting - #{errorStr}, using defaults" @@ -164,6 +202,16 @@ def populateSettingValuesFromConfigMap(parsedConfig) file.write("export HPA_CHUNK_SIZE=#{@hpaChunkSize}\n") file.write("export PODS_EMIT_STREAM_BATCH_SIZE=#{@podsEmitStreamBatchSize}\n") file.write("export NODES_EMIT_STREAM_BATCH_SIZE=#{@nodesEmitStreamBatchSize}\n") + # fbit settings + if @fbitFlushIntervalSecs > 0 + file.write("export FBIT_SERVICE_FLUSH_INTERVAL=#{@fbitFlushIntervalSecs}\n") + end + if @fbitTailBufferChunkSizeMBs > 0 + file.write("export FBIT_TAIL_BUFFER_CHUNK_SIZE=#{@fbitTailBufferChunkSizeMBs}\n") + end + if @fbitTailBufferMaxSizeMBs > 0 + file.write("export FBIT_TAIL_BUFFER_MAX_SIZE=#{@fbitTailBufferMaxSizeMBs}\n") + end # Close file after writing all environment variables file.close else diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index ed16d3e32..b4df538d4 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -172,6 +172,7 @@ source config_env_var #Parse the configmap to set the right environment variables for agent config. 
+#Note > tomlparser-agent-config.rb has to be parsed first before td-agent-bit-conf-customizer.rb for fbit agent settings /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-agent-config.rb cat agent_config_env_var | while read line; do From 908d9b0cdcd46452582338ca23f7bfbf85411e37 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 11 Jan 2021 12:47:38 -0800 Subject: [PATCH 058/301] Gangams/jan agent release tasks (#484) * wip * explicit amd64 affinity for hybrid workloads * fix space issue * wip * revert vscode setting file --- .../templates/omsagent-daemonset-windows.yaml | 4 ++++ .../templates/omsagent-daemonset.yaml | 4 ++++ charts/azuremonitor-containers/values.yaml | 18 +++++++++++++++++- kubernetes/omsagent.yaml | 8 ++++++++ 4 files changed, 33 insertions(+), 1 deletion(-) diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index 6a309c121..81003c704 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -27,6 +27,10 @@ spec: checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . | sha256sum }} checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} spec: + dnsConfig: + options: + - name: ndots + value: "3" {{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} nodeSelector: kubernetes.io/os: windows diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index d57c4d82b..3d29ede42 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -28,6 +28,10 @@ spec: checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} checksum/logsettings: {{ toYaml .Values.omsagent.logsettings | sha256sum }} spec: + dnsConfig: + options: + - name: ndots + value: "3" {{- if .Values.omsagent.rbac }} serviceAccountName: omsagent {{- end }} diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 907e315d1..b3d029146 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -58,7 +58,7 @@ omsagent: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - labelSelector: - matchExpressions: + matchExpressions: - key: kubernetes.io/os operator: In values: @@ -67,6 +67,10 @@ omsagent: operator: NotIn values: - virtual-kubelet + - key: kubernetes.io/arch + operator: In + values: + - amd64 nodeSelectorTerms: - labelSelector: matchExpressions: @@ -78,6 +82,10 @@ omsagent: operator: NotIn values: - virtual-kubelet + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 deployment: affinity: nodeAffinity: @@ -106,6 +114,10 @@ omsagent: operator: NotIn values: - master + - key: kubernetes.io/arch + operator: In + values: + - amd64 nodeSelectorTerms: - labelSelector: matchExpressions: @@ -121,6 +133,10 @@ omsagent: operator: NotIn values: - master + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 563955968..df80cabc4 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -362,6 +362,10 @@ spec: 
schema-versions: "v1" spec: serviceAccountName: omsagent + dnsConfig: + options: + - name: ndots + value: "3" containers: - name: omsagent image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020" @@ -675,6 +679,10 @@ spec: schema-versions: "v1" spec: serviceAccountName: omsagent + dnsConfig: + options: + - name: ndots + value: "3" containers: - name: omsagent-win image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod11092020" From 8ede53653f79a7401352739f6d4f09e572b12235 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 11 Jan 2021 13:18:14 -0800 Subject: [PATCH 059/301] remove per container logs in ci (#488) --- .../plugins/ruby/kubernetes_container_inventory.rb | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/source/plugins/ruby/kubernetes_container_inventory.rb b/source/plugins/ruby/kubernetes_container_inventory.rb index ba6a9af42..69beca493 100644 --- a/source/plugins/ruby/kubernetes_container_inventory.rb +++ b/source/plugins/ruby/kubernetes_container_inventory.rb @@ -189,8 +189,7 @@ def getContainersInfoMap(podItem, isWindows) return containersInfoMap end - def obtainContainerEnvironmentVars(containerId) - $log.info("KubernetesContainerInventory::obtainContainerEnvironmentVars @ #{Time.now.utc.iso8601}") + def obtainContainerEnvironmentVars(containerId) envValueString = "" begin isCGroupPidFetchRequired = false @@ -207,8 +206,7 @@ def obtainContainerEnvironmentVars(containerId) end end - if isCGroupPidFetchRequired - $log.info("KubernetesContainerInventory::obtainContainerEnvironmentVars fetching cGroup parent pid @ #{Time.now.utc.iso8601} for containerId: #{containerId}") + if isCGroupPidFetchRequired Dir["/hostfs/proc/*/cgroup"].each do |filename| begin if File.file?(filename) && File.exist?(filename) && File.foreach(filename).grep(/#{containerId}/).any? @@ -231,8 +229,7 @@ def obtainContainerEnvironmentVars(containerId) end cGroupPid = @@containerCGroupCache[containerId] if !cGroupPid.nil? && !cGroupPid.empty? - environFilePath = "/hostfs/proc/#{cGroupPid}/environ" - $log.info("KubernetesContainerInventory::obtainContainerEnvironmentVars cGroupPid: #{cGroupPid} environFilePath: #{environFilePath} for containerId: #{containerId}") + environFilePath = "/hostfs/proc/#{cGroupPid}/environ" if File.exist?(environFilePath) # Skip environment variable processing if it contains the flag AZMON_COLLECT_ENV=FALSE # Check to see if the environment variable collection is disabled for this container. @@ -245,8 +242,7 @@ def obtainContainerEnvironmentVars(containerId) if !envVars.nil? && !envVars.empty? envVars = envVars.split("\0") envValueString = envVars.to_json - envValueStringLength = envValueString.length - $log.info("KubernetesContainerInventory::environment vars filename @ #{environFilePath} envVars size @ #{envValueStringLength}") + envValueStringLength = envValueString.length if envValueStringLength >= 200000 lastIndex = envValueString.rindex("\",") if !lastIndex.nil? 
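
The plugin change above also caps the JSON-encoded environment-variable string once it reaches 200,000 characters, using rindex to walk back to the last complete entry boundary. A minimal Ruby sketch of that idea, with illustrative names rather than the plugin's exact implementation:

require "json"

MAX_ENV_JSON_LENGTH = 200_000

# Sketch only: trim a JSON-encoded env var list back to the last complete
# entry once it exceeds the size cap, so the emitted value stays valid JSON.
def truncate_env_json(env_vars)
  env_json = env_vars.to_json
  return env_json if env_json.length < MAX_ENV_JSON_LENGTH

  # index of the closing quote of the last entry that fits under the cap
  last_entry_end = env_json.rindex("\",", MAX_ENV_JSON_LENGTH)
  return "[]" if last_entry_end.nil?

  env_json[0..last_entry_end] + "]"
end

puts truncate_env_json(["PATH=/usr/bin", "HOSTNAME=pod-0"])

Cutting at an entry boundary rather than at a hard character offset keeps the truncated value parseable instead of an arbitrarily clipped string.
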
From 37e5218e4a6a6f6c02591093356e0dee7f79af7f Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 12 Jan 2021 10:34:31 -0800 Subject: [PATCH 060/301] updates for ciprod01112021 release (#489) --- ReleaseNotes.md | 17 +++++++++++++++++ build/version | 6 +++--- charts/azuremonitor-containers/Chart.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 6 +++--- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 12 ++++++------ kubernetes/windows/Dockerfile | 2 +- .../onboarding/managed/enable-monitoring.ps1 | 2 +- scripts/onboarding/managed/enable-monitoring.sh | 2 +- .../onboarding/managed/upgrade-monitoring.sh | 2 +- 10 files changed, 35 insertions(+), 18 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index ddfd01314..b1eb316a1 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -10,6 +10,23 @@ additional questions or comments. ## Release History Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 01/11/2021 - +##### Version microsoft/oms:ciprod01112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01112021 (linux) +##### Version microsoft/oms:win-ciprod01112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod01112021 (windows) +##### Code change log +- Fixes for Linux Agent Replicaset Pod OOMing issue +- Update fluentbit (1.14.2 to 1.6.8) for the Linux Daemonset +- Make Fluentbit settings: log_flush_interval_secs, tail_buf_chunksize_megabytes and tail_buf_maxsize_megabytes configurable via configmap +- Support for PV inventory collection +- Removal of Custom metric region check for Public cloud regions and update to use cloud environment variable to determine the custom metric support +- For daemonset pods, add the dnsconfig to use ndots: 3 from ndots:5 to optimize the number of DNS API calls made +- Fix for inconsistency in the collection container environment variables for the pods which has high number of containers +- Fix for disabling of std{out;err} log_collection_settings via configmap issue in windows daemonset +- Update to use workspace key from mount file rather than environment variable for windows daemonset agent +- Remove per container info logs in the container inventory +- Enable ADX route for windows container logs +- Remove logging to termination log in windows agent liveness probe + ### 11/09/2020 - ##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020 (linux) diff --git a/build/version b/build/version index a8b78ecac..711a96921 100644 --- a/build/version +++ b/build/version @@ -2,11 +2,11 @@ # Build Version Information -CONTAINER_BUILDVERSION_MAJOR=11 +CONTAINER_BUILDVERSION_MAJOR=12 CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 -CONTAINER_BUILDVERSION_BUILDNR=1 -CONTAINER_BUILDVERSION_DATE=20201109 +CONTAINER_BUILDVERSION_BUILDNR=0 +CONTAINER_BUILDVERSION_DATE=20210111 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 987841f77..a809a4e69 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.7.9 +version: 2.8.0 kubeVersion: "^1.10.0-0" keywords: - monitoring diff 
--git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index b3d029146..debd66b0b 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -12,10 +12,10 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod11092020" - tagWindows: "win-ciprod11092020" + tag: "ciprod01112021" + tagWindows: "win-ciprod01112021" pullPolicy: IfNotPresent - dockerProviderVersion: "11.0.0-1" + dockerProviderVersion: "12.0.0-0" agentVersion: "1.10.0.1" ## To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 34ab133da..2e1118922 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod11092020 +ARG IMAGE_TAG=ciprod01112021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index df80cabc4..67bd9cdde 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -358,7 +358,7 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "11.0.0-1" + dockerProviderVersion: "12.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01112021" imagePullPolicy: IfNotPresent resources: limits: @@ -521,13 +521,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "11.0.0-1" + dockerProviderVersion: "12.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01112021" imagePullPolicy: IfNotPresent resources: limits: @@ -675,7 +675,7 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "11.0.0-1" + dockerProviderVersion: "12.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -685,7 +685,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod11092020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod01112021" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 10ea235b2..f852bd236 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod11092020 +ARG IMAGE_TAG=win-ciprod01112021 # Do not split this into multiple RUN! 
# Docker creates a layer for every RUN-Statement diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index 7b128b112..45ddb44b0 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -64,7 +64,7 @@ $isUsingServicePrincipal = $false # released chart version in mcr $mcr = "mcr.microsoft.com" -$mcrChartVersion = "2.7.9" +$mcrChartVersion = "2.8.0" $mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" $helmLocalRepoName = "." $omsAgentDomainName="opinsights.azure.com" diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index 85428aff7..2dc0a465f 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -44,7 +44,7 @@ defaultAzureCloud="AzureCloud" omsAgentDomainName="opinsights.azure.com" # released chart version in mcr -mcrChartVersion="2.7.9" +mcrChartVersion="2.8.0" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" helmLocalRepoName="." diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 847bf84ea..8826b6df6 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -20,7 +20,7 @@ set -e set -o pipefail # released chart version for Azure Arc enabled Kubernetes public preview -mcrChartVersion="2.7.9" +mcrChartVersion="2.8.0" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" From 3c97af6ac0613045df20f59b126f5aec94855e1f Mon Sep 17 00:00:00 2001 From: deagraw Date: Thu, 14 Jan 2021 10:47:48 -0800 Subject: [PATCH 061/301] new yaml files (#491) --- .../clusteruser/cluster-user-role-binding.yaml | 12 ++++++++++++ .../onboarding/clusteruser/cluster-user-role.yaml | 14 ++++++++++++++ 2 files changed, 26 insertions(+) create mode 100644 scripts/onboarding/clusteruser/cluster-user-role-binding.yaml create mode 100644 scripts/onboarding/clusteruser/cluster-user-role.yaml diff --git a/scripts/onboarding/clusteruser/cluster-user-role-binding.yaml b/scripts/onboarding/clusteruser/cluster-user-role-binding.yaml new file mode 100644 index 000000000..fce2fc582 --- /dev/null +++ b/scripts/onboarding/clusteruser/cluster-user-role-binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: containerHealth-read-logs-global +roleRef: + kind: ClusterRole + name: containerHealth-log-reader + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: User + name: clusterUser + apiGroup: rbac.authorization.k8s.io diff --git a/scripts/onboarding/clusteruser/cluster-user-role.yaml b/scripts/onboarding/clusteruser/cluster-user-role.yaml new file mode 100644 index 000000000..b3519fdd3 --- /dev/null +++ b/scripts/onboarding/clusteruser/cluster-user-role.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: containerHealth-log-reader +rules: + - apiGroups: ["", "metrics.k8s.io", "extensions", "apps"] + resources: + - "pods/log" + - "events" + - "nodes" + - "pods" + - "deployments" + - "replicasets" + verbs: ["get", "list"] From 90e1a5be8928305cd2378c1922924efac8cafc80 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Thu, 21 Jan 2021 18:48:14 -0800 Subject: [PATCH 062/301] Use cloud-specific instrumentation keys 
(#494) If APPLICATIONINSIGHTS_AUTH_URL is set/non-empty then the agent will now grab a custom IKey from a URL stored in APPLICATIONINSIGHTS_AUTH_URL --- .../build-and-publish-docker-image.sh | 0 kubernetes/linux/main.sh | 38 ++++++++++++-- kubernetes/windows/main.ps1 | 50 ++++++++++++++++--- 3 files changed, 75 insertions(+), 13 deletions(-) mode change 100644 => 100755 kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh diff --git a/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh b/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh old mode 100644 new mode 100755 diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index b4df538d4..c4067f25e 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -161,6 +161,39 @@ fi export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT echo "export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT" >> ~/.bashrc +# Check if the instrumentation key needs to be fetched from a storage account (as in airgapped clouds) +if [ ${#APPLICATIONINSIGHTS_AUTH_URL} -ge 1 ]; then # (check if APPLICATIONINSIGHTS_AUTH_URL has length >=1) + for BACKOFF in {1..4}; do + KEY=$(curl -sS $APPLICATIONINSIGHTS_AUTH_URL ) + # there's no easy way to get the HTTP status code from curl, so just check if the result is well formatted + if [[ $KEY =~ ^[A-Za-z0-9=]+$ ]]; then + break + else + sleep $((2**$BACKOFF / 4)) # (exponential backoff) + fi + done + + # validate that the retrieved data is an instrumentation key + if [[ $KEY =~ ^[A-Za-z0-9=]+$ ]]; then + export APPLICATIONINSIGHTS_AUTH=$(echo $KEY) + echo "export APPLICATIONINSIGHTS_AUTH=$APPLICATIONINSIGHTS_AUTH" >> ~/.bashrc + echo "Using cloud-specific instrumentation key" + else + # no ikey can be retrieved. Disable telemetry and continue + export DISABLE_TELEMETRY=true + echo "export DISABLE_TELEMETRY=true" >> ~/.bashrc + echo "Could not get cloud-specific instrumentation key (network error?). Disabling telemetry" + fi +fi + + +aikey=$(echo $APPLICATIONINSIGHTS_AUTH | base64 --decode) +export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey +echo "export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey" >> ~/.bashrc + +source ~/.bashrc + + #Parse the configmap to set the right environment variables. 
/opt/microsoft/omsagent/ruby/bin/ruby tomlparser.rb @@ -581,11 +614,6 @@ echo "export HOST_ETC=/hostfs/etc" >> ~/.bashrc export HOST_VAR=/hostfs/var echo "export HOST_VAR=/hostfs/var" >> ~/.bashrc -aikey=$(echo $APPLICATIONINSIGHTS_AUTH | base64 --decode) -export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey -echo "export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey" >> ~/.bashrc - -source ~/.bashrc #start telegraf /opt/telegraf --config $telegrafConfFile & diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index a297e3801..722392157 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -119,10 +119,48 @@ function Set-EnvironmentVariables { $env:AZMON_AGENT_CFG_SCHEMA_VERSION } - # Set environment variable for TELEMETRY_APPLICATIONINSIGHTS_KEY - $aiKey = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($env:APPLICATIONINSIGHTS_AUTH)) - [System.Environment]::SetEnvironmentVariable("TELEMETRY_APPLICATIONINSIGHTS_KEY", $aiKey, "Process") - [System.Environment]::SetEnvironmentVariable("TELEMETRY_APPLICATIONINSIGHTS_KEY", $aiKey, "Machine") + # Check if the instrumentation key needs to be fetched from a storage account (as in airgapped clouds) + $aiKeyURl = [System.Environment]::GetEnvironmentVariable('APPLICATIONINSIGHTS_AUTH_URL') + if ($aiKeyURl) { + $aiKeyFetched = "" + # retry up to 5 times + for( $i = 1; $i -le 4; $i++) { + try { + $response = Invoke-WebRequest -uri $aiKeyURl -UseBasicParsing -TimeoutSec 5 -ErrorAction:Stop + + if ($response.StatusCode -ne 200) { + Write-Host "Expecting reponse code 200, was: $($response.StatusCode), retrying" + Start-Sleep -Seconds ([MATH]::Pow(2, $i) / 4) + } + else { + $aiKeyFetched = $response.Content + break + } + } + catch { + Write-Host "Exception encountered fetching instrumentation key:" + Write-Host $_.Exception + } + } + + # Check if the fetched IKey was properly encoded. if not then turn off telemetry + if ($aiKeyFetched -match '^[A-Za-z0-9=]+$') { + Write-Host "Using cloud-specific instrumentation key" + [System.Environment]::SetEnvironmentVariable("APPLICATIONINSIGHTS_AUTH", $aiKeyFetched, "Process") + [System.Environment]::SetEnvironmentVariable("APPLICATIONINSIGHTS_AUTH", $aiKeyFetched, "Machine") + } + else { + # Couldn't fetch the Ikey, turn telemetry off + Write-Host "Could not get cloud-specific instrumentation key (network error?). 
Disabling telemetry" + [System.Environment]::SetEnvironmentVariable("DISABLE_TELEMETRY", "True", "Process") + [System.Environment]::SetEnvironmentVariable("DISABLE_TELEMETRY", "True", "Machine") + } + } + + $aiKeyDecoded = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($env:APPLICATIONINSIGHTS_AUTH)) + [System.Environment]::SetEnvironmentVariable("TELEMETRY_APPLICATIONINSIGHTS_KEY", $aiKeyDecoded, "Process") + [System.Environment]::SetEnvironmentVariable("TELEMETRY_APPLICATIONINSIGHTS_KEY", $aiKeyDecoded, "Machine") + # run config parser ruby /opt/omsagentwindows/scripts/ruby/tomlparser.rb @@ -324,7 +362,3 @@ Get-WmiObject Win32_process | Where-Object { $_.Name -match 'powershell' } | For #check if fluentd service is running Get-Service fluentdwinaks - - - - From 98b6d779d29d4bbc56657e0403ef03e4498028e3 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 21 Jan 2021 23:27:07 -0800 Subject: [PATCH 063/301] upgrade apt to latest version (#492) * upgrade apt to latest version * fix pr feedback --- kubernetes/linux/setup.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 352be06d7..fe6c0565a 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -2,8 +2,8 @@ TMPDIR="/opt" cd $TMPDIR #Download utf-8 encoding capability on the omsagent container. - -apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y locales +#upgrade apt to latest version +apt-get update && apt-get install -y apt && DEBIAN_FRONTEND=noninteractive apt-get install -y locales sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ dpkg-reconfigure --frontend=noninteractive locales && \ From ddcd3eec1037471abecc9b13b0807e520d7fbeff Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 27 Jan 2021 13:20:27 -0800 Subject: [PATCH 064/301] Gangams/add support for extension msi for arc k8s cluster (#495) * wip * add env var for the arc k8s extension name * chart update * extension msi updates * fix bug * revert chart and image to prod version * minor text changes * image tag to prod * wip * wip * wip * wip * final updates * fix whitespaces * simplify crd yaml --- .../templates/omsagent-arc-k8s-crd.yaml | 17 +++++++++++++++++ .../templates/omsagent-daemonset.yaml | 6 +++++- .../templates/omsagent-deployment.yaml | 6 +++++- .../templates/omsagent-rbac.yaml | 4 ++++ charts/azuremonitor-containers/values.yaml | 7 +++++-- source/plugins/ruby/arc_k8s_cluster_identity.rb | 11 ++++++++--- 6 files changed, 44 insertions(+), 7 deletions(-) diff --git a/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml b/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml index ebdd5ea3f..b7482b8b5 100644 --- a/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml @@ -1,4 +1,18 @@ {{- if or ( contains "microsoft.kubernetes/connectedclusters" (.Values.Azure.Cluster.ResourceId | lower) ) ( contains "microsoft.kubernetes/connectedclusters" (.Values.omsagent.env.clusterId | lower)) }} +#extension model +{{- if not (empty .Values.Azure.Extension.Name) }} +apiVersion: clusterconfig.azure.com/v1beta1 +kind: AzureExtensionIdentity +metadata: + name: {{ .Values.Azure.Extension.Name }} + namespace: azure-arc +spec: + serviceAccounts: + - name: omsagent + namespace: kube-system + tokenNamespace: azure-arc +--- +{{- end }} apiVersion: clusterconfig.azure.com/v1beta1 kind: 
AzureClusterIdentityRequest metadata: @@ -6,4 +20,7 @@ metadata: namespace: azure-arc spec: audience: https://monitoring.azure.com/ + {{- if not (empty .Values.Azure.Extension.Name) }} + resourceId: {{ .Values.Azure.Extension.Name }} + {{- end }} {{- end }} diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index 3d29ede42..595edd7bb 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -70,8 +70,12 @@ spec: valueFrom: fieldRef: fieldPath: status.hostIP + {{- if not (empty .Values.Azure.Extension.Name) }} + - name: ARC_K8S_EXTENSION_NAME + value: {{ .Values.Azure.Extension.Name | quote }} + {{- end }} - name: USER_ASSIGNED_IDENTITY_CLIENT_ID - value: "" + value: "" {{- if .Values.omsagent.logsettings.logflushintervalsecs }} - name: FBIT_SERVICE_FLUSH_INTERVAL value: {{ .Values.omsagent.logsettings.logflushintervalsecs | quote }} diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index 8609d25c9..ecd0b705b 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -67,8 +67,12 @@ spec: valueFrom: fieldRef: fieldPath: status.hostIP + {{- if not (empty .Values.Azure.Extension.Name) }} + - name: ARC_K8S_EXTENSION_NAME + value: {{ .Values.Azure.Extension.Name | quote }} + {{- end }} - name: USER_ASSIGNED_IDENTITY_CLIENT_ID - value: "" + value: "" securityContext: privileged: true ports: diff --git a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml index bd4e9baf3..5db5c2dab 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml @@ -33,10 +33,14 @@ rules: verbs: ["get", "create", "patch"] - nonResourceURLs: ["/metrics"] verbs: ["get"] +#arc k8s extension model grants access as part of the extension msi +#remove this explicit permission once the extension available in public preview +{{- if (empty .Values.Azure.Extension.Name) }} - apiGroups: [""] resources: ["secrets"] resourceNames: ["container-insights-clusteridentityrequest-token"] verbs: ["get"] +{{- end }} --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index debd66b0b..341b9fb65 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -4,11 +4,14 @@ ## Microsoft OMS Agent image for kubernetes cluster monitoring ## ref: https://github.com/microsoft/Docker-Provider/tree/ci_prod -## Values of ResourceId and Region under Azure->Cluster being populated by Azure Arc K8s RP during the installation of the extension +## Values of under Azure are being populated by Azure Arc K8s RP during the installation of the extension Azure: Cluster: Region: - ResourceId: + ResourceId: + Extension: + Name: "" + ResourceId: "" omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" diff --git a/source/plugins/ruby/arc_k8s_cluster_identity.rb b/source/plugins/ruby/arc_k8s_cluster_identity.rb index ef55c3257..7824f3d4e 100644 --- a/source/plugins/ruby/arc_k8s_cluster_identity.rb +++ 
b/source/plugins/ruby/arc_k8s_cluster_identity.rb @@ -18,7 +18,7 @@ class ArcK8sClusterIdentity @@crd_resource_uri_template = "%{kube_api_server_url}/apis/%{cluster_config_crd_api_version}/namespaces/%{cluster_identity_resource_namespace}/azureclusteridentityrequests/%{cluster_identity_resource_name}" @@secret_resource_uri_template = "%{kube_api_server_url}/api/v1/namespaces/%{cluster_identity_token_secret_namespace}/secrets/%{token_secret_name}" @@azure_monitor_custom_metrics_audience = "https://monitoring.azure.com/" - @@cluster_identity_request_kind = "AzureClusterIdentityRequest" + @@cluster_identity_request_kind = "AzureClusterIdentityRequest" def initialize @LogPath = "/var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log" @@ -33,7 +33,9 @@ def initialize @log.warn "got api server url nil from KubernetesApiClient.getKubeAPIServerUrl @ #{Time.now.utc.iso8601}" end @http_client = get_http_client - @service_account_token = get_service_account_token + @service_account_token = get_service_account_token + @extensionName = ENV["ARC_K8S_EXTENSION_NAME"] + @log.info "extension name:#{@extensionName} @ #{Time.now.utc.iso8601}" @log.info "initialize complete @ #{Time.now.utc.iso8601}" end @@ -148,7 +150,7 @@ def renew_near_expiry_token() update_response = @http_client.request(update_request) @log.info "Got response of #{update_response.code} for PATCH #{crd_request_uri} @ #{Time.now.utc.iso8601}" if update_response.code.to_i == 404 - @log.info "since crd resource doesnt exist since creating crd resource : #{@@cluster_identity_resource_name} @ #{Time.now.utc.iso8601}" + @log.info "since crd resource doesnt exist hence creating crd resource : #{@@cluster_identity_resource_name} @ #{Time.now.utc.iso8601}" create_request = Net::HTTP::Post.new(crd_request_uri) create_request["Content-Type"] = "application/json" create_request["Authorization"] = "Bearer #{@service_account_token}" @@ -211,6 +213,9 @@ def get_crd_request_body body["metadata"]["namespace"] = @@cluster_identity_resource_namespace body["spec"] = {} body["spec"]["audience"] = @@azure_monitor_custom_metrics_audience + if !@extensionName.nil? && !@extensionName.empty? 
+ body["spec"]["resourceId"] = @extensionName + end return body end end From 0cd99e41b251254ce23e32c86ab28f06ea2c34d3 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 27 Jan 2021 13:35:11 -0800 Subject: [PATCH 065/301] Gangams/arm template arc k8s extension (#496) * arm templates for arc k8s extension * update to use official extension type name * update * add identity property * add proxyendpointurl parameter * add default values --- .../existingClusterOnboarding.json | 135 ++++++++++++++++++ .../existingClusterParam.json | 24 ++++ 2 files changed, 159 insertions(+) create mode 100644 scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json create mode 100644 scripts/onboarding/templates/arc-k8s-extension/existingClusterParam.json diff --git a/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json new file mode 100644 index 000000000..8ebef232a --- /dev/null +++ b/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json @@ -0,0 +1,135 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "clusterResourceId": { + "type": "string", + "metadata": { + "description": "Resource Id of the Azure Arc Connected Cluster" + } + }, + "clusterRegion": { + "type": "string", + "metadata": { + "description": "Location of the Azure Arc Connected Cluster Resource e.g. \"eastus\"" + } + }, + "proxyEndpointUrl": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "If the cluster behind forward proxy, then specify Proxy Endpoint URL in this format: http(s)://:@:" + } + }, + "workspaceResourceId": { + "type": "string", + "metadata": { + "description": "Azure Monitor Log Analytics Resource ID" + } + }, + "workspaceRegion": { + "type": "string", + "metadata": { + "description": "Azure Monitor Log Analytics Workspace region e.g. \"eastus\"" + } + }, + "workspaceDomain": { + "type": "string", + "allowedValues": [ + "opinsights.azure.com", + "opinsights.azure.cn", + "opinsights.azure.us", + "opinsights.azure.eaglex.ic.gov", + "opinsights.azure.microsoft.scloud" + ], + "defaultValue": "opinsights.azure.com", + "metadata": { + "description": "Azure Monitor Log Analytics Workspace Domain e.g. 
opinsights.azure.com" + } + } + }, + "resources": [ + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('ContainerInsights', '-', uniqueString(parameters('workspaceResourceId')))]", + "apiVersion": "2017-05-10", + "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]", + "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]", + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "apiVersion": "2015-11-01-preview", + "type": "Microsoft.OperationsManagement/solutions", + "location": "[parameters('workspaceRegion')]", + "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]", + "properties": { + "workspaceResourceId": "[parameters('workspaceResourceId')]" + }, + "plan": { + "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]", + "product": "[Concat('OMSGallery/', 'ContainerInsights')]", + "promotionCode": "", + "publisher": "Microsoft" + } + } + ] + }, + "parameters": {} + } + }, + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('arc-k8s-ci-extension', '-', uniqueString(parameters('clusterResourceId')))]", + "apiVersion": "2019-05-01", + "subscriptionId": "[split(parameters('clusterResourceId'),'/')[2]]", + "resourceGroup": "[split(parameters('clusterResourceId'),'/')[4]]", + "dependsOn": [ + "[Concat('ContainerInsights', '-', uniqueString(parameters('workspaceResourceId')))]" + ], + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "type": "Microsoft.KubernetesConfiguration/extensions", + "apiVersion": "2020-07-01-preview", + "name": "azuremonitor-containers", + "location": "[parameters('clusterRegion')]", + "identity": {"type": "systemassigned"}, + "properties": { + "extensionType": "Microsoft.AzureMonitor.Containers", + "configurationSettings": { + "logAnalyticsWorkspaceResourceID": "[parameters('workspaceResourceId')]", + "omsagent.domain": "[parameters('workspaceDomain')]" + }, + "configurationProtectedSettings": { + "omsagent.secret.wsid": "[reference(parameters('workspaceResourceId'), '2015-03-20').customerId]", + "omsagent.secret.key": "[listKeys(parameters('workspaceResourceId'), '2015-03-20').primarySharedKey]" , + "omsagent.proxy": "[if(equals(parameters('proxyEndpointUrl'), ''), '', parameters('proxyEndpointUrl'))]" + }, + "autoUpgradeMinorVersion": true, + "releaseTrain": "Stable", + "scope": { + "Cluster": { + "releaseNamespace": "azuremonitor-containers" + } + } + }, + "scope": "[concat('Microsoft.Kubernetes/connectedClusters/', split(parameters('clusterResourceId'),'/')[8])]" + } + ] + } + } + } + ] +} diff --git a/scripts/onboarding/templates/arc-k8s-extension/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension/existingClusterParam.json new file mode 100644 index 000000000..b74b5ac95 --- /dev/null +++ b/scripts/onboarding/templates/arc-k8s-extension/existingClusterParam.json @@ -0,0 +1,24 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "clusterResourceId": { + "value": 
"/subscriptions//resourceGroups//providers/Microsoft.Kubernetes/connectedClusters/" + }, + "clusterRegion": { + "value": "" + }, + "proxyEndpointUrl": { + "value": "" + }, + "workspaceResourceId": { + "value": "/subscriptions//resourcegroups//providers/microsoft.operationalinsights/workspaces/" + }, + "workspaceRegion": { + "value": "" + }, + "workspaceDomain": { + "value": "" + } + } +} From 13521c5d316eb9e1c147c74b661f67b5873b2d5b Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 1 Feb 2021 10:27:40 -0800 Subject: [PATCH 066/301] Gangams/aks monitoring via policy (#497) * enable monitoring through policy * wip * handle tags * wip * add alias * wip * working * updates * working * with deployment name * doc updates * doc updates * fix typo in the docs --- .../azure-policy.json | 113 ++++++++++++++++++ .../azurepolicy.parameters.json | 9 ++ .../azurepolicy.rules.json | 101 ++++++++++++++++ .../enable-monitoring-using-policy.md | 64 ++++++++++ 4 files changed, 287 insertions(+) create mode 100644 scripts/onboarding/aks/onboarding-using-azure-policy/azure-policy.json create mode 100644 scripts/onboarding/aks/onboarding-using-azure-policy/azurepolicy.parameters.json create mode 100644 scripts/onboarding/aks/onboarding-using-azure-policy/azurepolicy.rules.json create mode 100644 scripts/onboarding/enable-monitoring-using-policy.md diff --git a/scripts/onboarding/aks/onboarding-using-azure-policy/azure-policy.json b/scripts/onboarding/aks/onboarding-using-azure-policy/azure-policy.json new file mode 100644 index 000000000..c68bfed17 --- /dev/null +++ b/scripts/onboarding/aks/onboarding-using-azure-policy/azure-policy.json @@ -0,0 +1,113 @@ +{ + "mode": "Indexed", + "policyRule": { + "if": { + "field": "type", + "equals": "Microsoft.ContainerService/managedClusters" + }, + "then": { + "effect": "deployIfNotExists", + "details": { + "type": "Microsoft.ContainerService/managedClusters", + "name": "[field('name')]", + "roleDefinitionIds": [ + "/providers/Microsoft.Authorization/roleDefinitions/ed7f3fbd-7b88-4dd4-9017-9adb7ce333f8", + "/providers/Microsoft.Authorization/roleDefinitions/92aaf0da-9dab-42b6-94a3-d43ce8d16293" + ], + "existenceCondition": { + "field": "Microsoft.ContainerService/managedClusters/addonProfiles.omsagent.enabled", + "equals": "true" + }, + "deployment": { + "properties": { + "mode": "incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "clusterName": { + "type": "string" + }, + "clusterResourceGroupName": { + "type": "string" + }, + "clusterLocation": { + "type": "string" + }, + "clusterTags": { + "type": "object" + }, + "workspaceResourceId": { + "type": "string" + } + }, + "resources": [ + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('aks-monitoring-policy', '-', uniqueString(parameters('clusterName')))]", + "apiVersion": "2019-05-01", + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "name": "[parameters('clusterName')]", + "type": "Microsoft.ContainerService/managedClusters", + "location": "[parameters('clusterLocation')]", + "tags": "[parameters('clusterTags')]", + "apiVersion": "2018-03-31", + "properties": { + "mode": "Incremental", + "id": "[resourceId(parameters('clusterResourceGroupName'), 
'Microsoft.ContainerService/managedClusters', parameters('clusterName'))]", + "addonProfiles": { + "omsagent": { + "enabled": true, + "config": { + "logAnalyticsWorkspaceResourceID": "[parameters('workspaceResourceId')]" + } + } + } + } + } + ] + } + } + } + ] + }, + "parameters": { + "clusterName": { + "value": "[field('name')]" + }, + "clusterResourceGroupName": { + "value": "[resourceGroup().name]" + }, + "clusterLocation": { + "value": "[field('location')]" + }, + "clusterTags": { + "value": "[field('tags')]" + }, + "workspaceResourceId": { + "value": "[parameters('workspaceResourceId')]" + } + } + } + } + } + } + }, + "parameters": { + "workspaceResourceId": { + "type": "String", + "metadata": { + "displayName": "Resource Id of the existing Azure Log Analytics Workspace", + "description": "Azure Monitor Log Analytics Resource ID" + } + } + } +} diff --git a/scripts/onboarding/aks/onboarding-using-azure-policy/azurepolicy.parameters.json b/scripts/onboarding/aks/onboarding-using-azure-policy/azurepolicy.parameters.json new file mode 100644 index 000000000..6281cdade --- /dev/null +++ b/scripts/onboarding/aks/onboarding-using-azure-policy/azurepolicy.parameters.json @@ -0,0 +1,9 @@ +{ + "workspaceResourceId": { + "type": "string", + "metadata": { + "displayName": "Resource Id of the existing Azure Log Analytics Workspace", + "description": "Azure Monitor Log Analytics Resource ID" + } + } +} diff --git a/scripts/onboarding/aks/onboarding-using-azure-policy/azurepolicy.rules.json b/scripts/onboarding/aks/onboarding-using-azure-policy/azurepolicy.rules.json new file mode 100644 index 000000000..a113441ce --- /dev/null +++ b/scripts/onboarding/aks/onboarding-using-azure-policy/azurepolicy.rules.json @@ -0,0 +1,101 @@ +{ + "if": { + "field": "type", + "equals": "Microsoft.ContainerService/managedClusters" + }, + "then": { + "effect": "deployIfNotExists", + "details": { + "type": "Microsoft.ContainerService/managedClusters", + "name": "[field('name')]", + "roleDefinitionIds": [ + "/providers/Microsoft.Authorization/roleDefinitions/ed7f3fbd-7b88-4dd4-9017-9adb7ce333f8", + "/providers/Microsoft.Authorization/roleDefinitions/92aaf0da-9dab-42b6-94a3-d43ce8d16293" + ], + "existenceCondition": { + "field": "Microsoft.ContainerService/managedClusters/addonProfiles.omsagent.enabled", + "equals": "true" + }, + "deployment": { + "properties": { + "mode": "incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "clusterName": { + "type": "string" + }, + "clusterResourceGroupName": { + "type": "string" + }, + "clusterLocation": { + "type": "string" + }, + "clusterTags": { + "type": "object" + }, + "workspaceResourceId": { + "type": "string" + } + }, + "resources": [ + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('aks-monitoring-policy', '-', uniqueString(parameters('clusterName')))]", + "apiVersion": "2019-05-01", + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "name": "[parameters('clusterName')]", + "type": "Microsoft.ContainerService/managedClusters", + "location": "[parameters('clusterLocation')]", + "tags": "[parameters('clusterTags')]", + "apiVersion": "2018-03-31", + "properties": { + "mode": "Incremental", + "id": 
"[resourceId(parameters('clusterResourceGroupName'), 'Microsoft.ContainerService/managedClusters', parameters('clusterName'))]", + "addonProfiles": { + "omsagent": { + "enabled": true, + "config": { + "logAnalyticsWorkspaceResourceID": "[parameters('workspaceResourceId')]" + } + } + } + } + } + ] + } + } + } + ] + }, + "parameters": { + "clusterName": { + "value": "[field('name')]" + }, + "clusterResourceGroupName": { + "value": "[resourceGroup().name]" + }, + "clusterLocation": { + "value": "[field('location')]" + }, + "clusterTags": { + "value": "[field('tags')]" + }, + "workspaceResourceId": { + "value": "[parameters('workspaceResourceId')]" + } + } + } + } + } + } +} diff --git a/scripts/onboarding/enable-monitoring-using-policy.md b/scripts/onboarding/enable-monitoring-using-policy.md new file mode 100644 index 000000000..e1e395ecc --- /dev/null +++ b/scripts/onboarding/enable-monitoring-using-policy.md @@ -0,0 +1,64 @@ +# How to enable AKS Monitoring Addon via Azure Policy +This doc describes how to enable AKS Monitoring Addon using Azure Custom Policy.Monitoring Addon Custom Policy can be assigned +either at subscription or resource group scope. If Azure Log Analytics workspace and AKS cluster are in different subscriptions then Managed Identity used by Policy assignnment has to have required role permissions on both the subscriptions or least on the resource of the Azure Log Aalytics workspace. Similarly, If the policy scoped to Resource Group, then Managed Identity should have required role permissions on the Log Analytics workspace if the workspace not in the selected Resource Group scope. + +Monitoring Addon require following roles on the Managed Identity used by Azure Policy + - [azure-kubernetes-service-contributor-role](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#azure-kubernetes-service-contributor-role) + - [log-analytics-contributor](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#log-analytics-contributor) + +## Create and Assign Policy definition using Azure Portal + +### Create Policy Definition + +1. Download the Azure Custom Policy definition to enable AKS Monitoring Addon +``` sh + curl -o azurepolicy.json -L https://aka.ms/aks-enable-monitoring-custom-policy +``` +2. Navigate to https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyMenuBlade/Definitions and create policy definition with the following details in the Policy definition create dialogue box + + - Pick any Azure Subscription where you want to store Policy Definition + - Name - '(Preview)AKS-Monitoring-Addon' + - Description - 'Azure Custom Policy to enable Monitoring Addon onto Azure Kubernetes Cluster(s) in specified scope' + - Category - Choose "use existing" and pick 'Kubernetes' from drop down + - Remove the existing sample rules and copy the contents of azurepolicy.json downloaded in step #1 above + +### Assign Policy Definition to Specified Scope + +> Note: Managed Identity will be created automatically and assigned specified roles in the Policy definition. + +3. Navigate to https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyMenuBlade/Definitions and select the Policy Definition 'AKS Monitoring Addon' +4. Click an Assignment and select Scope, Exclusions (if any) +5. Provide the Resource Id of the Azure Log Analytics Workspace. The Resource Id should be in this format `/subscriptions//resourceGroups//providers/Microsoft.OperationalInsights/workspaces/` +6. 
Create a Remediation task if you want to apply the policy to existing AKS clusters in the selected scope
+7. Click Review & Create to create the Policy Assignment
+
+## Create and Assign Policy definition using Azure CLI
+
+### Create Policy Definition
+
+1. Download the Azure Custom Policy definition rules and parameters files
+ ``` sh
+ curl -o azurepolicy.rules.json -L https://aka.ms/aks-enable-monitoring-custom-policy-rules
+ curl -o azurepolicy.parameters.json -L https://aka.ms/aks-enable-monitoring-custom-policy-parameters
+ ```
+2. Create the policy definition using the command below
+
+ ``` sh
+ az cloud set -n # set the Azure cloud
+ az login # login to cloud environment
+ az account set -s
+ az policy definition create --name "(Preview)AKS-Monitoring-Addon" --display-name "(Preview)AKS-Monitoring-Addon" --mode Indexed --metadata version=1.0.0 category=Kubernetes --rules azurepolicy.rules.json --params azurepolicy.parameters.json
+ ```
+### Assign Policy Definition to Specified Scope
+
+3. Create the policy assignment
+
+``` sh
+az policy assignment create --name aks-monitoring-addon --policy "(Preview)AKS-Monitoring-Addon" --assign-identity --identity-scope /subscriptions/ --role Contributor --scope /subscriptions/ --location --role Contributor --scope /subscriptions/ -p "{ \"workspaceResourceId\": { \"value\": \"/subscriptions//resourcegroups//providers/microsoft.operationalinsights/workspaces/\" } }"
+```
+
+## References
+- https://docs.microsoft.com/en-us/azure/governance/policy/
+- https://docs.microsoft.com/en-us/azure/governance/policy/how-to/remediate-resources#how-remediation-security-works
+- https://docs.microsoft.com/en-us/cli/azure/install-azure-cli
+- https://docs.microsoft.com/en-us/azure/azure-monitor/insights/container-insights-overview
\ No newline at end of file

From e4f36c7aef7bce1a0c2270f52f98bf07bf4bfe1c Mon Sep 17 00:00:00 2001
From: Ganga Mahesh Siddem
Date: Mon, 1 Feb 2021 10:27:55 -0800
Subject: [PATCH 067/301] revert to use operatingSystem from osImage for node os telemetry (#498)

---
 source/plugins/ruby/in_kube_nodes.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb
index e7c5060a5..0a4727077 100644
--- a/source/plugins/ruby/in_kube_nodes.rb
+++ b/source/plugins/ruby/in_kube_nodes.rb
@@ -447,7 +447,7 @@ def getNodeTelemetryProps(item)
       properties["Computer"] = item["metadata"]["name"]
       nodeInfo = item["status"]["nodeInfo"]
       properties["KubeletVersion"] = nodeInfo["kubeletVersion"]
-      properties["OperatingSystem"] = nodeInfo["osImage"]
+      properties["OperatingSystem"] = nodeInfo["operatingSystem"]
       properties["KernelVersion"] = nodeInfo["kernelVersion"]
       properties["OSImage"] = nodeInfo["osImage"]
       containerRuntimeVersion = nodeInfo["containerRuntimeVersion"]

From ec15ac122cc465cfbed5745773c5a0827dcbeed7 Mon Sep 17 00:00:00 2001
From: Vishwanath
Date: Thu, 4 Feb 2021 14:34:46 -0800
Subject: [PATCH 068/301] Container log v2 schema changes (#499)

* make pod name in mdsd definition as str for consistency. msgp has no type checking, as it has type metadata in the message itself.
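For context, the tomlparser change in this patch reads a new `containerlog_schema_version` setting from the agent's ConfigMap. A minimal sketch of that ConfigMap entry is shown below; the ConfigMap name, namespace and data key are assumptions based on how the agent already consumes `log_collection_settings`, and only the `[log_collection_settings.schema]` section itself comes from this patch:

``` yaml
# Illustrative sketch only: enabling the ContainerLogV2 schema through the agent ConfigMap.
# The ConfigMap name/namespace and the data key are assumptions; the schema section is what
# build/common/installer/scripts/tomlparser.rb in this patch parses.
apiVersion: v1
kind: ConfigMap
metadata:
  name: container-azm-ms-agentconfig   # assumed name
  namespace: kube-system
data:
  log-data-collection-settings: |-
    [log_collection_settings]
      [log_collection_settings.schema]
        containerlog_schema_version = "v2"
```

With a setting like this in place, the parser would write AZMON_CONTAINER_LOG_SCHEMA_VERSION=v2 to the agent environment, which is the variable the Go plugin below checks before switching to the ContainerLogV2 path.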
--- build/common/installer/scripts/tomlparser.rb | 14 + kubernetes/linux/mdsd.xml | 67 ++-- source/plugins/go/src/oms.go | 291 ++++++++++++------ source/plugins/go/src/utils.go | 2 +- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 7 +- 5 files changed, 264 insertions(+), 117 deletions(-) diff --git a/build/common/installer/scripts/tomlparser.rb b/build/common/installer/scripts/tomlparser.rb index fe26f639e..a0f3c2f0a 100644 --- a/build/common/installer/scripts/tomlparser.rb +++ b/build/common/installer/scripts/tomlparser.rb @@ -23,6 +23,7 @@ @logExclusionRegexPattern = "(^((?!stdout|stderr).)*$)" @excludePath = "*.csv2" #some invalid path @enrichContainerLogs = false +@containerLogSchemaVersion = "" @collectAllKubeEvents = false @containerLogsRoute = "" @@ -138,6 +139,16 @@ def populateSettingValuesFromConfigMap(parsedConfig) ConfigParseErrorLogger.logError("Exception while reading config map settings for cluster level container log enrichment - #{errorStr}, using defaults, please check config map for errors") end + #Get container log schema version setting + begin + if !parsedConfig[:log_collection_settings][:schema].nil? && !parsedConfig[:log_collection_settings][:schema][:containerlog_schema_version].nil? + @containerLogSchemaVersion = parsedConfig[:log_collection_settings][:schema][:containerlog_schema_version] + puts "config::Using config map setting for container log schema version" + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while reading config map settings for container log schema version - #{errorStr}, using defaults, please check config map for errors") + end + #Get kube events enrichment setting begin if !parsedConfig[:log_collection_settings][:collect_all_kube_events].nil? && !parsedConfig[:log_collection_settings][:collect_all_kube_events][:enabled].nil? @@ -200,6 +211,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) file.write("export AZMON_CLUSTER_CONTAINER_LOG_ENRICH=#{@enrichContainerLogs}\n") file.write("export AZMON_CLUSTER_COLLECT_ALL_KUBE_EVENTS=#{@collectAllKubeEvents}\n") file.write("export AZMON_CONTAINER_LOGS_ROUTE=#{@containerLogsRoute}\n") + file.write("export AZMON_CONTAINER_LOG_SCHEMA_VERSION=#{@containerLogSchemaVersion}\n") # Close file after writing all environment variables file.close puts "Both stdout & stderr log collection are turned off for namespaces: '#{@excludePath}' " @@ -246,6 +258,8 @@ def get_command_windows(env_variable_name, env_variable_value) file.write(commands) commands = get_command_windows('AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE', @containerLogsRoute) file.write(commands) + commands = get_command_windows('AZMON_CONTAINER_LOG_SCHEMA_VERSION', @containerLogSchemaVersion) + file.write(commands) # Close file after writing all environment variables file.close diff --git a/kubernetes/linux/mdsd.xml b/kubernetes/linux/mdsd.xml index 76d2104fc..49d329791 100644 --- a/kubernetes/linux/mdsd.xml +++ b/kubernetes/linux/mdsd.xml @@ -48,20 +48,31 @@ --> - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + - + + @@ -97,15 +108,22 @@ priority events to be delivered sooner than the next five-minute interval. 
--> - - - - + + + + + + + + + - @@ -118,7 +136,16 @@ - ]]> + ]]> + + + + + + + + + ]]> diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 5a678781c..0bd983297 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -32,13 +32,16 @@ import ( // DataType for Container Log const ContainerLogDataType = "CONTAINER_LOG_BLOB" +//DataType for Container Log v2 +const ContainerLogV2DataType = "CONTAINERINSIGHTS_CONTAINERLOGV2" + // DataType for Insights metric const InsightsMetricsDataType = "INSIGHTS_METRICS_BLOB" // DataType for KubeMonAgentEvent const KubeMonAgentEventDataType = "KUBE_MON_AGENT_EVENTS_BLOB" -//env varibale which has ResourceId for LA +//env variable which has ResourceId for LA const ResourceIdEnv = "AKS_RESOURCE_ID" //env variable which has ResourceName for NON-AKS @@ -78,20 +81,26 @@ const DaemonSetContainerLogPluginConfFilePath = "/etc/opt/microsoft/docker-cimpr const ReplicaSetContainerLogPluginConfFilePath = "/etc/opt/microsoft/docker-cimprov/out_oms.conf" const WindowsContainerLogPluginConfFilePath = "/etc/omsagentwindows/out_oms.conf" -// IPName for Container Log -const IPName = "Containers" +// IPName +const IPName = "ContainerInsights" + + const defaultContainerInventoryRefreshInterval = 60 const kubeMonAgentConfigEventFlushInterval = 60 //Eventsource name in mdsd -const MdsdSourceName = "ContainerLogSource" +const MdsdContainerLogSourceName = "ContainerLogSource" +const MdsdContainerLogV2SourceName = "ContainerLogV2Source" -//container logs route - v2 (v2=flush to oneagent, adx= flush to adx ingestion, anything else flush to ODS[default]) +//container logs route (v2=flush to oneagent, adx= flush to adx ingestion, anything else flush to ODS[default]) const ContainerLogsV2Route = "v2" const ContainerLogsADXRoute = "adx" +//container logs schema (v2=ContainerLogsV2 table in LA, anything else ContainerLogs table in LA. 
This is applicable only if Container logs route is NOT ADX) +const ContainerLogV2SchemaVersion = "v2" + var ( // PluginConfiguration the plugins configuration PluginConfiguration map[string]string @@ -125,6 +134,8 @@ var ( ContainerLogsRouteV2 bool // container log route for routing thru ADX ContainerLogsRouteADX bool + // container log schema (applicable only for non-ADX route) + ContainerLogSchemaV2 bool //ADX Cluster URI AdxClusterUri string // ADX clientID @@ -180,8 +191,8 @@ var ( userAgent = "" ) -// DataItem represents the object corresponding to the json that is sent by fluentbit tail plugin -type DataItem struct { +// DataItemLAv1 == ContainerLog table in LA +type DataItemLAv1 struct { LogEntry string `json:"LogEntry"` LogEntrySource string `json:"LogEntrySource"` LogEntryTimeStamp string `json:"LogEntryTimeStamp"` @@ -193,10 +204,25 @@ type DataItem struct { Computer string `json:"Computer"` } +// DataItemLAv2 == ContainerLogV2 table in LA +// Please keep the names same as destination column names, to avoid transforming one to another in the pipeline +type DataItemLAv2 struct { + TimeGenerated string `json:"TimeGenerated"` + Computer string `json:"Computer"` + ContainerId string `json:"ContainerId"` + ContainerName string `json:"ContainerName"` + PodName string `json:"PodName"` + PodNamespace string `json:"PodNamespace"` + LogMessage string `json:"LogMessage"` + LogSource string `json:"LogSource"` + //PodLabels string `json:"PodLabels"` +} + +// DataItemADX == ContainerLogV2 table in ADX type DataItemADX struct { TimeGenerated string `json:"TimeGenerated"` Computer string `json:"Computer"` - ContainerID string `json:"ContainerID"` + ContainerId string `json:"ContainerId"` ContainerName string `json:"ContainerName"` PodName string `json:"PodName"` PodNamespace string `json:"PodNamespace"` @@ -227,10 +253,17 @@ type InsightsMetricsBlob struct { } // ContainerLogBlob represents the object corresponding to the payload that is sent to the ODS end point -type ContainerLogBlob struct { +type ContainerLogBlobLAv1 struct { DataType string `json:"DataType"` IPName string `json:"IPName"` - DataItems []DataItem `json:"DataItems"` + DataItems []DataItemLAv1 `json:"DataItems"` +} + +// ContainerLogBlob represents the object corresponding to the payload that is sent to the ODS end point +type ContainerLogBlobLAv2 struct { + DataType string `json:"DataType"` + IPName string `json:"IPName"` + DataItems []DataItemLAv2 `json:"DataItems"` } // MsgPackEntry represents the object corresponding to a single messagepack event in the messagepack stream @@ -792,7 +825,8 @@ func UpdateNumTelegrafMetricsSentTelemetry(numMetricsSent int, numSendErrors int // PostDataHelper sends data to the ODS endpoint or oneagent or ADX func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { start := time.Now() - var dataItems []DataItem + var dataItemsLAv1 []DataItemLAv1 + var dataItemsLAv2 []DataItemLAv2 var dataItemsADX []DataItemADX var msgPackEntries []MsgPackEntry @@ -830,26 +864,42 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { } stringMap = make(map[string]string) + //below id & name are used by latency telemetry in both v1 & v2 LA schemas + id := "" + name := "" logEntry := ToString(record["log"]) logEntryTimeStamp := ToString(record["time"]) - stringMap["LogEntry"] = logEntry - stringMap["LogEntrySource"] = logEntrySource - stringMap["LogEntryTimeStamp"] = logEntryTimeStamp - stringMap["SourceSystem"] = "Containers" - stringMap["Id"] = containerID - - if val, ok := 
imageIDMap[containerID]; ok { - stringMap["Image"] = val - } + //ADX Schema & LAv2 schema are almost the same (except resourceId) + if (ContainerLogSchemaV2 == true || ContainerLogsRouteADX == true) { + stringMap["Computer"] = Computer + stringMap["ContainerId"] = containerID + stringMap["ContainerName"] = containerName + stringMap["PodName"] = k8sPodName + stringMap["PodNamespace"] = k8sNamespace + stringMap["LogMessage"] = logEntry + stringMap["LogSource"] = logEntrySource + stringMap["TimeGenerated"] = logEntryTimeStamp + } else { + stringMap["LogEntry"] = logEntry + stringMap["LogEntrySource"] = logEntrySource + stringMap["LogEntryTimeStamp"] = logEntryTimeStamp + stringMap["SourceSystem"] = "Containers" + stringMap["Id"] = containerID + + if val, ok := imageIDMap[containerID]; ok { + stringMap["Image"] = val + } - if val, ok := nameIDMap[containerID]; ok { - stringMap["Name"] = val - } + if val, ok := nameIDMap[containerID]; ok { + stringMap["Name"] = val + } - stringMap["TimeOfCommand"] = start.Format(time.RFC3339) - stringMap["Computer"] = Computer - var dataItem DataItem + stringMap["TimeOfCommand"] = start.Format(time.RFC3339) + stringMap["Computer"] = Computer + } + var dataItemLAv1 DataItemLAv1 + var dataItemLAv2 DataItemLAv2 var dataItemADX DataItemADX var msgPackEntry MsgPackEntry @@ -866,50 +916,68 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { } else if ContainerLogsRouteADX == true { if ResourceCentric == true { stringMap["AzureResourceId"] = ResourceID + } else { + stringMap["AzureResourceId"] = "" } - stringMap["PodName"] = k8sPodName - stringMap["PodNamespace"] = k8sNamespace - stringMap["ContainerName"] = containerName dataItemADX = DataItemADX{ - TimeGenerated: stringMap["LogEntryTimeStamp"], + TimeGenerated: stringMap["TimeGenerated"], Computer: stringMap["Computer"], - ContainerID: stringMap["Id"], + ContainerId: stringMap["ContainerId"], ContainerName: stringMap["ContainerName"], PodName: stringMap["PodName"], PodNamespace: stringMap["PodNamespace"], - LogMessage: stringMap["LogEntry"], - LogSource: stringMap["LogEntrySource"], + LogMessage: stringMap["LogMessage"], + LogSource: stringMap["LogSource"], AzureResourceId: stringMap["AzureResourceId"], } //ADX dataItemsADX = append(dataItemsADX, dataItemADX) } else { - dataItem = DataItem{ - ID: stringMap["Id"], - LogEntry: stringMap["LogEntry"], - LogEntrySource: stringMap["LogEntrySource"], - LogEntryTimeStamp: stringMap["LogEntryTimeStamp"], - LogEntryTimeOfCommand: stringMap["TimeOfCommand"], - SourceSystem: stringMap["SourceSystem"], - Computer: stringMap["Computer"], - Image: stringMap["Image"], - Name: stringMap["Name"], + if (ContainerLogSchemaV2 == true) { + dataItemLAv2 = DataItemLAv2{ + TimeGenerated: stringMap["TimeGenerated"], + Computer: stringMap["Computer"], + ContainerId: stringMap["ContainerId"], + ContainerName: stringMap["ContainerName"], + PodName: stringMap["PodName"], + PodNamespace: stringMap["PodNamespace"], + LogMessage: stringMap["LogMessage"], + LogSource: stringMap["LogSource"], + } + //ODS-v2 schema + dataItemsLAv2 = append(dataItemsLAv2, dataItemLAv2) + name = stringMap["ContainerName"] + id = stringMap["ContainerId"] + } else { + dataItemLAv1 = DataItemLAv1{ + ID: stringMap["Id"], + LogEntry: stringMap["LogEntry"], + LogEntrySource: stringMap["LogEntrySource"], + LogEntryTimeStamp: stringMap["LogEntryTimeStamp"], + LogEntryTimeOfCommand: stringMap["TimeOfCommand"], + SourceSystem: stringMap["SourceSystem"], + Computer: stringMap["Computer"], + Image: 
stringMap["Image"], + Name: stringMap["Name"], + } + //ODS-v1 schema + dataItemsLAv1 = append(dataItemsLAv1, dataItemLAv1) + name = stringMap["Name"] + id = stringMap["Id"] } - //ODS - dataItems = append(dataItems, dataItem) } - if stringMap["LogEntryTimeStamp"] != "" { - loggedTime, e := time.Parse(time.RFC3339, stringMap["LogEntryTimeStamp"]) + if logEntryTimeStamp != "" { + loggedTime, e := time.Parse(time.RFC3339, logEntryTimeStamp) if e != nil { - message := fmt.Sprintf("Error while converting LogEntryTimeStamp for telemetry purposes: %s", e.Error()) + message := fmt.Sprintf("Error while converting logEntryTimeStamp for telemetry purposes: %s", e.Error()) Log(message) SendException(message) } else { ltncy := float64(start.Sub(loggedTime) / time.Millisecond) if ltncy >= maxLatency { maxLatency = ltncy - maxLatencyContainer = dataItem.Name + "=" + dataItem.ID + maxLatencyContainer = name + "=" + id } } } @@ -919,8 +987,12 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { if len(msgPackEntries) > 0 && ContainerLogsRouteV2 == true { //flush to mdsd + mdsdSourceName := MdsdContainerLogSourceName + if (ContainerLogSchemaV2 == true) { + mdsdSourceName = MdsdContainerLogV2SourceName + } fluentForward := MsgPackForward{ - Tag: MdsdSourceName, + Tag: mdsdSourceName, Entries: msgPackEntries, } @@ -967,7 +1039,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { elapsed = time.Since(start) if er != nil { - Log("Error::mdsd::Failed to write to mdsd %d records after %s. Will retry ... error : %s", len(dataItems), elapsed, er.Error()) + Log("Error::mdsd::Failed to write to mdsd %d records after %s. Will retry ... error : %s", len(msgPackEntries), elapsed, er.Error()) if MdsdMsgpUnixSocketClient != nil { MdsdMsgpUnixSocketClient.Close() MdsdMsgpUnixSocketClient = nil @@ -1013,14 +1085,14 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { } } - // Setup a maximum time for completion to be 15 Seconds. + // Setup a maximum time for completion to be 30 Seconds. ctx, cancel := context.WithTimeout(ParentContext, 30*time.Second) defer cancel() //ADXFlushMutex.Lock() //defer ADXFlushMutex.Unlock() //MultiJSON support is not there yet - if ingestionErr := ADXIngestor.FromReader(ctx, r, ingest.IngestionMappingRef("ContainerLogv2Mapping", ingest.JSON), ingest.FileFormat(ingest.JSON)); ingestionErr != nil { + if ingestionErr := ADXIngestor.FromReader(ctx, r, ingest.IngestionMappingRef("ContainerLogV2Mapping", ingest.JSON), ingest.FileFormat(ingest.JSON)); ingestionErr != nil { Log("Error when streaming to ADX Ingestion: %s", ingestionErr.Error()) //ADXIngestor = nil //not required as per ADX team. 
Will keep it to indicate that we tried this approach @@ -1035,58 +1107,75 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { numContainerLogRecords = len(dataItemsADX) Log("Success::ADX::Successfully wrote %d container log records to ADX in %s", numContainerLogRecords, elapsed) - } else { - //flush to ODS - if len(dataItems) > 0 { - logEntry := ContainerLogBlob{ - DataType: ContainerLogDataType, + } else { //ODS + var logEntry interface{} + recordType := "" + loglinesCount := 0 + //schema v2 + if (len(dataItemsLAv2) > 0 && ContainerLogSchemaV2 == true) { + logEntry = ContainerLogBlobLAv2{ + DataType: ContainerLogV2DataType, IPName: IPName, - DataItems: dataItems} - - marshalled, err := json.Marshal(logEntry) - if err != nil { - message := fmt.Sprintf("Error while Marshalling log Entry: %s", err.Error()) - Log(message) - SendException(message) - return output.FLB_OK + DataItems: dataItemsLAv2} + loglinesCount = len(dataItemsLAv2) + recordType = "ContainerLogV2" + } else { + //schema v1 + if len(dataItemsLAv1) > 0 { + logEntry = ContainerLogBlobLAv1{ + DataType: ContainerLogDataType, + IPName: IPName, + DataItems: dataItemsLAv1} + loglinesCount = len(dataItemsLAv1) + recordType = "ContainerLog" } + } - req, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(marshalled)) - req.Header.Set("Content-Type", "application/json") - req.Header.Set("User-Agent", userAgent) - reqId := uuid.New().String() - req.Header.Set("X-Request-ID", reqId) - //expensive to do string len for every request, so use a flag - if ResourceCentric == true { - req.Header.Set("x-ms-AzureResourceId", ResourceID) - } + marshalled, err := json.Marshal(logEntry) + //Log("LogEntry::e %s", marshalled) + if err != nil { + message := fmt.Sprintf("Error while Marshalling log Entry: %s", err.Error()) + Log(message) + SendException(message) + return output.FLB_OK + } - resp, err := HTTPClient.Do(req) - elapsed = time.Since(start) + req, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(marshalled)) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", userAgent) + reqId := uuid.New().String() + req.Header.Set("X-Request-ID", reqId) + //expensive to do string len for every request, so use a flag + if ResourceCentric == true { + req.Header.Set("x-ms-AzureResourceId", ResourceID) + } + + resp, err := HTTPClient.Do(req) + elapsed = time.Since(start) - if err != nil { - message := fmt.Sprintf("Error when sending request %s \n", err.Error()) - Log(message) - // Commenting this out for now. TODO - Add better telemetry for ods errors using aggregation - //SendException(message) - Log("Failed to flush %d records after %s", len(dataItems), elapsed) + if err != nil { + message := fmt.Sprintf("Error when sending request %s \n", err.Error()) + Log(message) + // Commenting this out for now. 
TODO - Add better telemetry for ods errors using aggregation + //SendException(message) + + Log("Failed to flush %d records after %s", loglinesCount, elapsed) - return output.FLB_RETRY - } + return output.FLB_RETRY + } - if resp == nil || resp.StatusCode != 200 { - if resp != nil { - Log("RequestId %s Status %s Status Code %d", reqId, resp.Status, resp.StatusCode) - } - return output.FLB_RETRY + if resp == nil || resp.StatusCode != 200 { + if resp != nil { + Log("RequestId %s Status %s Status Code %d", reqId, resp.Status, resp.StatusCode) } + return output.FLB_RETRY + } - defer resp.Body.Close() - numContainerLogRecords = len(dataItems) - Log("PostDataHelper::Info::Successfully flushed %d container log records to ODS in %s", numContainerLogRecords, elapsed) + defer resp.Body.Close() + numContainerLogRecords = loglinesCount + Log("PostDataHelper::Info::Successfully flushed %d %s records to ODS in %s", numContainerLogRecords, recordType, elapsed) } - } ContainerLogTelemetryMutex.Lock() defer ContainerLogTelemetryMutex.Unlock() @@ -1374,10 +1463,22 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { CreateADXClient() } + ContainerLogSchemaVersion := strings.TrimSpace(strings.ToLower(os.Getenv("AZMON_CONTAINER_LOG_SCHEMA_VERSION"))) + Log("AZMON_CONTAINER_LOG_SCHEMA_VERSION:%s", ContainerLogSchemaVersion) + + ContainerLogSchemaV2 = false //default is v1 schema + + if strings.Compare(ContainerLogSchemaVersion, ContainerLogV2SchemaVersion) == 0 && ContainerLogsRouteADX != true { + ContainerLogSchemaV2 = true + Log("Container logs schema=%s", ContainerLogV2SchemaVersion) + fmt.Fprintf(os.Stdout, "Container logs schema=%s... \n", ContainerLogV2SchemaVersion) + } + if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 { populateExcludedStdoutNamespaces() populateExcludedStderrNamespaces() - if enrichContainerLogs == true && ContainerLogsRouteADX != true { + //enrichment not applicable for ADX and v2 schema + if enrichContainerLogs == true && ContainerLogsRouteADX != true && ContainerLogSchemaV2 != true { Log("ContainerLogEnrichment=true; starting goroutine to update containerimagenamemaps \n") go updateContainerImageNameMaps() } else { diff --git a/source/plugins/go/src/utils.go b/source/plugins/go/src/utils.go index 91791ae1a..61d047e52 100644 --- a/source/plugins/go/src/utils.go +++ b/source/plugins/go/src/utils.go @@ -145,7 +145,7 @@ func CreateADXClient() { //log.Fatalf("Unable to create ADX connection %s", err.Error()) } else { Log("Successfully created ADX Client. 
Creating Ingestor...") - ingestor, ingestorErr := ingest.New(client, "containerinsights", "ContainerLogv2") + ingestor, ingestorErr := ingest.New(client, "containerinsights", "ContainerLogV2") if ingestorErr != nil { Log("Error::mdsd::Unable to create ADX ingestor %s", ingestorErr.Error()) } else { diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 67bd61667..102cb05f2 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -25,6 +25,7 @@ class CAdvisorMetricsAPIClient @clusterLogTailPath = ENV["AZMON_LOG_TAIL_PATH"] @clusterAgentSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] @clusterContainerLogEnrich = ENV["AZMON_CLUSTER_CONTAINER_LOG_ENRICH"] + @clusterContainerLogSchemaVersion = ENV["AZMON_CONTAINER_LOG_SCHEMA_VERSION"] @dsPromInterval = ENV["TELEMETRY_DS_PROM_INTERVAL"] @dsPromFieldPassCount = ENV["TELEMETRY_DS_PROM_FIELDPASS_LENGTH"] @@ -247,7 +248,7 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met telemetryProps["dsPromFDC"] = @dsPromFieldDropCount telemetryProps["dsPromUrl"] = @dsPromUrlCount end - #telemetry about containerlogs Routing for daemonset + #telemetry about containerlog Routing for daemonset if File.exist?(Constants::AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2_FILENAME) telemetryProps["containerLogsRoute"] = "v2" elsif (!@containerLogsRoute.nil? && !@containerLogsRoute.empty?) @@ -263,6 +264,10 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met elsif (!@npmIntegrationBasic.nil? && !@npmIntegrationBasic.empty?) telemetryProps["int-npm-b"] = "1" end + #telemetry for Container log schema version clusterContainerLogSchemaVersion + if (!@clusterContainerLogSchemaVersion.nil? && !@clusterContainerLogSchemaVersion.empty?) + telemetryProps["containerLogVer"] = @clusterContainerLogSchemaVersion + end ApplicationInsightsUtility.sendMetricTelemetry(metricNametoReturn, metricValue, telemetryProps) end end From 6031be8b71b11ec9352bc01c61195092fb589c9b Mon Sep 17 00:00:00 2001 From: Michael Sinz <36865706+Michael-Sinz@users.noreply.github.com> Date: Tue, 9 Feb 2021 09:46:29 -0800 Subject: [PATCH 069/301] Add priority class to the daemonsets (#500) * Add priority class to the daemonsets Add a priority class for omsagent and have the daemonsets use this to be sure to schedule the pods. Daemonset pods are constrained in scheduling to run on specific nodes. This is done by the daemonset controller. When a node shows up it will create a pod with a strong affinity to that node. When a node goes away, it will delete the pod with the node affinity to that node. Kubernetes pod scheduling does not know it is a daemonset but it does know it is tied to a specific node. With default scheduling, it is possible for the pods to be "frozen out" of a node because the node already is full. This can happen because "normal" pods may already exist and are looking for a node to get scheduled on when a node is added to the cluster. The daemonset controller will only first create the pod for the node at around the same time. The kubernetes scheduler is running async from all of this and thus there can be a race as to who gets scheduled on the node. The pod priority class (and thus the pod priority) is a way to indicate that the pod has a higher scheduling priority than a default pod. By default, all pods are at priority 0. Higher numbers are higher priority. 
Setting the priority to something greater than zero will allow the omsagent daemonsets to win a race against "normal" pods for scheduled resources on a node - and will also allow for graceful eviction in the case the node is too full. Without this, omsagent can be left out of node in clusters that are very busy, especially in dynamic scaling situations. I did not test the windows pod as we have no windows clusters. * CR feedback --- charts/azuremonitor-containers/README.md | 2 ++ .../templates/omsagent-daemonset-windows.yaml | 5 +-- .../templates/omsagent-daemonset.yaml | 13 +++---- .../templates/omsagent-priorityclass.yaml | 22 ++++++++++++ charts/azuremonitor-containers/values.yaml | 36 +++++++++++++------ 5 files changed, 59 insertions(+), 19 deletions(-) create mode 100644 charts/azuremonitor-containers/templates/omsagent-priorityclass.yaml diff --git a/charts/azuremonitor-containers/README.md b/charts/azuremonitor-containers/README.md index 469fac94a..a3f17b509 100644 --- a/charts/azuremonitor-containers/README.md +++ b/charts/azuremonitor-containers/README.md @@ -93,6 +93,7 @@ The following table lists the configurable parameters of the MSOMS chart and the | `omsagent.env.clusterName` | Name of your cluster | Does not have a default value, needs to be provided | | `omsagent.rbac` | rbac enabled/disabled | true (i.e.enabled) | | `omsagent.proxy` | Proxy endpoint | Doesnt have default value. Refer to [configure proxy](#Configuring-Proxy-Endpoint) | +| `omsagent.priority` | DaemonSet Pod Priority | This is the [priority](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/) to use for the daemonsets such that they get scheduled onto the node ahead of "normal" pods - must be an integer, defaults to 10 | > Note: For Azure Manage K8s clusters such as Azure Arc K8s and ARO v4, `omsagent.env.clusterId` with fully qualified azure resource id of the cluster should be used instead of `omsagent.env.clusterName` @@ -100,6 +101,7 @@ The following table lists the configurable parameters of the MSOMS chart and the - Parameter `omsagent.env.doNotCollectKubeSystemLogs` has been removed starting chart version 1.0.0. Refer to 'Agent data collection settings' section below to configure it using configmap. - onboarding of multiple clusters with the same cluster name to same log analytics workspace not supported. If need this configuration, use the cluster FQDN name rather than cluster dns prefix to avoid collision with clusterName +- The `omsagent.priority` parameter sets the priority of the omsagent daemonset priority class. This pod priority class is used for daemonsets to allow them to have priority over pods that can be scheduled elsewhere. Without a priority class, it is possible for a node to fill up with "normal" pods before the daemonset pods get to be created for the node or get scheduled. Note that pods are not "daemonset" pods - they are just pods created by the daemonset controller but they have a specific affinity set during creation to the specific node each pod was created to run on. You want this value to be greater than 0 (default is 10) and generally greater than pods that have the flexibility to run on different nodes such that they do not block the node specific pods. 
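As a usage sketch, the default of 10 can be overridden at install or upgrade time through a custom values file; the file name and the value 100 below are only examples, and any integer greater than 0 is accepted:

``` yaml
# my-values.yaml (hypothetical override file; 100 is an example value, not a recommendation)
omsagent:
  priority: 100   # scheduling priority stamped into the omsagent PriorityClass
```

Passing the file with `helm upgrade --install <release-name> <chart> -f my-values.yaml` would then apply the higher priority to the daemonset pods.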
## Agent data collection settings diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index 81003c704..82d210f3d 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -27,10 +27,11 @@ spec: checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . | sha256sum }} checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} spec: - dnsConfig: + priorityClassName: omsagent + dnsConfig: options: - name: ndots - value: "3" + value: "3" {{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} nodeSelector: kubernetes.io/os: windows diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index 595edd7bb..0272c6263 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -28,10 +28,11 @@ spec: checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} checksum/logsettings: {{ toYaml .Values.omsagent.logsettings | sha256sum }} spec: - dnsConfig: + priorityClassName: omsagent + dnsConfig: options: - name: ndots - value: "3" + value: "3" {{- if .Values.omsagent.rbac }} serviceAccountName: omsagent {{- end }} @@ -70,12 +71,12 @@ spec: valueFrom: fieldRef: fieldPath: status.hostIP - {{- if not (empty .Values.Azure.Extension.Name) }} + {{- if not (empty .Values.Azure.Extension.Name) }} - name: ARC_K8S_EXTENSION_NAME - value: {{ .Values.Azure.Extension.Name | quote }} - {{- end }} + value: {{ .Values.Azure.Extension.Name | quote }} + {{- end }} - name: USER_ASSIGNED_IDENTITY_CLIENT_ID - value: "" + value: "" {{- if .Values.omsagent.logsettings.logflushintervalsecs }} - name: FBIT_SERVICE_FLUSH_INTERVAL value: {{ .Values.omsagent.logsettings.logflushintervalsecs | quote }} diff --git a/charts/azuremonitor-containers/templates/omsagent-priorityclass.yaml b/charts/azuremonitor-containers/templates/omsagent-priorityclass.yaml new file mode 100644 index 000000000..4d9980ab3 --- /dev/null +++ b/charts/azuremonitor-containers/templates/omsagent-priorityclass.yaml @@ -0,0 +1,22 @@ +{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId "") (ne .Values.Azure.Cluster.ResourceId "") )}} +# This pod priority class is used for daemonsets to allow them to have priority +# over pods that can be scheduled elsewhere. Without a priority class, it is +# possible for a node to fill up with pods before the daemonset pods get to be +# created for the node or get scheduled. 
Note that pods are not "daemonset"
+# pods - they are just pods created by the daemonset controller but they have
+# a specific affinity set during creation to the specific node each pod was
+# created to run on (daemonset controller takes care of that)
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: omsagent
+  # Priority classes don't have labels :-)
+  annotations:
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+    component: oms-agent
+value: {{ .Values.omsagent.priority }}
+globalDefault: false
+description: "This is the daemonset priority class for omsagent"
+{{- end }}
diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml
index 341b9fb65..5601a5738 100644
--- a/charts/azuremonitor-containers/values.yaml
+++ b/charts/azuremonitor-containers/values.yaml
@@ -8,9 +8,9 @@ Azure:
   Cluster:
     Region:
-    ResourceId:
+    ResourceId:
   Extension:
-    Name: ""
+    Name: ""
     ResourceId: ""
 omsagent:
   image:
@@ -20,6 +20,20 @@ omsagent:
     pullPolicy: IfNotPresent
   dockerProviderVersion: "12.0.0-0"
   agentVersion: "1.10.0.1"
+
+  # The priority used by the omsagent priority class for the daemonset pods
+  # Note that this is not execution priority - it is scheduling priority, as
+  # in getting scheduled to the node. This needs to be greater than 0 such
+  # that the daemonset pods, which can not schedule onto different nodes as
+  # they are defined to run on specific nodes, are not accidentally frozen
+  # out of a node due to other pods showing up earlier in scheduling.
+  # (DaemonSet pods by definition only are created once the node exists for
+  # them to be created for and thus it is possible to have "normal" pods
+  # already in line to run on the node before the DaemonSet controller got a
+  # chance to build pod for the node and give it to the scheduler)
+  # Should be some number greater than default (0)
+  priority: 10
+
   ## To get your workspace id and key do the following
   ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux.
@@ -61,7 +75,7 @@ omsagent: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - labelSelector: - matchExpressions: + matchExpressions: - key: kubernetes.io/os operator: In values: @@ -70,10 +84,10 @@ omsagent: operator: NotIn values: - virtual-kubelet - - key: kubernetes.io/arch + - key: kubernetes.io/arch operator: In values: - - amd64 + - amd64 nodeSelectorTerms: - labelSelector: matchExpressions: @@ -85,10 +99,10 @@ omsagent: operator: NotIn values: - virtual-kubelet - - key: beta.kubernetes.io/arch + - key: beta.kubernetes.io/arch operator: In values: - - amd64 + - amd64 deployment: affinity: nodeAffinity: @@ -117,10 +131,10 @@ omsagent: operator: NotIn values: - master - - key: kubernetes.io/arch + - key: kubernetes.io/arch operator: In values: - - amd64 + - amd64 nodeSelectorTerms: - labelSelector: matchExpressions: @@ -136,10 +150,10 @@ omsagent: operator: NotIn values: - master - - key: beta.kubernetes.io/arch + - key: beta.kubernetes.io/arch operator: In values: - - amd64 + - amd64 ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## From 4212e1a6ee1225f2c1280bf5b58070877cf55890 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 11 Feb 2021 09:15:23 -0800 Subject: [PATCH 070/301] fix node metric issue (#502) --- source/plugins/ruby/kubelet_utils.rb | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/source/plugins/ruby/kubelet_utils.rb b/source/plugins/ruby/kubelet_utils.rb index 599640d8f..bd2bd75b7 100644 --- a/source/plugins/ruby/kubelet_utils.rb +++ b/source/plugins/ruby/kubelet_utils.rb @@ -20,10 +20,12 @@ def get_node_capacity response = CAdvisorMetricsAPIClient.getAllMetricsCAdvisor(winNode: nil) if !response.nil? && !response.body.nil? 
- all_metrics = response.body.split("\n") - cpu_capacity = all_metrics.select{|m| m.start_with?('machine_cpu_cores') && m.split.first.strip == 'machine_cpu_cores' }.first.split.last.to_f * 1000 + all_metrics = response.body.split("\n") + #cadvisor machine metrics can exist with (>=1.19) or without dimensions (<1.19) + #so just checking startswith of metric name would be good enough to pick the metric value from exposition format + cpu_capacity = all_metrics.select{|m| m.start_with?('machine_cpu_cores') }.first.split.last.to_f * 1000 @log.info "CPU Capacity #{cpu_capacity}" - memory_capacity_e = all_metrics.select{|m| m.start_with?('machine_memory_bytes') && m.split.first.strip == 'machine_memory_bytes' }.first.split.last + memory_capacity_e = all_metrics.select{|m| m.start_with?('machine_memory_bytes') }.first.split.last memory_capacity = BigDecimal(memory_capacity_e).to_f @log.info "Memory Capacity #{memory_capacity}" return [cpu_capacity, memory_capacity] From 24644ce31b9a4ab003c3ebcfc4165a9d0899eaca Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Thu, 18 Feb 2021 12:53:24 -0800 Subject: [PATCH 071/301] Bug fixes for Feb release (#504) * bug fix for mdm metrics with no limits * fix exception bug --- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 131 +++++++++--------- source/plugins/ruby/kubelet_utils.rb | 12 +- 2 files changed, 73 insertions(+), 70 deletions(-) diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 102cb05f2..8cb6f603e 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -66,6 +66,7 @@ class CAdvisorMetricsAPIClient #cadvisor ports @@CADVISOR_SECURE_PORT = "10250" @@CADVISOR_NON_SECURE_PORT = "10255" + def initialize end @@ -86,40 +87,40 @@ def getPodsFromCAdvisor(winNode: nil) end def getBaseCAdvisorUri(winNode) - cAdvisorSecurePort = isCAdvisorOnSecurePort() + cAdvisorSecurePort = isCAdvisorOnSecurePort() + + if !!cAdvisorSecurePort == true + defaultHost = "https://localhost:#{@@CADVISOR_SECURE_PORT}" + else + defaultHost = "http://localhost:#{@@CADVISOR_NON_SECURE_PORT}" + end + + if !winNode.nil? + nodeIP = winNode["InternalIP"] + else + nodeIP = ENV["NODE_IP"] + end + if !nodeIP.nil? + @Log.info("Using #{nodeIP} for CAdvisor Host") if !!cAdvisorSecurePort == true - defaultHost = "https://localhost:#{@@CADVISOR_SECURE_PORT}" + return "https://#{nodeIP}:#{@@CADVISOR_SECURE_PORT}" else - defaultHost = "http://localhost:#{@@CADVISOR_NON_SECURE_PORT}" + return "http://#{nodeIP}:#{@@CADVISOR_NON_SECURE_PORT}" end - + else + @Log.warn ("NODE_IP environment variable not set. Using default as : #{defaultHost}") if !winNode.nil? - nodeIP = winNode["InternalIP"] - else - nodeIP = ENV["NODE_IP"] - end - - if !nodeIP.nil? - @Log.info("Using #{nodeIP} for CAdvisor Host") - if !!cAdvisorSecurePort == true - return "https://#{nodeIP}:#{@@CADVISOR_SECURE_PORT}" - else - return "http://#{nodeIP}:#{@@CADVISOR_NON_SECURE_PORT}" - end + return nil else - @Log.warn ("NODE_IP environment variable not set. Using default as : #{defaultHost}") - if !winNode.nil? 
- return nil - else - return defaultHost - end + return defaultHost end + end end def getCAdvisorUri(winNode, relativeUri) - baseUri = getBaseCAdvisorUri(winNode) - return baseUri + relativeUri + baseUri = getBaseCAdvisorUri(winNode) + return baseUri + relativeUri end def getMetrics(winNode: nil, metricTime: Time.now.utc.iso8601) @@ -254,20 +255,20 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met elsif (!@containerLogsRoute.nil? && !@containerLogsRoute.empty?) telemetryProps["containerLogsRoute"] = @containerLogsRoute end - #telemetry about health model - if (!@hmEnabled.nil? && !@hmEnabled.empty?) + #telemetry about health model + if (!@hmEnabled.nil? && !@hmEnabled.empty?) telemetryProps["hmEnabled"] = @hmEnabled - end - #telemetry for npm integration - if (!@npmIntegrationAdvanced.nil? && !@npmIntegrationAdvanced.empty?) - telemetryProps["int-npm-a"] = "1" - elsif (!@npmIntegrationBasic.nil? && !@npmIntegrationBasic.empty?) - telemetryProps["int-npm-b"] = "1" - end - #telemetry for Container log schema version clusterContainerLogSchemaVersion - if (!@clusterContainerLogSchemaVersion.nil? && !@clusterContainerLogSchemaVersion.empty?) + end + #telemetry for npm integration + if (!@npmIntegrationAdvanced.nil? && !@npmIntegrationAdvanced.empty?) + telemetryProps["int-npm-a"] = "1" + elsif (!@npmIntegrationBasic.nil? && !@npmIntegrationBasic.empty?) + telemetryProps["int-npm-b"] = "1" + end + #telemetry for Container log schema version clusterContainerLogSchemaVersion + if (!@clusterContainerLogSchemaVersion.nil? && !@clusterContainerLogSchemaVersion.empty?) telemetryProps["containerLogVer"] = @clusterContainerLogSchemaVersion - end + end ApplicationInsightsUtility.sendMetricTelemetry(metricNametoReturn, metricValue, telemetryProps) end end @@ -308,8 +309,8 @@ def getInsightsMetrics(winNode: nil, metricTime: Time.now.utc.iso8601) end if !metricInfo.nil? 
metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryTotal", "containerGpumemoryTotalBytes", metricTime)) - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryUsed","containerGpumemoryUsedBytes", metricTime)) - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "dutyCycle","containerGpuDutyCycle", metricTime)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryUsed", "containerGpumemoryUsedBytes", metricTime)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "dutyCycle", "containerGpuDutyCycle", metricTime)) metricDataItems.concat(getPersistentVolumeMetrics(metricInfo, hostName, "usedBytes", Constants::PV_USED_BYTES, metricTime)) else @@ -332,7 +333,6 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric begin metricInfo = metricJSON metricInfo["pods"].each do |pod| - podNamespace = pod["podRef"]["namespace"] excludeNamespace = false if (podNamespace.downcase == "kube-system") && @pvKubeSystemCollectionMetricsEnabled == "false" @@ -356,11 +356,11 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric metricItem["Computer"] = hostName metricItem["Name"] = metricNameToReturn metricItem["Value"] = volume[metricNameToCollect] - metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN + metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN metricItem["Namespace"] = Constants::INSIGTHTSMETRICS_TAGS_PV_NAMESPACE - + metricTags = {} - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID ] = clusterId + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = clusterId metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = clusterName metricTags[Constants::INSIGHTSMETRICS_TAGS_POD_UID] = podUid metricTags[Constants::INSIGHTSMETRICS_TAGS_POD_NAME] = podName @@ -370,7 +370,7 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric metricTags[Constants::INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES] = volume["capacityBytes"] metricItem["Tags"] = metricTags - + metricItems.push(metricItem) end end @@ -395,7 +395,6 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric return metricItems end - def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCollect, metricNametoReturn, metricPollTime) metricItems = [] clusterId = KubernetesApiClient.getClusterId @@ -415,18 +414,17 @@ def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCo if (!accelerator[metricNameToCollect].nil?) 
#empty check is invalid for non-strings containerName = container["name"] metricValue = accelerator[metricNameToCollect] - metricItem = {} metricItem["CollectionTime"] = metricPollTime metricItem["Computer"] = hostName metricItem["Name"] = metricNametoReturn metricItem["Value"] = metricValue - metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN + metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_GPU_NAMESPACE - + metricTags = {} - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID ] = clusterId + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = clusterId metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = clusterName metricTags[Constants::INSIGHTSMETRICS_TAGS_CONTAINER_NAME] = podUid + "/" + containerName #metricTags[Constants::INSIGHTSMETRICS_TAGS_K8SNAMESPACE] = podNameSpace @@ -442,9 +440,9 @@ def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCo if (!accelerator["id"].nil? && !accelerator["id"].empty?) metricTags[Constants::INSIGHTSMETRICS_TAGS_GPU_ID] = accelerator["id"] end - + metricItem["Tags"] = metricTags - + metricItems.push(metricItem) end end @@ -921,13 +919,13 @@ def getResponse(winNode, relativeUri) uri = URI.parse(cAdvisorUri) if isCAdvisorOnSecurePort() Net::HTTP.start(uri.host, uri.port, - :use_ssl => true, :open_timeout => 20, :read_timeout => 40, - :ca_file => "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", - :verify_mode => OpenSSL::SSL::VERIFY_NONE) do |http| - cAdvisorApiRequest = Net::HTTP::Get.new(uri.request_uri) - cAdvisorApiRequest["Authorization"] = "Bearer #{bearerToken}" - response = http.request(cAdvisorApiRequest) - @Log.info "Got response code #{response.code} from #{uri.request_uri}" + :use_ssl => true, :open_timeout => 20, :read_timeout => 40, + :ca_file => "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", + :verify_mode => OpenSSL::SSL::VERIFY_NONE) do |http| + cAdvisorApiRequest = Net::HTTP::Get.new(uri.request_uri) + cAdvisorApiRequest["Authorization"] = "Bearer #{bearerToken}" + response = http.request(cAdvisorApiRequest) + @Log.info "Got response code #{response.code} from #{uri.request_uri}" end else Net::HTTP.start(uri.host, uri.port, :use_ssl => false, :open_timeout => 20, :read_timeout => 40) do |http| @@ -940,19 +938,24 @@ def getResponse(winNode, relativeUri) rescue => error @Log.warn("CAdvisor api request for #{cAdvisorUri} failed: #{error}") telemetryProps = {} - telemetryProps["Computer"] = winNode["Hostname"] + if !winNode.nil? + hostName = winNode["Hostname"] + else + hostName = (OMS::Common.get_hostname) + end + telemetryProps["Computer"] = hostName ApplicationInsightsUtility.sendExceptionTelemetry(error, telemetryProps) end return response end def isCAdvisorOnSecurePort - cAdvisorSecurePort = false - # Check to see whether omsagent needs to use 10255(insecure) port or 10250(secure) port - if !@cAdvisorMetricsSecurePort.nil? && @cAdvisorMetricsSecurePort == "true" - cAdvisorSecurePort = true - end - return cAdvisorSecurePort + cAdvisorSecurePort = false + # Check to see whether omsagent needs to use 10255(insecure) port or 10250(secure) port + if !@cAdvisorMetricsSecurePort.nil? 
&& @cAdvisorMetricsSecurePort == "true" + cAdvisorSecurePort = true + end + return cAdvisorSecurePort end end end diff --git a/source/plugins/ruby/kubelet_utils.rb b/source/plugins/ruby/kubelet_utils.rb index bd2bd75b7..e2c731b79 100644 --- a/source/plugins/ruby/kubelet_utils.rb +++ b/source/plugins/ruby/kubelet_utils.rb @@ -20,12 +20,12 @@ def get_node_capacity response = CAdvisorMetricsAPIClient.getAllMetricsCAdvisor(winNode: nil) if !response.nil? && !response.body.nil? - all_metrics = response.body.split("\n") - #cadvisor machine metrics can exist with (>=1.19) or without dimensions (<1.19) + all_metrics = response.body.split("\n") + #cadvisor machine metrics can exist with (>=1.19) or without dimensions (<1.19) #so just checking startswith of metric name would be good enough to pick the metric value from exposition format - cpu_capacity = all_metrics.select{|m| m.start_with?('machine_cpu_cores') }.first.split.last.to_f * 1000 + cpu_capacity = all_metrics.select { |m| m.start_with?("machine_cpu_cores") }.first.split.last.to_f * 1000 @log.info "CPU Capacity #{cpu_capacity}" - memory_capacity_e = all_metrics.select{|m| m.start_with?('machine_memory_bytes') }.first.split.last + memory_capacity_e = all_metrics.select { |m| m.start_with?("machine_memory_bytes") }.first.split.last memory_capacity = BigDecimal(memory_capacity_e).to_f @log.info "Memory Capacity #{memory_capacity}" return [cpu_capacity, memory_capacity] @@ -89,9 +89,9 @@ def get_all_container_limits @log.info "cpuLimit: #{cpuLimit}" @log.info "memoryLimit: #{memoryLimit}" # Get cpu limit in nanocores - containerCpuLimitHash[key] = !cpuLimit.nil? ? KubernetesApiClient.getMetricNumericValue("cpu", cpuLimit) : 0 + containerCpuLimitHash[key] = !cpuLimit.nil? ? KubernetesApiClient.getMetricNumericValue("cpu", cpuLimit) : nil # Get memory limit in bytes - containerMemoryLimitHash[key] = !memoryLimit.nil? ? KubernetesApiClient.getMetricNumericValue("memory", memoryLimit) : 0 + containerMemoryLimitHash[key] = !memoryLimit.nil? ? KubernetesApiClient.getMetricNumericValue("memory", memoryLimit) : nil end end end From e56104c35f2df6a1a8f0a2fa72dc7921b47fb508 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 22 Feb 2021 18:00:03 -0800 Subject: [PATCH 072/301] Gangams/feb 2021 agent bug fix (#505) * fix npe in getKubeServiceRecords * use image fields from spec * fix typo * cover all cases * handle scenario only digest specified --- source/plugins/ruby/KubernetesApiClient.rb | 2 +- .../ruby/kubernetes_container_inventory.rb | 71 ++++++++++++------- 2 files changed, 48 insertions(+), 25 deletions(-) diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index aca2142a0..c5a363741 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -791,7 +791,7 @@ def getKubeAPIServerUrl def getKubeServicesInventoryRecords(serviceList, batchTime = Time.utc.iso8601) kubeServiceRecords = [] begin - if (!serviceList.nil? && !serviceList.empty?) + if (!serviceList.nil? && !serviceList.empty? && serviceList.key?("items") && !serviceList["items"].nil? && !serviceList["items"].empty? 
) servicesCount = serviceList["items"].length @Log.info("KubernetesApiClient::getKubeServicesInventoryRecords : number of services in serviceList #{servicesCount} @ #{Time.now.utc.iso8601}") serviceList["items"].each do |item| diff --git a/source/plugins/ruby/kubernetes_container_inventory.rb b/source/plugins/ruby/kubernetes_container_inventory.rb index 69beca493..82e36c8cc 100644 --- a/source/plugins/ruby/kubernetes_container_inventory.rb +++ b/source/plugins/ruby/kubernetes_container_inventory.rb @@ -50,30 +50,7 @@ def getContainerInventoryRecords(podItem, batchTime, clusterCollectEnvironmentVa if !atLocation.nil? containerInventoryRecord["ImageId"] = imageIdValue[(atLocation + 1)..-1] end - end - # image is of the format - repository/image:imagetag - imageValue = containerStatus["image"] - if !imageValue.nil? && !imageValue.empty? - # Find delimiters in the string of format repository/image:imagetag - slashLocation = imageValue.index("/") - colonLocation = imageValue.index(":") - if !colonLocation.nil? - if slashLocation.nil? - # image:imagetag - containerInventoryRecord["Image"] = imageValue[0..(colonLocation - 1)] - else - # repository/image:imagetag - containerInventoryRecord["Repository"] = imageValue[0..(slashLocation - 1)] - containerInventoryRecord["Image"] = imageValue[(slashLocation + 1)..(colonLocation - 1)] - end - containerInventoryRecord["ImageTag"] = imageValue[(colonLocation + 1)..-1] - end - elsif !imageIdValue.nil? && !imageIdValue.empty? - # Getting repo information from imageIdValue when no tag in ImageId - if !atLocation.nil? - containerInventoryRecord["Repository"] = imageIdValue[0..(atLocation - 1)] - end - end + end containerInventoryRecord["ExitCode"] = 0 isContainerTerminated = false isContainerWaiting = false @@ -107,6 +84,51 @@ def getContainerInventoryRecords(podItem, batchTime, clusterCollectEnvironmentVa end containerInfoMap = containersInfoMap[containerName] + # image can be in any one of below format in spec + # repository/image[:imagetag | @digest], repository/image:imagetag@digest, repo/image, image:imagetag, image@digest, image + imageValue = containerInfoMap["image"] + if !imageValue.nil? && !imageValue.empty? + # Find delimiters in image format + atLocation = imageValue.index("@") + isDigestSpecified = false + if !atLocation.nil? + # repository/image@digest or repository/image:imagetag@digest, image@digest + imageValue = imageValue[0..(atLocation - 1)] + # Use Digest from the spec's image in case when the status doesnt get populated i.e. container in pending or image pull back etc. + if containerInventoryRecord["ImageId"].nil? || containerInventoryRecord["ImageId"].empty? + containerInventoryRecord["ImageId"] = imageValue[(atLocation + 1)..-1] + end + isDigestSpecified = true + end + slashLocation = imageValue.index("/") + colonLocation = imageValue.index(":") + if !colonLocation.nil? + if slashLocation.nil? + # image:imagetag + containerInventoryRecord["Image"] = imageValue[0..(colonLocation - 1)] + else + # repository/image:imagetag + containerInventoryRecord["Repository"] = imageValue[0..(slashLocation - 1)] + containerInventoryRecord["Image"] = imageValue[(slashLocation + 1)..(colonLocation - 1)] + end + containerInventoryRecord["ImageTag"] = imageValue[(colonLocation + 1)..-1] + else + if slashLocation.nil? 
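# For reference, hypothetical image strings and how this delimiter logic resolves them (Repository / Image / ImageTag only):
#   "myregistry.io/nginx:1.19"   -> Repository = "myregistry.io", Image = "nginx", ImageTag = "1.19"
#   "nginx:1.19"                 -> Image = "nginx", ImageTag = "1.19"
#   "myregistry.io/nginx"        -> Repository = "myregistry.io", Image = "nginx", ImageTag defaults to "latest"
#   "nginx@sha256:<digest>"      -> Image = "nginx"; no "latest" default since a digest was specified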
+ # image + containerInventoryRecord["Image"] = imageValue + else + # repo/image + containerInventoryRecord["Repository"] = imageValue[0..(slashLocation - 1)] + containerInventoryRecord["Image"] = imageValue[(slashLocation + 1)..-1] + end + # if no tag specified, k8s assumes latest as imagetag and this is same behavior from docker API and from status. + # Ref - https://kubernetes.io/docs/concepts/containers/images/#image-names + if isDigestSpecified == false + containerInventoryRecord["ImageTag"] = "latest" + end + end + end + podName = containerInfoMap["PodName"] namespace = containerInfoMap["Namespace"] # containername in the format what docker sees @@ -165,6 +187,7 @@ def getContainersInfoMap(podItem, isWindows) podContainers.each do |container| containerInfoMap = {} containerName = container["name"] + containerInfoMap["image"] = container["image"] containerInfoMap["ElementName"] = containerName containerInfoMap["Computer"] = nodeName containerInfoMap["PodName"] = podName From e00b2aabf9609f76b9ce13c3397cc290e0318dd9 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Tue, 23 Feb 2021 13:08:51 -0800 Subject: [PATCH 073/301] changes for release -ciprod02232021 (#506) --- ReleaseNotes.md | 17 +++++++++++++++-- build/version | 4 ++-- charts/azuremonitor-containers/Chart.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 6 +++--- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 12 ++++++------ kubernetes/windows/Dockerfile | 2 +- .../onboarding/managed/enable-monitoring.ps1 | 2 +- scripts/onboarding/managed/enable-monitoring.sh | 2 +- .../onboarding/managed/upgrade-monitoring.sh | 2 +- 10 files changed, 32 insertions(+), 19 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index b1eb316a1..80d6f188d 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -10,6 +10,20 @@ additional questions or comments. ## Release History Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 02/23/2021 - +##### Version microsoft/oms:ciprod02232021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod02232021 (linux) +##### Version microsoft/oms:win-ciprod02232021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod02232021 (windows) +##### Code change log +- ContainerLogV2 schema support for LogAnalytics & ADX (not usable externally yet) +- Fix nodemetrics (cpuusageprecentage & memoryusagepercentage) metrics not flowing. This is fixed upstream for k8s versions >= 1.19.7 and >=1.20.2. 
+- Fix cpu & memory usage exceeded threshold container metrics not flowing when requests and/or limits were not set +- Mute some unused exceptions from going to telemetry +- Collect containerimage (repository, image & imagetag) from spec (instead of runtime) +- Add support for extension MSI for k8s arc +- Use cloud specific instrumentation keys for telemetry +- Picked up newer version for apt +- Add priority class to daemonset (in our chart only) + ### 01/11/2021 - ##### Version microsoft/oms:ciprod01112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01112021 (linux) ##### Version microsoft/oms:win-ciprod01112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod01112021 (windows) @@ -27,7 +41,6 @@ Note : The agent version(s) below has dates (ciprod), which indicate t - Enable ADX route for windows container logs - Remove logging to termination log in windows agent liveness probe - ### 11/09/2020 - ##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020 (linux) ##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod11092020 (windows) @@ -36,7 +49,7 @@ Note : The agent version(s) below has dates (ciprod), which indicate t ### 10/27/2020 - ##### Version microsoft/oms:ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020 (linux) -##### Version microsoft/oms:win-ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020 (windows) +##### Version microsoft/oms:win-ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10272020 (windows) ##### Code change log - Activate oneagent in few AKS regions (koreacentral,norwayeast) - Disable syslog diff --git a/build/version b/build/version index 711a96921..2da3efa39 100644 --- a/build/version +++ b/build/version @@ -2,11 +2,11 @@ # Build Version Information -CONTAINER_BUILDVERSION_MAJOR=12 +CONTAINER_BUILDVERSION_MAJOR=13 CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 CONTAINER_BUILDVERSION_BUILDNR=0 -CONTAINER_BUILDVERSION_DATE=20210111 +CONTAINER_BUILDVERSION_DATE=20210223 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index a809a4e69..ce64fd1ce 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.8.0 +version: 2.8.1 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 5601a5738..410f5d3c2 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -15,10 +15,10 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod01112021" - tagWindows: "win-ciprod01112021" + tag: "ciprod02232021" + tagWindows: "win-ciprod02232021" pullPolicy: IfNotPresent - dockerProviderVersion: "12.0.0-0" + dockerProviderVersion: "13.0.0-0" agentVersion: "1.10.0.1" # The priority used by the omsagent priority class for the daemonset pods diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 2e1118922..bee718a31 100644 --- 
a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod01112021 +ARG IMAGE_TAG=ciprod02232021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 67bd9cdde..cafd9b904 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -358,7 +358,7 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "12.0.0-0" + dockerProviderVersion: "13.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01112021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod02232021" imagePullPolicy: IfNotPresent resources: limits: @@ -521,13 +521,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "12.0.0-0" + dockerProviderVersion: "13.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01112021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod02232021" imagePullPolicy: IfNotPresent resources: limits: @@ -675,7 +675,7 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "12.0.0-0" + dockerProviderVersion: "13.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -685,7 +685,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod01112021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod02232021" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index f852bd236..d4f118449 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod01112021 +ARG IMAGE_TAG=win-ciprod02232021 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index 45ddb44b0..db035b13d 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -64,7 +64,7 @@ $isUsingServicePrincipal = $false # released chart version in mcr $mcr = "mcr.microsoft.com" -$mcrChartVersion = "2.8.0" +$mcrChartVersion = "2.8.1" $mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" $helmLocalRepoName = "." 
$omsAgentDomainName="opinsights.azure.com" diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index 2dc0a465f..9d0c0aca5 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -44,7 +44,7 @@ defaultAzureCloud="AzureCloud" omsAgentDomainName="opinsights.azure.com" # released chart version in mcr -mcrChartVersion="2.8.0" +mcrChartVersion="2.8.1" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" helmLocalRepoName="." diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 8826b6df6..6d14dfa5f 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -20,7 +20,7 @@ set -e set -o pipefail # released chart version for Azure Arc enabled Kubernetes public preview -mcrChartVersion="2.8.0" +mcrChartVersion="2.8.1" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" From 31f0e5f50f63e08c70dab8a0e78a804b4a09e8bd Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 23 Feb 2021 15:38:14 -0800 Subject: [PATCH 074/301] Gangams/e2e test framework (#503) * add agent e2e fw and tests * doc and script updates * add validation script * doc updates * yaml updates * fix typo * doc updates * more doc updates * add ISTEST for helm chart to use arc conf * refactor test code * fix pr feedback * fix pr feedback * fix pr feedback * fix pr feedback --- .../update-place-holdres-in-e2e-tests.sh | 35 ++ .pipelines/validate-e2e-tests-results.sh | 71 +++ README.md | 31 ++ .../templates/omsagent-daemonset.yaml | 2 + .../templates/omsagent-deployment.yaml | 4 +- charts/azuremonitor-containers/values.yaml | 4 + kubernetes/omsagent.yaml | 6 + source/plugins/ruby/in_kube_events.rb | 4 + source/plugins/ruby/in_kube_nodes.rb | 21 + source/plugins/ruby/in_kube_podinventory.rb | 12 + source/plugins/ruby/in_kube_pvinventory.rb | 5 +- test/e2e/e2e-tests.yaml | 178 ++++++++ test/e2e/src/common/arm_rest_utility.py | 25 + test/e2e/src/common/constants.py | 119 +++++ test/e2e/src/common/helm_utility.py | 68 +++ .../common/kubernetes_configmap_utility.py | 8 + test/e2e/src/common/kubernetes_crd_utility.py | 27 ++ .../common/kubernetes_daemonset_utility.py | 36 ++ .../common/kubernetes_deployment_utility.py | 38 ++ .../common/kubernetes_namespace_utility.py | 32 ++ .../e2e/src/common/kubernetes_node_utility.py | 12 + test/e2e/src/common/kubernetes_pod_utility.py | 65 +++ .../src/common/kubernetes_secret_utility.py | 26 ++ .../src/common/kubernetes_service_utility.py | 19 + .../src/common/kubernetes_version_utility.py | 9 + test/e2e/src/common/results_utility.py | 24 + test/e2e/src/core/Dockerfile | 17 + test/e2e/src/core/conftest.py | 90 ++++ test/e2e/src/core/e2e_tests.sh | 26 ++ test/e2e/src/core/helper.py | 429 ++++++++++++++++++ test/e2e/src/core/pytest.ini | 4 + test/e2e/src/tests/test_ds_workflows.py | 60 +++ test/e2e/src/tests/test_e2e_workflows.py | 330 ++++++++++++++ .../tests/test_node_metrics_e2e_workflow.py | 420 +++++++++++++++++ .../tests/test_pod_metrics_e2e_workflow.py | 134 ++++++ test/e2e/src/tests/test_resource_status.py | 43 ++ test/e2e/src/tests/test_rs_workflows.py | 93 ++++ 37 files changed, 2525 insertions(+), 2 deletions(-) create mode 100755 .pipelines/update-place-holdres-in-e2e-tests.sh create mode 100644 .pipelines/validate-e2e-tests-results.sh create 
mode 100644 test/e2e/e2e-tests.yaml create mode 100644 test/e2e/src/common/arm_rest_utility.py create mode 100644 test/e2e/src/common/constants.py create mode 100644 test/e2e/src/common/helm_utility.py create mode 100644 test/e2e/src/common/kubernetes_configmap_utility.py create mode 100644 test/e2e/src/common/kubernetes_crd_utility.py create mode 100644 test/e2e/src/common/kubernetes_daemonset_utility.py create mode 100644 test/e2e/src/common/kubernetes_deployment_utility.py create mode 100644 test/e2e/src/common/kubernetes_namespace_utility.py create mode 100644 test/e2e/src/common/kubernetes_node_utility.py create mode 100644 test/e2e/src/common/kubernetes_pod_utility.py create mode 100644 test/e2e/src/common/kubernetes_secret_utility.py create mode 100644 test/e2e/src/common/kubernetes_service_utility.py create mode 100644 test/e2e/src/common/kubernetes_version_utility.py create mode 100644 test/e2e/src/common/results_utility.py create mode 100644 test/e2e/src/core/Dockerfile create mode 100644 test/e2e/src/core/conftest.py create mode 100644 test/e2e/src/core/e2e_tests.sh create mode 100755 test/e2e/src/core/helper.py create mode 100644 test/e2e/src/core/pytest.ini create mode 100755 test/e2e/src/tests/test_ds_workflows.py create mode 100755 test/e2e/src/tests/test_e2e_workflows.py create mode 100755 test/e2e/src/tests/test_node_metrics_e2e_workflow.py create mode 100755 test/e2e/src/tests/test_pod_metrics_e2e_workflow.py create mode 100755 test/e2e/src/tests/test_resource_status.py create mode 100755 test/e2e/src/tests/test_rs_workflows.py diff --git a/.pipelines/update-place-holdres-in-e2e-tests.sh b/.pipelines/update-place-holdres-in-e2e-tests.sh new file mode 100755 index 000000000..5fec73684 --- /dev/null +++ b/.pipelines/update-place-holdres-in-e2e-tests.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +echo "start: update placeholders of e2e-tests.yaml ..." + +for ARGUMENT in "$@" +do + KEY=$(echo $ARGUMENT | cut -f1 -d=) + VALUE=$(echo $ARGUMENT | cut -f2 -d=) + + case "$KEY" in + TENANT_ID) TENANT_ID=$VALUE ;; + *) + esac +done + +echo "start: read appid and appsecret" +# used the same SP which used for acr +CLIENT_ID=$(cat ~/acrappid) +CLIENT_SECRET=$(cat ~/acrappsecret) +echo "end: read appid and appsecret" + +echo "Service Principal CLIENT_ID:$CLIENT_ID" +echo "replace CLIENT_ID value" +sed -i "s=SP_CLIENT_ID_VALUE=$CLIENT_ID=g" e2e-tests.yaml + +# only uncomment for debug purpose +# echo "Service Principal CLIENT_SECRET:$CLIENT_SECRET" +echo "replace CLIENT_SECRET value" +sed -i "s=SP_CLIENT_SECRET_VALUE=$CLIENT_SECRET=g" e2e-tests.yaml + +echo "Service Principal TENANT_ID:$TENANT_ID" +echo "replace TENANT_ID value" +sed -i "s=SP_TENANT_ID_VALUE=$TENANT_ID=g" e2e-tests.yaml + +echo "end: update placeholders of e2e-tests.yaml." diff --git a/.pipelines/validate-e2e-tests-results.sh b/.pipelines/validate-e2e-tests-results.sh new file mode 100644 index 000000000..c38fa0f50 --- /dev/null +++ b/.pipelines/validate-e2e-tests-results.sh @@ -0,0 +1,71 @@ +#!/bin/bash +echo "start: validating results of e2e-tests ..." 
+DEFAULT_SONOBUOY_VERSION="0.20.0" +DEFAULT_TIME_OUT_IN_MINS=60 +for ARGUMENT in "$@" +do + KEY=$(echo $ARGUMENT | cut -f1 -d=) + VALUE=$(echo $ARGUMENT | cut -f2 -d=) + + case "$KEY" in + SONOBUOY_VERSION) SONOBUOY_VERSION=$VALUE ;; + *) + esac +done + +if [ -z $SONOBUOY_VERSION ]; then + SONOBUOY_VERSION=$DEFAULT_SONOBUOY_VERSION +fi + +echo "sonobuoy version: ${SONOBUOY_VERSION}" + +echo "start: downloading sonobuoy" +curl -LO https://github.com/vmware-tanzu/sonobuoy/releases/download/v${SONOBUOY_VERSION}/sonobuoy_${SONOBUOY_VERSION}_linux_amd64.tar.gz +echo "end: downloading sonobuoy" + +echo "start: extract sonobuoy tar file" +mkdir -p sonobuoy-install/ +tar -zxf sonobuoy_${SONOBUOY_VERSION}_*.tar.gz -C sonobuoy-install/ +echo "end: extract sonobuoy tar file" + +echo "start: move sonobuoy binaries to /usr/local/bin/" +mv -f sonobuoy-install/sonobuoy /usr/local/bin/ +echo "end: move sonobuoy binaries to /usr/local/bin/" + +rm -rf sonobuoy_${SONOBUOY_VERSION}_*.tar.gz sonobuoy-install/ + +results=$(sonobuoy retrieve) +mins=0 +IsSucceeded=true +while [ $mins -le $DEFAULT_TIME_OUT_IN_MINS ] +do + # check the status + echo "checking test status" + status=$(sonobuoy status) + status=$(echo $status | sed 's/`//g') + if [[ $status == *"completed"* ]]; then + echo "test run completed" + mins=$DEFAULT_TIME_OUT_IN_MINS + if [[ $status == *"failed"* ]]; then + IsSucceeded=false + fi + else + echo "sleep for 1m to check the status again" + sleep 1m + fi + mins=$(( $mins + 1 )) +done +echo "status:${IsSucceeded}" + +results=$(sonobuoy retrieve) +sonobuoy results $results + +if $IsSucceeded == true; then + echo "all test passed" + exit 0 +else + echo "tests are failed. please review the results by downloading tar file via sonobuoy retrieve command" + exit 1 +fi + +echo "end: validating results of e2e-tests ..." diff --git a/README.md b/README.md index 3eec1f344..3564345ee 100644 --- a/README.md +++ b/README.md @@ -91,6 +91,7 @@ The general directory structure is: │ │ | ... - plugins in, out and filters code in ruby │ ├── toml-parser/ - code for parsing of toml configuration files ├── test/ - source code for tests +│ ├── e2e/ - e2e tests to validate agent and e2e workflow(s) │ ├── unit-tests/ - unit tests code │ ├── scenario/ - scenario tests code ├── !_README.md - this file @@ -271,6 +272,36 @@ For DEV and PROD branches, automatically deployed latest yaml with latest agent # E2E Tests +## For executing tests + +1. Deploy the omsagent.yaml with your agent image. In the yaml, make sure the `ISTEST` environment variable is set to `true` if it is not set already +2. Update the Service Principal CLIENT_ID, CLIENT_SECRET and TENANT_ID placeholder values and apply e2e-tests.yaml to execute the tests + > Note: the Service Principal requires the reader role on the Log Analytics workspace and the cluster resource to query LA and metrics + ``` + cd ~/Docker-Provider/test/e2e # based on your repo path + kubectl apply -f e2e-tests.yaml # this will trigger job to run the tests in sonobuoy namespace + kubectl get po -n sonobuoy # to check the pods and jobs associated to tests + ``` +3. Download [sonobuoy](https://github.com/vmware-tanzu/sonobuoy/releases) on your dev box to view the results of the tests + ``` + results=$(sonobuoy retrieve) # downloads tar file which has logs and test results + sonobuoy results $results # get the summary of the results + tar -xzvf <downloaded tar file> # extract downloaded tar file and look for pod logs, results and other k8s resources if there are any failures + ``` + +## For adding new tests + +1. 
Add the test python file with your test code under `tests` directory +2. Build the docker image, recommended to use ACR & MCR + ``` + cd ~/Docker-Provider/test/e2e/src # based on your repo path + docker login -u -p # login to acr + docker build -f ./core/Dockerfile -t /: . + docker push /: + ``` +3. update existing agentest image tag in e2e-tests.yaml with newly built image tag with MCR repo + +# Scenario Tests Clusters are used in release pipeline already has the yamls under test\scenario deployed. Make sure to validate these scenarios. If you have new interesting scenarios, please add/update them. diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index 0272c6263..615cd0485 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -89,6 +89,8 @@ spec: - name: FBIT_TAIL_BUFFER_MAX_SIZE value: {{ .Values.omsagent.logsettings.tailbufmaxsizemegabytes | quote }} {{- end }} + - name: ISTEST + value: {{ .Values.omsagent.ISTEST | quote }} securityContext: privileged: true ports: diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index ecd0b705b..012dd2720 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -72,7 +72,9 @@ spec: value: {{ .Values.Azure.Extension.Name | quote }} {{- end }} - name: USER_ASSIGNED_IDENTITY_CLIENT_ID - value: "" + value: "" + - name: ISTEST + value: {{ .Values.omsagent.ISTEST | quote }} securityContext: privileged: true ports: diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 410f5d3c2..5831c9889 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -33,6 +33,10 @@ omsagent: # chance to build pod for the node and give it to the scheduler) # Should be some number greater than default (0) priority: 10 + + # This used for running agent pods in test mode. + # if set to true additional agent workflow logs will be emitted which are used for e2e and arc k8s conformance testing + ISTEST: false ## To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index cafd9b904..ebf0257af 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -383,6 +383,9 @@ spec: value: "VALUE_AKS_RESOURCE_ID_VALUE" - name: AKS_REGION value: "VALUE_AKS_RESOURCE_REGION_VALUE" + # this used for e2e test and setting this just emits some additional log statements which used for the e2e tests + - name: ISTEST + value: "true" #Uncomment below two lines for ACS clusters and set the cluster names manually. Also comment out the above two lines for ACS clusters #- name: ACS_RESOURCE_NAME # value: "my_acs_cluster_name" @@ -541,6 +544,9 @@ spec: value: "VALUE_AKS_RESOURCE_ID_VALUE" - name: AKS_REGION value: "VALUE_AKS_RESOURCE_REGION_VALUE" + # this used for e2e test and setting this just emits some additional log statements which used for the e2e tests + - name: ISTEST + value: "true" # Uncomment below two lines for ACS clusters and set the cluster names manually. 
Also comment out the above two lines for ACS clusters #- name: ACS_RESOURCE_NAME # value: "my_acs_cluster_name" diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index 4f6017cc5..f50019a01 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -129,6 +129,7 @@ def enumerate def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTime = Time.utc.iso8601) currentTime = Time.now emitTime = currentTime.to_f + @@istestvar = ENV["ISTEST"] begin eventStream = MultiEventStream.new events["items"].each do |items| @@ -171,6 +172,9 @@ def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTim @eventsCount += 1 end router.emit_stream(@tag, eventStream) if eventStream + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeEventsInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end rescue => errorStr $log.debug_backtrace(errorStr.backtrace) ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index 0a4727077..c803c0fa2 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -188,6 +188,9 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) $log.info("in_kube_node::parse_and_emit_records: number of container node inventory records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") router.emit_stream(@@ContainerNodeInventoryTag, containerNodeInventoryEventStream) if containerNodeInventoryEventStream containerNodeInventoryEventStream = MultiEventStream.new + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("containerNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end # node metrics records @@ -217,6 +220,9 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) $log.info("in_kube_nodes::parse_and_emit_records: number of node perf metric records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream kubePerfEventStream = MultiEventStream.new + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeNodePerfEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end # node GPU metrics record @@ -249,6 +255,9 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) $log.info("in_kube_nodes::parse_and_emit_records: number of GPU node perf metric records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream insightsMetricsEventStream = MultiEventStream.new + if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) + $log.info("kubeNodeInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end # Adding telemetry to send node telemetry every 10 minutes timeDifference = (DateTime.now.to_time.to_i - @@nodeTelemetryTimeTracker).abs @@ -300,23 +309,35 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) router.emit_stream(@tag, eventStream) if eventStream $log.info("in_kube_node::parse_and_emit_records: number of mdm node inventory records emitted #{eventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@@MDMKubeNodeInventoryTag, eventStream) if eventStream + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end eventStream = nil end if containerNodeInventoryEventStream.count > 0 $log.info("in_kube_node::parse_and_emit_records: number of container node inventory records emitted #{containerNodeInventoryEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@@ContainerNodeInventoryTag, containerNodeInventoryEventStream) if containerNodeInventoryEventStream containerNodeInventoryEventStream = nil + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("containerNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end if kubePerfEventStream.count > 0 $log.info("in_kube_nodes::parse_and_emit_records: number of node perf metric records emitted #{kubePerfEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream kubePerfEventStream = nil + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeNodePerfInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end if insightsMetricsEventStream.count > 0 $log.info("in_kube_nodes::parse_and_emit_records: number of GPU node perf metric records emitted #{insightsMetricsEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream insightsMetricsEventStream = nil + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeNodeInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end rescue => errorStr $log.warn "Failed to retrieve node inventory: #{errorStr}" diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 0cff2eefe..5256eb159 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -265,6 +265,9 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && kubePerfEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE $log.info("in_kube_podinventory::parse_and_emit_records: number of container perf records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream + if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) + $log.info("kubeContainerPerfEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end kubePerfEventStream = MultiEventStream.new end @@ -306,6 +309,9 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc $log.info("in_kube_podinventory::parse_and_emit_records: number of perf records emitted #{kubePerfEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream kubePerfEventStream = nil + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeContainerPerfEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end if insightsMetricsEventStream.count > 0 @@ -345,6 +351,9 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc $log.info("in_kube_podinventory::parse_and_emit_records: number of service records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") router.emit_stream(@@kubeservicesTag, kubeServicesEventStream) if kubeServicesEventStream kubeServicesEventStream = MultiEventStream.new + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeServicesEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end end end @@ -352,6 +361,9 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc if kubeServicesEventStream.count > 0 $log.info("in_kube_podinventory::parse_and_emit_records : number of service records emitted #{kubeServicesEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@@kubeservicesTag, kubeServicesEventStream) if kubeServicesEventStream + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeServicesEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end kubeServicesEventStream = nil end diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 861b3a8e1..4efe86f61 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -106,7 +106,7 @@ def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) currentTime = Time.now emitTime = currentTime.to_f eventStream = MultiEventStream.new - + @@istestvar = ENV["ISTEST"] begin records = [] pvInventory["items"].each do |item| @@ -156,6 +156,9 @@ def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) end router.emit_stream(@tag, eventStream) if eventStream + if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) + $log.info("kubePVInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end rescue => errorStr $log.warn "Failed in parse_and_emit_record for in_kube_pvinventory: #{errorStr}" diff --git a/test/e2e/e2e-tests.yaml b/test/e2e/e2e-tests.yaml new file mode 100644 index 000000000..06dfa1fb0 --- /dev/null +++ b/test/e2e/e2e-tests.yaml @@ -0,0 +1,178 @@ + +--- +apiVersion: v1 +kind: Namespace +metadata: + name: sonobuoy +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + component: sonobuoy + name: sonobuoy-serviceaccount + namespace: sonobuoy +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + component: sonobuoy + namespace: sonobuoy + name: sonobuoy-serviceaccount-sonobuoy +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: sonobuoy-serviceaccount-sonobuoy +subjects: +- kind: ServiceAccount + name: sonobuoy-serviceaccount + namespace: sonobuoy +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + component: sonobuoy + namespace: sonobuoy + name: sonobuoy-serviceaccount-sonobuoy +rules: +- apiGroups: + - '*' + resources: + - '*' + verbs: + - '*' +- nonResourceURLs: + - '/metrics' + - '/logs' + - '/logs/*' + verbs: + - 'get' +--- +apiVersion: v1 +data: + config.json: | + {"Description":"DEFAULT","UUID":"bf5c02ed-1948-48f1-b12d-5a2d74435e46","Version":"v0.20.0","ResultsDir":"/tmp/sonobuoy","Resources":["apiservices","certificatesigningrequests","clusterrolebindings","clusterroles","componentstatuses","configmaps","controllerrevisions","cronjobs","customresourcedefinitions","daemonsets","deployments","endpoints","ingresses","jobs","leases","limitranges","mutatingwebhookconfigurations","namespaces","networkpolicies","nodes","persistentvolumeclaims","persistentvolumes","poddisruptionbudgets","pods","podlogs","podsecuritypolicies","podtemplates","priorityclasses","replicasets","replicationcontrollers","resourcequotas","rolebindings","roles","servergroups","serverversion","serviceaccounts","services","statefulsets","storageclasses","validatingwebhookconfigurations","volumeattachments"],"Filters":{"Namespaces":".*","LabelSelector":""},"Limits":{"PodLogs":{"Namespaces":"","SonobuoyNamespace":true,"FieldSelectors":[],"LabelSelector":"","Previous":false,"SinceSeconds":null,"SinceTime":null,"Timestamps":false,"TailLines":null,"LimitBytes":null,"LimitSize":"","LimitTime":""}},"QPS":30,"Burst":50,"Server":{"bindaddress":"0.0.0.0","bindport":8080,"advertiseaddress":"","timeoutseconds":10800},"Plugins":null,"PluginSearchPath":["./plugins.d","/etc/sonobuoy/plugins.d","~/sonobuoy/plugins.d"],"Namespace":"sonobuoy","WorkerImage":"sonobuoy/sonobuoy:v0.20.0","ImagePullPolicy":"IfNotPresent","ImagePullSecrets":"","ProgressUpdatesPort":"8099"} +kind: ConfigMap +metadata: + labels: + component: sonobuoy + name: sonobuoy-config-cm + namespace: sonobuoy +--- +apiVersion: v1 +data: + plugin-0.yaml: | + podSpec: + containers: [] + restartPolicy: Never + serviceAccountName: sonobuoy-serviceaccount + nodeSelector: + kubernetes.io/os: linux + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - key: kubernetes.io/e2e-evict-taint-key + operator: Exists + sonobuoy-config: + driver: Job + plugin-name: agenttests + result-format: junit + spec: + env: + # Update values of CLIENT_ID, CLIENT_SECRET of the service principal which has permission to query LA ad Metrics API + # Update value 
of TENANT_ID corresponding your Azure Service principal + - name: CLIENT_ID + value: "SP_CLIENT_ID_VALUE" + - name: CLIENT_SECRET + value: "CLIENT_SECRET_VALUE" + - name: TENANT_ID + value: "SP_TENANT_ID_VALUE" + - name: DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES + value: "10" + - name: DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES + value: "10" + - name: AGENT_POD_EXPECTED_RESTART_COUNT + value: "0" + - name: AZURE_CLOUD + value: "AZURE_PUBLIC_CLOUD" + # image tag should be updated if new tests being added after this image + image: mcr.microsoft.com/azuremonitor/containerinsights/cidev:ciagenttest02152021 + imagePullPolicy: IfNotPresent + name: plugin + resources: {} + volumeMounts: + - mountPath: /tmp/results + name: results +kind: ConfigMap +metadata: + labels: + component: sonobuoy + name: sonobuoy-plugins-cm + namespace: sonobuoy +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + component: sonobuoy + run: sonobuoy-master + sonobuoy-component: aggregator + tier: analysis + name: sonobuoy + namespace: sonobuoy +spec: + containers: + - env: + - name: SONOBUOY_ADVERTISE_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: sonobuoy/sonobuoy:v0.20.0 + imagePullPolicy: IfNotPresent + name: kube-sonobuoy + volumeMounts: + - mountPath: /etc/sonobuoy + name: sonobuoy-config-volume + - mountPath: /plugins.d + name: sonobuoy-plugins-volume + - mountPath: /tmp/sonobuoy + name: output-volume + restartPolicy: Never + serviceAccountName: sonobuoy-serviceaccount + nodeSelector: + kubernetes.io/os: linux + tolerations: + - key: "kubernetes.io/e2e-evict-taint-key" + operator: "Exists" + volumes: + - configMap: + name: sonobuoy-config-cm + name: sonobuoy-config-volume + - configMap: + name: sonobuoy-plugins-cm + name: sonobuoy-plugins-volume + - emptyDir: {} + name: output-volume +--- +apiVersion: v1 +kind: Service +metadata: + labels: + component: sonobuoy + sonobuoy-component: aggregator + name: sonobuoy-aggregator + namespace: sonobuoy +spec: + ports: + - port: 8080 + protocol: TCP + targetPort: 8080 + selector: + sonobuoy-component: aggregator + type: ClusterIP + diff --git a/test/e2e/src/common/arm_rest_utility.py b/test/e2e/src/common/arm_rest_utility.py new file mode 100644 index 000000000..604f8b791 --- /dev/null +++ b/test/e2e/src/common/arm_rest_utility.py @@ -0,0 +1,25 @@ +import adal +import pytest + +from msrestazure.azure_active_directory import AADTokenCredentials + + +# Function to fetch aad token from spn id and password +def fetch_aad_token(client_id, client_secret, authority_uri, resource_uri): + """ + Authenticate using service principal w/ key. 
+ """ + try: + context = adal.AuthenticationContext(authority_uri, api_version=None) + return context.acquire_token_with_client_credentials(resource_uri, client_id, client_secret) + except Exception as e: + pytest.fail("Error occured while fetching aad token: " + str(e)) + + +# Function that returns aad token credentials for a given spn +def fetch_aad_token_credentials(client_id, client_secret, authority_uri, resource_uri): + mgmt_token = fetch_aad_token(client_id, client_secret, authority_uri, resource_uri) + try: + return AADTokenCredentials(mgmt_token, client_id) + except Exception as e: + pytest.fail("Error occured while fetching credentials: " + str(e)) diff --git a/test/e2e/src/common/constants.py b/test/e2e/src/common/constants.py new file mode 100644 index 000000000..770964cb5 --- /dev/null +++ b/test/e2e/src/common/constants.py @@ -0,0 +1,119 @@ +AZURE_PUBLIC_CLOUD_ENDPOINTS = { + "activeDirectory": "https://login.microsoftonline.com/", + "activeDirectoryDataLakeResourceId": "https://datalake.azure.net/", + "activeDirectoryGraphResourceId": "https://graph.windows.net/", + "activeDirectoryResourceId": "https://management.core.windows.net/", + "appInsights": "https://api.applicationinsights.io", + "appInsightsTelemetryChannel": "https://dc.applicationinsights.azure.com/v2/track", + "batchResourceId": "https://batch.core.windows.net/", + "gallery": "https://gallery.azure.com/", + "logAnalytics": "https://api.loganalytics.io", + "management": "https://management.core.windows.net/", + "mediaResourceId": "https://rest.media.azure.net", + "microsoftGraphResourceId": "https://graph.microsoft.com/", + "ossrdbmsResourceId": "https://ossrdbms-aad.database.windows.net", + "resourceManager": "https://management.azure.com/", + "sqlManagement": "https://management.core.windows.net:8443/", + "vmImageAliasDoc": "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/master/arm-compute/quickstart-templates/aliases.json" +} + +AZURE_DOGFOOD_ENDPOINTS = { + "activeDirectory": "https://login.windows-ppe.net/", + "activeDirectoryDataLakeResourceId": None, + "activeDirectoryGraphResourceId": "https://graph.ppe.windows.net/", + "activeDirectoryResourceId": "https://management.core.windows.net/", + "appInsights": None, + "appInsightsTelemetryChannel": None, + "batchResourceId": None, + "gallery": "https://df.gallery.azure-test.net/", + "logAnalytics": None, + "management": "https://management-preview.core.windows-int.net/", + "mediaResourceId": None, + "microsoftGraphResourceId": None, + "ossrdbmsResourceId": None, + "resourceManager": "https://api-dogfood.resources.windows-int.net/", + "sqlManagement": None, + "vmImageAliasDoc": None +} + +AZURE_CLOUD_DICT = {"AZURE_PUBLIC_CLOUD" : AZURE_PUBLIC_CLOUD_ENDPOINTS, "AZURE_DOGFOOD": AZURE_DOGFOOD_ENDPOINTS} + +TIMEOUT = 300 + +# Azure Monitor for Container Extension related +AGENT_RESOURCES_NAMESPACE = 'kube-system' +AGENT_DEPLOYMENT_NAME = 'omsagent-rs' +AGENT_DAEMONSET_NAME = 'omsagent' +AGENT_WIN_DAEMONSET_NAME = 'omsagent-win' + +AGENT_DEPLOYMENT_PODS_LABEL_SELECTOR = 'rsName=omsagent-rs' +AGENT_DAEMON_SET_PODS_LABEL_SELECTOR = 'component=oms-agent' +AGENT_OMSAGENT_LOG_PATH = '/var/opt/microsoft/omsagent/log/omsagent.log' +AGENT_REPLICASET_WORKFLOWS = ["kubePodInventoryEmitStreamSuccess", "kubeNodeInventoryEmitStreamSuccess"] + +# override this through setting enviornment variable if the expected restart count is > 0 for example applying configmap +AGENT_POD_EXPECTED_RESTART_COUNT = 0 + +# replicaset workflow streams +KUBE_POD_INVENTORY_EMIT_STREAM 
= "kubePodInventoryEmitStreamSuccess" +KUBE_NODE_INVENTORY_EMIT_STREAM = "kubeNodeInventoryEmitStreamSuccess" +KUBE_DEPLOYMENT_INVENTORY_EMIT_STREAM = "kubestatedeploymentsInsightsMetricsEmitStreamSuccess" +KUBE_CONTAINER_PERF_EMIT_STREAM = "kubeContainerPerfEventEmitStreamSuccess" +KUBE_SERVICES_EMIT_STREAM = "kubeServicesEventEmitStreamSuccess" +KUBE_CONTAINER_NODE_INVENTORY_EMIT_STREAM = "containerNodeInventoryEmitStreamSuccess" +KUBE_EVENTS_EMIT_STREAM = "kubeEventsInventoryEmitStreamSuccess" +# daemonset workflow streams +CONTAINER_PERF_EMIT_STREAM = "cAdvisorPerfEmitStreamSuccess" +CONTAINER_INVENTORY_EMIT_STREAM = "containerInventoryEmitStreamSuccess" + +# simple log analytics queries to validate for e2e workflows +DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES = 10 +KUBE_POD_INVENTORY_QUERY = "KubePodInventory | where TimeGenerated > ago({0}) | count" +KUBE_NODE_INVENTORY_QUERY = "KubeNodeInventory | where TimeGenerated > ago({0}) | count" +KUBE_SERVICES_QUERY = "KubeServices | where TimeGenerated > ago({0}) | count" +KUBE_EVENTS_QUERY = "KubeEvents | where TimeGenerated > ago({0}) | count" +CONTAINER_NODE_INVENTORY_QUERY = "ContainerNodeInventory | where TimeGenerated > ago({0}) | count" +CONTAINER_INVENTORY_QUERY = "ContainerInventory | where TimeGenerated > ago({0}) | count" +# node perf +NODE_PERF_CPU_CAPCITY_QUERY = "Perf | where ObjectName == 'K8SNode' | where CounterName == 'cpuCapacityNanoCores' | where TimeGenerated > ago({0}) | count" +NODE_PERF_MEMORY_CAPCITY_QUERY = "Perf | where ObjectName == 'K8SNode' | where CounterName == 'memoryCapacityBytes' | where TimeGenerated > ago({0}) | count" +NODE_PERF_CPU_ALLOCATABLE_QUERY = "Perf | where ObjectName == 'K8SNode' | where CounterName == 'cpuAllocatableNanoCores' | where TimeGenerated > ago({0}) | count" +NODE_PERF_MEMORY_ALLOCATABLE_QUERY = "Perf | where ObjectName == 'K8SNode' | where CounterName == 'memoryAllocatableBytes' | where TimeGenerated > ago({0}) | count" +NODE_PERF_CPU_USAGE_QUERY = "Perf | where ObjectName == 'K8SNode' | where CounterName == 'cpuUsageNanoCores' | where TimeGenerated > ago({0}) | count" +NODE_PERF_MEMORY_RSS_USAGE_QUERY = "Perf | where ObjectName == 'K8SNode' | where CounterName == 'memoryRssBytes' | where TimeGenerated > ago({0}) | count" +NODE_PERF_MEMORY_WS_USAGE_QUERY = "Perf | where ObjectName == 'K8SNode' | where CounterName =='memoryWorkingSetBytes' | where TimeGenerated > ago({0}) | count" +NODE_PERF_RESTART_TIME_EPOCH_QUERY = "Perf | where ObjectName == 'K8SNode' | where CounterName == 'restartTimeEpoch' | where TimeGenerated > ago({0}) | count" +# container perf +CONTAINER_PERF_CPU_LIMITS_QUERY = "Perf | where ObjectName == 'K8SContainer' | where CounterName == 'cpuLimitNanoCores' | where TimeGenerated > ago({0}) | count" +CONTAINER_PERF_MEMORY_LIMITS_QUERY = "Perf | where ObjectName == 'K8SContainer' | where CounterName == 'memoryLimitBytes' | where TimeGenerated > ago({0}) | count" +CONTAINER_PERF_CPU_REQUESTS_QUERY = "Perf | where ObjectName == 'K8SContainer' | where CounterName == 'cpuRequestNanoCores' | where TimeGenerated > ago({0}) | count" +CONTAINER_PERF_MEMORY_REQUESTS_QUERY = "Perf | where ObjectName == 'K8SContainer' | where CounterName == 'memoryRequestBytes' | where TimeGenerated > ago({0}) | count" +CONTAINER_PERF_CPU_USAGE_QUERY = "Perf | where ObjectName == 'K8SContainer' | where CounterName == 'cpuUsageNanoCores' | where TimeGenerated > ago({0}) | count" +CONTAINER_PERF_MEMORY_RSS_USAGE_QUERY = "Perf | where ObjectName == 'K8SContainer' | where CounterName == 'memoryRssBytes' 
| where TimeGenerated > ago({0}) | count" +CONTAINER_PERF_MEMORY_WS_USAGE_QUERY = "Perf | where ObjectName == 'K8SContainer' | where CounterName == 'memoryWorkingSetBytes' | where TimeGenerated > ago({0}) | count" +CONTAINER_PERF_RESTART_TIME_EPOCH_QUERY = "Perf | where ObjectName == 'K8SContainer' | where CounterName == 'restartTimeEpoch' | where TimeGenerated > ago({0}) | count" +# container log +CONTAINER_LOG_QUERY = "ContainerLog | where TimeGenerated > ago({0}) | count" +# insights metrics +INSIGHTS_METRICS_QUERY = "InsightsMetrics | where TimeGenerated > ago({0}) | count" + +# custom metrics +METRICS_API_VERSION = '2019-07-01' +DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES = 10 + +# node metrics +NODE_METRICS_NAMESPACE = 'insights.container/nodes' +NODE_METRIC_METRIC_AGGREGATION = 'average' +NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME = 'cpuUsageMilliCores' +NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME = 'cpuUsagePercentage' +NODE_MEMORY_RSS_METRIC_NAME = 'memoryRssBytes' +NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME = 'memoryRssPercentage' +NODE_MEMORY_WS_METRIC_NAME = 'memoryWorkingSetBytes' +NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME = 'memoryWorkingSetPercentage' +NODE_COUNT_METRIC_NAME = 'nodesCount' +NODE_DISK_USAGE_PERCENTAGE_METRIC_NAME = 'diskUsedPercentage(Preview)' + +# pod metrics +POD_METRICS_NAMESPACE = 'insights.container/pods' +POD_METRIC_METRIC_AGGREGATION = 'average' +POD_COUNT_METRIC_NAME = 'PodCount' diff --git a/test/e2e/src/common/helm_utility.py b/test/e2e/src/common/helm_utility.py new file mode 100644 index 000000000..6eac1e071 --- /dev/null +++ b/test/e2e/src/common/helm_utility.py @@ -0,0 +1,68 @@ +import os +import pytest +import subprocess + + +# Function to pull helm charts +def pull_helm_chart(registry_path): + os.environ['HELM_EXPERIMENTAL_OCI'] = '1' + cmd_helm_chart_pull = ["helm", "chart", "pull", registry_path] + response_helm_chart_pull = subprocess.Popen(cmd_helm_chart_pull, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output_helm_chart_pull, error_helm_chart_pull = response_helm_chart_pull.communicate() + if response_helm_chart_pull.returncode != 0: + pytest.fail("Unable to pull helm chart from the registry '{}': ".format(registry_path) + error_helm_chart_pull.decode("ascii")) + return output_helm_chart_pull.decode("ascii") + + +# Function to export helm charts +def export_helm_chart(registry_path, destination): + cmd_helm_chart_export = ["helm", "chart", "export", registry_path, "--destination", destination] + response_helm_chart_export = subprocess.Popen(cmd_helm_chart_export, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output_helm_chart_export, error_helm_chart_export = response_helm_chart_export.communicate() + if response_helm_chart_export.returncode != 0: + pytest.fail("Unable to export helm chart from the registry '{}': ".format(registry_path) + error_helm_chart_export.decode("ascii")) + return output_helm_chart_export.decode("ascii") + + +# Function to add a helm repository +def add_helm_repo(repo_name, repo_url): + cmd_helm_repo = ["helm", "repo", "add", repo_name, repo_url] + response_helm_repo = subprocess.Popen(cmd_helm_repo, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output_helm_repo, error_helm_repo = response_helm_repo.communicate() + if response_helm_repo.returncode != 0: + pytest.fail("Unable to add repository {} to helm: ".format(repo_url) + error_helm_repo.decode("ascii")) + return output_helm_repo.decode("ascii") + + +# Function to install helm charts +def install_helm_chart(helm_release_name, helm_release_namespace, 
helm_chart_path, wait=False, **kwargs): + cmd_helm_install = ["helm", "install", helm_release_name, helm_chart_path, "--namespace", helm_release_namespace] + if wait: + cmd_helm_install.extend(["--wait"]) + for key, value in kwargs.items(): + cmd_helm_install.extend(["--set", "{}={}".format(key, value)]) + response_helm_install = subprocess.Popen(cmd_helm_install, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output_helm_install, error_helm_install = response_helm_install.communicate() + if response_helm_install.returncode != 0: + pytest.fail("Unable to install helm release: " + error_helm_install.decode("ascii")) + return output_helm_install.decode("ascii") + + +# Function to delete a helm release +def delete_helm_release(helm_release_name, helm_release_namespace): + cmd_helm_delete = ["helm", "delete", helm_release_name, "--namespace", helm_release_namespace] + response_helm_delete = subprocess.Popen(cmd_helm_delete, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output_helm_delete, error_helm_delete = response_helm_delete.communicate() + if response_helm_delete.returncode != 0: + pytest.fail("Error occurred while deleting the helm release: " + error_helm_delete.decode("ascii")) + return output_helm_delete.decode("ascii") + + +# Function to list helm releases +def list_helm_release(helm_release_namespace): + cmd_helm_list = ["helm", "list", "--namespace", helm_release_namespace] + response_helm_list = subprocess.Popen(cmd_helm_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output_helm_list, error_helm_list = response_helm_list.communicate() + if response_helm_list.returncode != 0: + pytest.fail("Error occurred while fetching the helm release: " + error_helm_list.decode("ascii")) + return output_helm_list.decode("ascii") diff --git a/test/e2e/src/common/kubernetes_configmap_utility.py b/test/e2e/src/common/kubernetes_configmap_utility.py new file mode 100644 index 000000000..caee9628e --- /dev/null +++ b/test/e2e/src/common/kubernetes_configmap_utility.py @@ -0,0 +1,8 @@ +import pytest + + +def get_namespaced_configmap(api_instance, namespace, configmap_name): + try: + return api_instance.read_namespaced_config_map(configmap_name, namespace) + except Exception as e: + pytest.fail("Error occurred when retrieving configmap: " + str(e)) diff --git a/test/e2e/src/common/kubernetes_crd_utility.py b/test/e2e/src/common/kubernetes_crd_utility.py new file mode 100644 index 000000000..f84092878 --- /dev/null +++ b/test/e2e/src/common/kubernetes_crd_utility.py @@ -0,0 +1,27 @@ +import pytest + +from kubernetes import watch + + +# Function to get the CRD instance +def get_crd_instance(api_instance, group, version, namespace, plural, crd_name): + try: + return api_instance.get_namespaced_custom_object(group, version, namespace, plural, crd_name) + except Exception as e: + pytest.fail("Error occurred when retrieving crd information: " + str(e)) + + +# Function that watches events corresponding to a given CRD instance and passes the events to a callback function +def watch_crd_instance(api_instance, group, version, namespace, plural, crd_name, timeout, callback=None): + if not callback: + pytest.fail("callback should be specified") + + field_selector = "metadata.name={}".format(crd_name) if crd_name else "" + try: + w = watch.Watch() + for event in w.stream(api_instance.list_namespaced_custom_object, group, version, namespace, plural, field_selector=field_selector, timeout_seconds=timeout): + if callback(event): + return + except Exception as e: + pytest.fail("Error occurred when watching crd
instance events: " + str(e)) + pytest.fail("The watch on the crd instance events has timed out.") diff --git a/test/e2e/src/common/kubernetes_daemonset_utility.py b/test/e2e/src/common/kubernetes_daemonset_utility.py new file mode 100644 index 000000000..dd76a11d9 --- /dev/null +++ b/test/e2e/src/common/kubernetes_daemonset_utility.py @@ -0,0 +1,36 @@ +import pytest +from kubernetes import watch + +# Returns a list of daemon_sets in a given namespace +def list_daemon_set(api_instance, namespace, field_selector="", label_selector=""): + try: + return api_instance.list_namespaced_daemon_set(namespace, field_selector=field_selector, label_selector=label_selector) + except Exception as e: + pytest.fail("Error occured when retrieving daemon_sets: " + str(e)) + +# Deletes a daemon_set +def delete_daemon_set(api_instance, namespace, daemon_set_name): + try: + return api_instance.delete_namespaced_daemon_set(daemon_set_name, namespace) + except Exception as e: + pytest.fail("Error occured when deleting daemon_set: " + str(e)) + +# Read a daemon_set +def read_daemon_set(api_instance, namespace, daemon_set_name): + try: + return api_instance.read_namespaced_daemon_set(daemon_set_name, namespace) + except Exception as e: + pytest.fail("Error occured when reading daemon_set: " + str(e)) + +# Function that watches events corresponding to daemon_sets in the given namespace and passes the events to a callback function +def watch_daemon_set_status(api_instance, namespace, timeout, callback=None): + if not callback: + return + try: + w = watch.Watch() + for event in w.stream(api_instance.list_namespaced_daemon_set, namespace, timeout_seconds=timeout): + if callback(event): + return + except Exception as e: + print("Error occurred when checking daemon_set status: " + str(e)) + print("The watch on the daemon_set status has timed out. 
Please see the pod logs for more info.") diff --git a/test/e2e/src/common/kubernetes_deployment_utility.py b/test/e2e/src/common/kubernetes_deployment_utility.py new file mode 100644 index 000000000..1be7a6b71 --- /dev/null +++ b/test/e2e/src/common/kubernetes_deployment_utility.py @@ -0,0 +1,38 @@ +import pytest +from kubernetes import watch + +# Returns a list of deployments in a given namespace +def list_deployment(api_instance, namespace, field_selector="", label_selector=""): + try: + return api_instance.list_namespaced_deployment(namespace, field_selector=field_selector, label_selector=label_selector) + except Exception as e: + pytest.fail("Error occurred when retrieving deployments: " + str(e)) + +# Deletes a deployment +def delete_deployment(api_instance, namespace, deployment_name): + try: + return api_instance.delete_namespaced_deployment(deployment_name, namespace) + except Exception as e: + pytest.fail("Error occurred when deleting deployment: " + str(e)) + + +# Reads a deployment +def read_deployment(api_instance, namespace, deployment_name): + try: + return api_instance.read_namespaced_deployment(deployment_name, namespace) + except Exception as e: + pytest.fail("Error occurred when reading deployment: " + str(e)) + +# Function that watches events corresponding to deployments in the given namespace and passes the events to a callback function +def watch_deployment_status(api_instance, namespace, timeout, callback=None): + if not callback: + return + try: + w = watch.Watch() + for event in w.stream(api_instance.list_namespaced_deployment, namespace, timeout_seconds=timeout): + if callback(event): + return + except Exception as e: + print("Error occurred when checking deployment status: " + str(e)) + print("The watch on the deployment status has timed out.
Please see the pod logs for more info.") + \ No newline at end of file diff --git a/test/e2e/src/common/kubernetes_namespace_utility.py b/test/e2e/src/common/kubernetes_namespace_utility.py new file mode 100644 index 000000000..cea5788c5 --- /dev/null +++ b/test/e2e/src/common/kubernetes_namespace_utility.py @@ -0,0 +1,32 @@ +import pytest +from kubernetes import watch + + +# Function that watches events corresponding to kubernetes namespaces and passes the events to a callback function +def watch_namespace(api_instance, timeout, callback=None): + if not callback: + return + try: + w = watch.Watch() + for event in w.stream(api_instance.list_namespace, timeout_seconds=timeout): + if callback(event): + return + except Exception as e: + pytest.fail("Error occurred when checking namespace status: " + str(e)) + pytest.fail("The watch on the namespaces has timed out.") + + +# Function to list all kubernetes namespaces +def list_namespace(api_instance): + try: + return api_instance.list_namespace() + except Exception as e: + pytest.fail("Error occurred when retrieving namespaces: " + str(e)) + + +# Function to delete a kubernetes namespace +def delete_namespace(api_instance, namespace_name): + try: + return api_instance.delete_namespace(namespace_name) + except Exception as e: + pytest.fail("Error occurred when deleting namespace: " + str(e)) diff --git a/test/e2e/src/common/kubernetes_node_utility.py b/test/e2e/src/common/kubernetes_node_utility.py new file mode 100644 index 000000000..050ce8b87 --- /dev/null +++ b/test/e2e/src/common/kubernetes_node_utility.py @@ -0,0 +1,12 @@ +import pytest + +def get_kubernetes_node_count(api_instance): + node_list = list_kubernetes_nodes(api_instance) + return len(node_list.items) + +def list_kubernetes_nodes(api_instance): + try: + return api_instance.list_node() + except Exception as e: + pytest.fail("Error occurred while retrieving node information: " + str(e)) + diff --git a/test/e2e/src/common/kubernetes_pod_utility.py b/test/e2e/src/common/kubernetes_pod_utility.py new file mode 100644 index 000000000..27345fae7 --- /dev/null +++ b/test/e2e/src/common/kubernetes_pod_utility.py @@ -0,0 +1,65 @@ +import pytest +import time + +from kubernetes import watch +from kubernetes.stream import stream + +# Returns a kubernetes pod object in a given namespace. Object description at: https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1PodList.md +def get_pod(api_instance, namespace, pod_name): + try: + return api_instance.read_namespaced_pod(pod_name, namespace) + except Exception as e: + pytest.fail("Error occurred when retrieving pod information: " + str(e)) + + +# Returns a list of kubernetes pod objects in a given namespace.
Object description at: https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1PodList.md +def get_pod_list(api_instance, namespace, label_selector=""): + try: + return api_instance.list_namespaced_pod(namespace, label_selector=label_selector) + except Exception as e: + pytest.fail("Error occurred when retrieving pod information: " + str(e)) + +# get the content of the log file in the container via exec +def get_log_file_content(api_instance, namespace, podName, logfilePath): + try: + exec_command = ['tar','cf', '-', logfilePath] + return stream(api_instance.connect_get_namespaced_pod_exec, podName, namespace, command=exec_command, stderr=True, stdin=False, stdout=True, tty=False) + except Exception as e: + pytest.fail("Error occurred when retrieving log file content: " + str(e)) + +# Function that watches events corresponding to pods in the given namespace and passes the events to a callback function +def watch_pod_status(api_instance, namespace, timeout, callback=None): + if not callback: + return + try: + w = watch.Watch() + for event in w.stream(api_instance.list_namespaced_pod, namespace, timeout_seconds=timeout): + if callback(event): + return + except Exception as e: + pytest.fail("Error occurred when checking pod status: " + str(e)) + pytest.fail("The watch on the pods has timed out. Please see the pod logs for more info.") + + +# Function that watches events corresponding to pod logs and passes them to a callback function +def watch_pod_logs(api_instance, namespace, pod_name, container_name, timeout_seconds, callback=None): + if not callback: + return + try: + w = watch.Watch() + timeout = time.time() + timeout_seconds + for event in w.stream(api_instance.read_namespaced_pod_log, pod_name, namespace, container=container_name): + if callback(event): + return + if time.time() > timeout: + pytest.fail("The watch on the pod logs has timed out.") + except Exception as e: + pytest.fail("Error occurred when checking pod logs: " + str(e)) + + +# Function that returns the pod logs of a given container. 
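+# Any API error is surfaced as a test failure via pytest.fail rather than being raised to the caller.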
+def get_pod_logs(api_instance, pod_namespace, pod_name, container_name): + try: + return api_instance.read_namespaced_pod_log(pod_name, pod_namespace, container=container_name) + except Exception as e: + pytest.fail("Error occurred when fetching pod logs: " + str(e)) diff --git a/test/e2e/src/common/kubernetes_secret_utility.py b/test/e2e/src/common/kubernetes_secret_utility.py new file mode 100644 index 000000000..8cc07fd4d --- /dev/null +++ b/test/e2e/src/common/kubernetes_secret_utility.py @@ -0,0 +1,26 @@ +import sys + +from kubernetes import watch + + +# This function returns the kubernetes secret object present in a given namespace +def get_kubernetes_secret(api_instance, namespace, secret_name): + try: + return api_instance.read_namespaced_secret(secret_name, namespace) + except Exception as e: + sys.exit("Error occurred when retrieving secret '{}': ".format(secret_name) + str(e)) + + +# Function that watches events corresponding to kubernetes secrets and passes the events to a callback function +def watch_kubernetes_secret(api_instance, namespace, secret_name, timeout, callback=None): + if not callback: + return + field_selector = "metadata.name={}".format(secret_name) if secret_name else "" + try: + w = watch.Watch() + for event in w.stream(api_instance.list_namespaced_secret, namespace, field_selector=field_selector, timeout_seconds=timeout): + if callback(event): + return + except Exception as e: + sys.exit("Error occurred when watching kubernetes secret events: " + str(e)) + sys.exit("The watch on the kubernetes secret events has timed out. Please see the pod logs for more info.") diff --git a/test/e2e/src/common/kubernetes_service_utility.py b/test/e2e/src/common/kubernetes_service_utility.py new file mode 100644 index 000000000..694af885a --- /dev/null +++ b/test/e2e/src/common/kubernetes_service_utility.py @@ -0,0 +1,19 @@ +import pytest + +from kubernetes import watch + + +# Returns a list of services in a given namespace +def list_service(api_instance, namespace, field_selector="", label_selector=""): + try: + return api_instance.list_namespaced_service(namespace, field_selector=field_selector, label_selector=label_selector) + except Exception as e: + pytest.fail("Error occurred when retrieving services: " + str(e)) + + +# Deletes a service +def delete_service(api_instance, namespace, service_name): + try: + return api_instance.delete_namespaced_service(service_name, namespace) + except Exception as e: + pytest.fail("Error occurred when deleting service: " + str(e)) diff --git a/test/e2e/src/common/kubernetes_version_utility.py b/test/e2e/src/common/kubernetes_version_utility.py new file mode 100644 index 000000000..884d1df2f --- /dev/null +++ b/test/e2e/src/common/kubernetes_version_utility.py @@ -0,0 +1,9 @@ +import pytest + + +def get_kubernetes_server_version(api_instance): + try: + api_response = api_instance.get_code() + return api_response.git_version + except Exception as e: + pytest.fail("Error occurred when retrieving kubernetes server version: " + str(e)) diff --git a/test/e2e/src/common/results_utility.py b/test/e2e/src/common/results_utility.py new file mode 100644 index 000000000..14066bf16 --- /dev/null +++ b/test/e2e/src/common/results_utility.py @@ -0,0 +1,24 @@ +import pytest +import shutil +import tarfile + +from pathlib import Path + + + +# Function to create the test result directory +def create_results_dir(results_dir): + print(results_dir) + try: + Path(results_dir).mkdir(parents=True, exist_ok=True) + except Exception as e: + pytest.fail("Unable to
create the results directory: " + str(e)) + + +# Function to append logs from the test run into a result file +def append_result_output(message, result_file_path): + try: + with open(result_file_path, "a") as result_file: + result_file.write(message) + except Exception as e: + pytest.fail("Error while appending message '{}' to results file: ".format(message) + str(e)) diff --git a/test/e2e/src/core/Dockerfile b/test/e2e/src/core/Dockerfile new file mode 100644 index 000000000..9f85bdf4c --- /dev/null +++ b/test/e2e/src/core/Dockerfile @@ -0,0 +1,17 @@ +FROM python:3.6 + +RUN pip install --trusted-host pypi.org --trusted-host files.pythonhosted.org pytest pytest-xdist filelock requests kubernetes adal msrestazure + +RUN curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash \ + && helm version + +COPY ./core/e2e_tests.sh / +COPY ./core/pytest.ini /e2etests/ +COPY ./core/conftest.py /e2etests/ +COPY ./core/helper.py /e2etests/ +COPY ./core/ /e2etests/ +COPY ./common/ /e2etests/ +COPY ./tests/ /e2etests/ + +RUN ["chmod", "+x", "/e2e_tests.sh"] +ENTRYPOINT ["./e2e_tests.sh"] diff --git a/test/e2e/src/core/conftest.py b/test/e2e/src/core/conftest.py new file mode 100644 index 000000000..e659d5189 --- /dev/null +++ b/test/e2e/src/core/conftest.py @@ -0,0 +1,90 @@ +import pytest +import os +import time +import pickle + +import constants + +from filelock import FileLock +from pathlib import Path +from results_utility import create_results_dir, append_result_output + +pytestmark = pytest.mark.agentests + +# Fixture to collect all the environment variables, install pre-requisites. It will be run before the tests. +@pytest.fixture(scope='session', autouse=True) +def env_dict(): + my_file = Path("env.pkl") # File to store the environment variables. + with FileLock(str(my_file) + ".lock"): # Locking the file since each test will be run in parallel as separate subprocesses and may try to access the file simultaneously. 
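+ # The first subprocess to acquire the lock performs setup and pickles env_dict into env.pkl; later subprocesses simply load the pickled dictionary.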
+ env_dict = {} + if not my_file.is_file(): + # Creating the results directory + create_results_dir('/tmp/results') + + # Setting some environment variables + env_dict['SETUP_LOG_FILE'] = '/tmp/results/setup' + env_dict['TEST_AGENT_LOG_FILE'] = '/tmp/results/containerinsights' + env_dict['NUM_TESTS_COMPLETED'] = 0 + + print("Starting setup...") + append_result_output("Starting setup...\n", env_dict['SETUP_LOG_FILE']) + + # Collecting environment variables + env_dict['TENANT_ID'] = os.getenv('TENANT_ID') + env_dict['CLIENT_ID'] = os.getenv('CLIENT_ID') + env_dict['CLIENT_SECRET'] = os.getenv('CLIENT_SECRET') + + # get default query time interval for log analytics queries + queryTimeInterval = int(os.getenv('DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES')) if os.getenv('DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES') else constants.DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES + # add minute suffix since this format required for LA queries + env_dict['DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES'] = str(queryTimeInterval) + "m" + + # get default query time interval for metrics queries + env_dict['DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES'] = int(os.getenv('DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES')) if os.getenv('DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES') else constants.DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES + + + # expected agent pod restart count + env_dict['AGENT_POD_EXPECTED_RESTART_COUNT'] = int(os.getenv('AGENT_POD_EXPECTED_RESTART_COUNT')) if os.getenv('AGENT_POD_EXPECTED_RESTART_COUNT') else constants.AGENT_POD_EXPECTED_RESTART_COUNT + + # default to azure public cloud if AZURE_CLOUD not specified + env_dict['AZURE_ENDPOINTS'] = constants.AZURE_CLOUD_DICT.get(os.getenv('AZURE_CLOUD')) if os.getenv('AZURE_CLOUD') else constants.AZURE_PUBLIC_CLOUD_ENDPOINTS + + if not env_dict.get('TENANT_ID'): + pytest.fail('ERROR: variable TENANT_ID is required.') + + if not env_dict.get('CLIENT_ID'): + pytest.fail('ERROR: variable CLIENT_ID is required.') + + if not env_dict.get('CLIENT_SECRET'): + pytest.fail('ERROR: variable CLIENT_SECRET is required.') + + print("Setup Complete.") + append_result_output("Setup Complete.\n", env_dict['SETUP_LOG_FILE']) + + with Path.open(my_file, "wb") as f: + pickle.dump(env_dict, f, pickle.HIGHEST_PROTOCOL) + else: + with Path.open(my_file, "rb") as f: + env_dict = pickle.load(f) + + yield env_dict + + my_file = Path("env.pkl") + with FileLock(str(my_file) + ".lock"): + with Path.open(my_file, "rb") as f: + env_dict = pickle.load(f) + + env_dict['NUM_TESTS_COMPLETED'] = 1 + env_dict.get('NUM_TESTS_COMPLETED') + if env_dict['NUM_TESTS_COMPLETED'] == int(os.getenv('NUM_TESTS')): + # Checking if cleanup is required. + if os.getenv('SKIP_CLEANUP'): + return + print('Starting cleanup...') + append_result_output("Starting Cleanup...\n", env_dict['SETUP_LOG_FILE']) + + print("Cleanup Complete.") + append_result_output("Cleanup Complete.\n", env_dict['SETUP_LOG_FILE']) + return + + with Path.open(my_file, "wb") as f: + pickle.dump(env_dict, f, pickle.HIGHEST_PROTOCOL) diff --git a/test/e2e/src/core/e2e_tests.sh b/test/e2e/src/core/e2e_tests.sh new file mode 100644 index 000000000..3bfafdce9 --- /dev/null +++ b/test/e2e/src/core/e2e_tests.sh @@ -0,0 +1,26 @@ +#!/bin/sh + +results_dir="${RESULTS_DIR:-/tmp/results}" + +# saveResults prepares the results for handoff to the Sonobuoy worker. +# See: https://github.com/vmware-tanzu/sonobuoy/blob/master/docs/plugins.md +saveResults() { + cd ${results_dir} + + # Sonobuoy worker expects a tar file. 
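+ # Everything under the results directory is bundled into a single archive.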
+ tar czf results.tar.gz * + + # Signal to the worker that we are done and where to find the results. + printf ${results_dir}/results.tar.gz > ${results_dir}/done +} + +# Ensure that we tell the Sonobuoy worker we are done regardless of results. +trap saveResults EXIT + +# The variable 'TEST_LIST' should be provided if we want to run specific tests. If not provided, all tests are run + +NUM_PROCESS=$(pytest /e2etests/ --collect-only -k "$TEST_NAME_LIST" -m "$TEST_MARKER_LIST" | grep " 0): + pytest.fail("numberMisscheduled shouldnt be greater than 0 for the daemonset {}.".format( + daemonset_name)) + + except Exception as e: + pytest.fail("Error occured while checking daemonset status: " + str(e)) + +# This function checks the status of kubernetes pods +def check_kubernetes_pods_status(pod_namespace, label_selector, expectedPodRestartCount, outfile=None): + try: + api_instance = client.CoreV1Api() + pod_list = get_pod_list(api_instance, pod_namespace, label_selector) + append_result_output("podlist output {}\n".format(pod_list), outfile) + if not pod_list: + pytest.fail("pod_list shouldnt be null or empty") + pods = pod_list.items + if not pods: + pytest.fail("pod items shouldnt be null or empty") + if len(pods) <= 0: + pytest.fail("pod count should be greater than 0") + for pod in pods: + status = pod.status + podstatus = status.phase + if not podstatus: + pytest.fail("status should not be null or empty") + if podstatus != "Running": + pytest.fail("pod status should be in running state") + containerStatuses = status.container_statuses + if not containerStatuses: + pytest.fail("containerStatuses shouldnt be nil or empty") + if len(containerStatuses) <= 0: + pytest.fail("length containerStatuses should be greater than 0") + for containerStatus in containerStatuses: + containerId = containerStatus.container_id + if not containerId: + pytest.fail("containerId shouldnt be nil or empty") + image = containerStatus.image + if not image: + pytest.fail("image shouldnt be nil or empty") + imageId = containerStatus.image_id + if not imageId: + pytest.fail("imageId shouldnt be nil or empty") + restartCount = containerStatus.restart_count + if restartCount > expectedPodRestartCount: + pytest.fail("restartCount shouldnt be greater than expected pod restart count: {}".format(expectedPodRestartCount)) + ready = containerStatus.ready + if not ready: + pytest.fail("container status should be in ready state") + containerState = containerStatus.state + if not containerState.running: + pytest.fail("container state should be in running state") + except Exception as e: + pytest.fail("Error occured while checking pods status: " + str(e)) + + +def check_namespace_status_using_watch(outfile=None, namespace_list=None, timeout=300): + namespace_dict = {} + for namespace in namespace_list: + namespace_dict[namespace] = 0 + append_result_output( + "Namespace dict: {}\n".format(namespace_dict), outfile) + print("Generated the namespace dictionary.") + + # THe callback function to check the namespace status + def namespace_event_callback(event): + try: + append_result_output("{}\n".format(event), outfile) + namespace_name = event['raw_object'].get('metadata').get('name') + namespace_status = event['raw_object'].get('status') + if not namespace_status: + return False + if namespace_status.get('phase') == 'Active': + namespace_dict[namespace_name] = 1 + if all(ele == 1 for ele in list(namespace_dict.values())): + return True + return False + except Exception as e: + pytest.fail( + "Error occured while processing the 
namespace event: " + str(e)) + + # Checking the namespace status + api_instance = client.CoreV1Api() + watch_namespace(api_instance, timeout, namespace_event_callback) + +# This function checks the status of daemonset in a given namespace. The daemonset to be monitored are identified using the pod label list parameter. +def check_kubernetes_daemonset_status_using_watch(daemonset_namespace, outfile=None, daemonset_label_list=None, timeout=300): + daemonset_label_dict = {} + if daemonset_label_list: # This parameter is a list of label values to identify the daemonsets that we want to monitor in the given namespace + for daemonset_label in daemonset_label_list: + daemonset_label_dict[daemonset_label] = 0 + append_result_output("daemonset label dict: {}\n".format( + daemonset_label_dict), outfile) + print("Generated the daemonset dictionary.") + + # The callback function to check if the pod is in running state + def daemonset_event_callback(event): + try: + # append_result_output("{}\n".format(event), outfile) + daemonset_status = event['raw_object'].get('status') + daemonset_metadata = event['raw_object'].get('metadata') + daemonset_metadata_labels = daemonset_metadata.get('labels') + if not daemonset_metadata_labels: + return False + + # It contains the list of all label values for the pod whose event was called. + daemonset_metadata_label_values = daemonset_metadata_labels.values() + # This label value will be common in pod event and label list provided and will be monitored + current_label_value = None + for label_value in daemonset_metadata_label_values: + if label_value in daemonset_label_dict: + current_label_value = label_value + if not current_label_value: + return False + + currentNumberScheduled = daemonset_status.get( + 'currentNumberScheduled') + desiredNumberScheduled = daemonset_status.get( + 'desiredNumberScheduled') + numberAvailable = daemonset_status.get('numberAvailable') + numberReady = daemonset_status.get('numberReady') + numberMisscheduled = daemonset_status.get('numberMisscheduled') + + if (currentNumberScheduled != desiredNumberScheduled): + pytest.fail("currentNumberScheduled doesnt match with currentNumberScheduled for the daemonset {}.".format( + daemonset_metadata.get('name'))) + + if (numberAvailable != numberReady): + pytest.fail("numberAvailable doesnt match with expected numberReady for the daemonset {}.".format( + daemonset_metadata.get('name'))) + + if (numberMisscheduled > 0): + pytest.fail("numberMisscheduled is greater than 0 for the daemonset {}.".format( + daemonset_metadata.get('name'))) + + return True + except Exception as e: + print("Error occured while processing the pod event: " + str(e)) + + # Checking status of all pods + if daemonset_label_dict: + api_instance = client.AppsV1Api() + watch_daemon_set_status( + api_instance, daemonset_namespace, timeout, daemonset_event_callback) + +# This function checks the status of deployment in a given namespace. The deployment to be monitored are identified using the pod label list parameter. 
+def check_kubernetes_deployments_status_using_watch(deployment_namespace, outfile=None, deployment_label_list=None, timeout=300): + deployment_label_dict = {} + if deployment_label_list: # This parameter is a list of label values to identify the deployments that we want to monitor in the given namespace + for deployment_label in deployment_label_list: + deployment_label_dict[deployment_label] = 0 + append_result_output("Deployment label dict: {}\n".format( + deployment_label_dict), outfile) + print("Generated the deployment dictionary.") + + # The callback function to check if the pod is in running state + def deployment_event_callback(event): + try: + # append_result_output("{}\n".format(event), outfile) + deployment_status = event['raw_object'].get('status') + deployment_metadata = event['raw_object'].get('metadata') + deployment_metadata_labels = deployment_metadata.get('labels') + if not deployment_metadata_labels: + return False + + # It contains the list of all label values for the deployment whose event was called. + deployment_metadata_label_values = deployment_metadata_labels.values() + # This label value will be common in deployment event and label list provided and will be monitored + current_label_value = None + for label_value in deployment_metadata_label_values: + if label_value in deployment_label_dict: + current_label_value = label_value + if not current_label_value: + return False + + availableReplicas = deployment_status.get('availableReplicas') + readyReplicas = deployment_status.get('readyReplicas') + replicas = deployment_status.get('replicas') + + if (replicas != availableReplicas): + pytest.fail("availableReplicas doesnt match with expected replicas for the deployment {}.".format( + deployment_metadata.get('name'))) + + if (replicas != readyReplicas): + pytest.fail("readyReplicas doesnt match with expected replicas for the deployment {}.".format( + deployment_metadata.get('name'))) + + return True + except Exception as e: + print("Error occured while processing the pod event: " + str(e)) + + # Checking status of all pods + if deployment_label_dict: + api_instance = client.AppsV1Api() + watch_deployment_status( + api_instance, deployment_namespace, timeout, deployment_event_callback) + +# This function checks the status of pods in a given namespace. The pods to be monitored are identified using the pod label list parameter. +def check_kubernetes_pods_status_using_watch(pod_namespace, outfile=None, pod_label_list=None, timeout=300): + pod_label_dict = {} + if pod_label_list: # This parameter is a list of label values to identify the pods that we want to monitor in the given namespace + for pod_label in pod_label_list: + pod_label_dict[pod_label] = 0 + append_result_output( + "Pod label dict: {}\n".format(pod_label_dict), outfile) + print("Generated the pods dictionary.") + + # The callback function to check if the pod is in running state + def pod_event_callback(event): + try: + # append_result_output("{}\n".format(event), outfile) + pod_status = event['raw_object'].get('status') + pod_metadata = event['raw_object'].get('metadata') + pod_metadata_labels = pod_metadata.get('labels') + if not pod_metadata_labels: + return False + + # It contains the list of all label values for the pod whose event was called. 
+ pod_metadata_label_values = pod_metadata_labels.values() + # This label value will be common in pod event and label list provided and will be monitored + current_label_value = None + for label_value in pod_metadata_label_values: + if label_value in pod_label_dict: + current_label_value = label_value + if not current_label_value: + return False + + if pod_status.get('containerStatuses'): + for container in pod_status.get('containerStatuses'): + if container.get('restartCount') > 0: + pytest.fail("The pod {} was restarted. Please see the pod logs for more info.".format( + container.get('name'))) + if not container.get('state').get('running'): + pod_label_dict[current_label_value] = 0 + return False + else: + pod_label_dict[current_label_value] = 1 + if all(ele == 1 for ele in list(pod_label_dict.values())): + return True + return False + except Exception as e: + pytest.fail( + "Error occured while processing the pod event: " + str(e)) + + # Checking status of all pods + if pod_label_dict: + api_instance = client.CoreV1Api() + watch_pod_status(api_instance, pod_namespace, + timeout, pod_event_callback) + + +# Function to check if the crd instance status has been updated with the status fields mentioned in the 'status_list' parameter +def check_kubernetes_crd_status_using_watch(crd_group, crd_version, crd_namespace, crd_plural, crd_name, status_dict={}, outfile=None, timeout=300): + # The callback function to check if the crd event received has been updated with the status fields + def crd_event_callback(event): + try: + append_result_output("{}\n".format(event), outfile) + crd_status = event['raw_object'].get('status') + if not crd_status: + return False + for status_field in status_dict: + if not crd_status.get(status_field): + return False + if crd_status.get(status_field) != status_dict.get(status_field): + pytest.fail( + "The CRD instance status has been updated with incorrect value for '{}' field.".format(status_field)) + return True + except Exception as e: + pytest.fail("Error occured while processing crd event: " + str(e)) + + # Checking if CRD instance has been updated with status fields + api_instance = client.CustomObjectsApi() + watch_crd_instance(api_instance, crd_group, crd_version, crd_namespace, + crd_plural, crd_name, timeout, crd_event_callback) + + +# Function to monitor the pod logs. It will ensure that are logs passed in the 'log_list' parameter are present in the container logs. +def check_kubernetes_pod_logs_using_watch(pod_namespace, pod_name, container_name, logs_list=None, error_logs_list=None, outfile=None, timeout=300): + logs_dict = {} + for log in logs_list: + logs_dict[log] = 0 + print("Generated the logs dictionary.") + + # The callback function to examine the pod log + def pod_log_event_callback(event): + try: + append_result_output("{}\n".format(event), outfile) + for error_log in error_logs_list: + if error_log in event: + pytest.fail("Error log found: " + event) + for log in logs_dict: + if log in event: + logs_dict[log] = 1 + if all(ele == 1 for ele in list(logs_dict.values())): + return True + return False + except Exception as e: + pytest.fail( + "Error occured while processing pod log event: " + str(e)) + + # Checking the pod logs + api_instance = client.CoreV1Api() + watch_pod_logs(api_instance, pod_namespace, pod_name, + container_name, timeout, pod_log_event_callback) + +# Function to monitor the kubernetes secret. It will determine if the secret has been successfully created. 
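+# The watch returns as soon as an event carrying non-empty secret data is observed; otherwise it runs until the timeout expires.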
+def check_kubernetes_secret_using_watch(secret_namespace, secret_name, timeout=300): + # The callback function to check if the secret event received has secret data + def secret_event_callback(event): + try: + secret_data = event['raw_object'].get('data') + if not secret_data: + return False + return True + except Exception as e: + pytest.fail( + "Error occured while processing secret event: " + str(e)) + + # Checking the kubernetes secret + api_instance = client.CoreV1Api() + watch_kubernetes_secret(api_instance, secret_namespace, + secret_name, timeout, secret_event_callback) diff --git a/test/e2e/src/core/pytest.ini b/test/e2e/src/core/pytest.ini new file mode 100644 index 000000000..f4dc462f0 --- /dev/null +++ b/test/e2e/src/core/pytest.ini @@ -0,0 +1,4 @@ +[pytest] +markers = + agentests: marks tests are a part of arc agent conformance tests (deselect with '-m "not agentests"') + \ No newline at end of file diff --git a/test/e2e/src/tests/test_ds_workflows.py b/test/e2e/src/tests/test_ds_workflows.py new file mode 100755 index 000000000..81ef08325 --- /dev/null +++ b/test/e2e/src/tests/test_ds_workflows.py @@ -0,0 +1,60 @@ +import pytest +import constants + +from kubernetes import client, config +from kubernetes_pod_utility import get_pod_list, get_log_file_content +from results_utility import append_result_output +from helper import check_kubernetes_deployment_status +from helper import check_kubernetes_daemonset_status +from helper import check_kubernetes_pods_status +from kubernetes.stream import stream + +pytestmark = pytest.mark.agentests + +# validation of ds agent workflows +def test_ds_workflows(env_dict): + print("Starting daemonset agent workflows test.") + append_result_output("test_ds_workflows start \n", + env_dict['TEST_AGENT_LOG_FILE']) + # Loading in-cluster kube-config + try: + config.load_incluster_config() + except Exception as e: + pytest.fail("Error loading the in-cluster config: " + str(e)) + + print("getting daemonset pod list") + api_instance = client.CoreV1Api() + pod_list = get_pod_list(api_instance, constants.AGENT_RESOURCES_NAMESPACE, + constants.AGENT_DAEMON_SET_PODS_LABEL_SELECTOR) + if not pod_list: + pytest.fail("daemonset pod_list shouldnt be null or empty") + + if len(pod_list.items) <= 0: + pytest.fail("number of items in daemonset pod list should be greater than 0") + + for podItem in pod_list.items: + podName = podItem.metadata.name + logcontent = get_log_file_content( + api_instance, constants.AGENT_RESOURCES_NAMESPACE, podName, constants.AGENT_OMSAGENT_LOG_PATH) + if not logcontent: + pytest.fail("logcontent should not be null or empty for pod: " + podName) + loglines = logcontent.split("\n") + if len(loglines) <= 0: + pytest.fail("number of log lines should be greater than 0 for pod :" + podName) + + IsContainerPerfEmitStream = False + IsContainerInventoryStream = False + for line in loglines: + if line.find(constants.CONTAINER_PERF_EMIT_STREAM) >= 0: + IsContainerPerfEmitStream = True + if line.find(constants.CONTAINER_INVENTORY_EMIT_STREAM) >= 0: + IsContainerInventoryStream = True + + if IsContainerPerfEmitStream == False: + pytest.fail("ContainerPerf stream not emitted successfully from pod:" + podName) + if IsContainerInventoryStream == False: + pytest.fail("ContainerInventory stream not emitted successfully from pod:" + podName) + + append_result_output("test_ds_workflows end \n", + env_dict['TEST_AGENT_LOG_FILE']) + print("Successfully completed daemonset workflows test.") diff --git a/test/e2e/src/tests/test_e2e_workflows.py 
b/test/e2e/src/tests/test_e2e_workflows.py new file mode 100755 index 000000000..11a8e18e3 --- /dev/null +++ b/test/e2e/src/tests/test_e2e_workflows.py @@ -0,0 +1,330 @@ +import pytest +import constants +import requests + +from arm_rest_utility import fetch_aad_token +from kubernetes import client, config +from kubernetes_pod_utility import get_pod_list +from results_utility import append_result_output + + +pytestmark = pytest.mark.agentests + +# validation of workflows e2e +def test_e2e_workflows(env_dict): + print("Starting e2e workflows test.") + append_result_output("test_e2e_workflows start \n", + env_dict['TEST_AGENT_LOG_FILE']) + # Loading in-cluster kube-config + try: + config.load_incluster_config() + except Exception as e: + pytest.fail("Error loading the in-cluster config: " + str(e)) + + # query time interval for LA queries + queryTimeInterval = env_dict['DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES'] + if not queryTimeInterval: + pytest.fail("DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES should not be null or empty") + + # get the cluster resource id from replicaset pod envvars + api_instance = client.CoreV1Api() + pod_list = get_pod_list(api_instance, constants.AGENT_RESOURCES_NAMESPACE, + constants.AGENT_DEPLOYMENT_PODS_LABEL_SELECTOR) + + if not pod_list: + pytest.fail("pod_list shouldnt be null or empty") + + if len(pod_list.items) <= 0: + pytest.fail("number of items in pod list should be greater than 0") + + envVars = pod_list.items[0].spec.containers[0].env + if not envVars: + pytest.fail("environment variables should be defined in the replicaset pod") + + clusterResourceId = '' + for env in envVars: + if env.name == "AKS_RESOURCE_ID": + clusterResourceId = env.value + print("cluster resource id: {}".format(clusterResourceId)) + + if not clusterResourceId: + pytest.fail("failed to get clusterResourceId from replicaset pod environment variables") + + # fetch AAD token for log analytics resource for the queries + tenant_id = env_dict.get('TENANT_ID') + authority_uri = env_dict.get('AZURE_ENDPOINTS').get('activeDirectory') + tenant_id + client_id = env_dict.get('CLIENT_ID') + client_secret = env_dict.get('CLIENT_SECRET') + resource = env_dict.get('AZURE_ENDPOINTS').get('logAnalytics') + aad_token = fetch_aad_token(client_id, client_secret, authority_uri, resource) + if not aad_token: + pytest.fail("failed to fetch AAD token") + + access_token = aad_token.get('accessToken') + if not access_token: + pytest.fail("access_token shouldnt be null or empty") + + # validate e2e workflows by checking data in log analytics workspace through resource centric queries + queryUrl = resource + "/v1" + clusterResourceId + "/query" + Headers = { + "Authorization": str("Bearer " + access_token), + "Content-Type": "application/json" + } + # KubePodInventory + query = constants.KUBE_POD_INVENTORY_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('KUBE_POD_INVENTORY')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} and workflow: {1}".format(clusterResourceId, 'KUBE_POD_INVENTORY')) + + # KubeNodeInventory + query = constants.KUBE_NODE_INVENTORY_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + 
pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('KUBE_NODE_INVENTORY')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'KUBE_NODE_INVENTORY')) + + # KubeServices + query = constants.KUBE_SERVICES_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('KUBE_SERVICES')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'KUBE_SERVICES')) + + # KubeEvents + query = constants.KUBE_EVENTS_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('KUBE_EVENTS')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'KUBE_EVENTS')) + + # Container Node Inventory + query = constants.CONTAINER_NODE_INVENTORY_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_NODE_INVENTORY')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_NODE_INVENTORY')) + + # Node Perf + # cpu capacity + query = constants.NODE_PERF_CPU_CAPCITY_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('NODE_PERF_CPU_CAPCITY')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'NODE_PERF_CPU_CAPCITY')) + + # memory capacity + query = constants.NODE_PERF_MEMORY_CAPCITY_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('NODE_PERF_MEMORY_CAPCITY')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'NODE_PERF_MEMORY_CAPCITY')) + + # cpu allocatable + query = constants.NODE_PERF_CPU_ALLOCATABLE_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('NODE_PERF_CPU_ALLOCATABLE')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than 
for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'NODE_PERF_CPU_ALLOCATABLE')) + + # memory allocatable + query = constants.NODE_PERF_MEMORY_ALLOCATABLE_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('NODE_PERF_MEMORY_ALLOCATABLE')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'NODE_PERF_MEMORY_ALLOCATABLE')) + + # cpu usage + query = constants.NODE_PERF_CPU_USAGE_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('NODE_PERF_CPU_USAGE')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'NODE_PERF_CPU_USAGE')) + + # memory rss usage + query = constants.NODE_PERF_MEMORY_RSS_USAGE_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('NODE_PERF_MEMORY_RSS_USAGE')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'NODE_PERF_MEMORY_RSS_USAGE')) + + # memory ws usage + query = constants.NODE_PERF_MEMORY_WS_USAGE_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('NODE_PERF_MEMORY_WS_USAGE')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'NODE_PERF_MEMORY_WS_USAGE')) + + # restartime epoch + query = constants.NODE_PERF_RESTART_TIME_EPOCH_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('NODE_PERF_RESTART_TIME_EPOCH')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'NODE_PERF_RESTART_TIME_EPOCH')) + + # Container Perf + # container cpu limits + query = constants.CONTAINER_PERF_CPU_LIMITS_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_PERF_CPU_LIMITS')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_PERF_CPU_LIMITS')) + + # container memory limits + 
query = constants.CONTAINER_PERF_MEMORY_LIMITS_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_PERF_MEMORY_LIMITS')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_PERF_MEMORY_LIMITS')) + + # cpu requests + query = constants.CONTAINER_PERF_CPU_REQUESTS_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_PERF_CPU_REQUESTS')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_PERF_CPU_REQUESTS')) + + # memory requests + query = constants.CONTAINER_PERF_MEMORY_REQUESTS_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_PERF_MEMORY_REQUESTS_QUERY')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_PERF_MEMORY_REQUESTS')) + + # cpu usage + query = constants.CONTAINER_PERF_CPU_USAGE_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_PERF_CPU_USAGE')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_PERF_CPU_USAGE')) + + # memory rss usage + query = constants.CONTAINER_PERF_MEMORY_RSS_USAGE_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_PERF_MEMORY_RSS_USAGE')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_PERF_MEMORY_RSS_USAGE')) + + # memory ws usage + query = constants.CONTAINER_PERF_MEMORY_WS_USAGE_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_PERF_MEMORY_WS_USAGE')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_PERF_MEMORY_WS_USAGE')) + + # restart time epoch + query = constants.CONTAINER_PERF_RESTART_TIME_EPOCH_QUERY.format(queryTimeInterval) + 
params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_PERF_RESTART_TIME_EPOCH')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_PERF_RESTART_TIME_EPOCH')) + + # Container log + query = constants.CONTAINER_LOG_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_LOG')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_LOG')) + + # InsightsMetrics + query = constants.INSIGHTS_METRICS_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('INSIGHTS_METRICS')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'INSIGHTS_METRICS')) + + append_result_output("test_e2e_workflows end \n", + env_dict['TEST_AGENT_LOG_FILE']) + print("Successfully completed e2e workflows test.") diff --git a/test/e2e/src/tests/test_node_metrics_e2e_workflow.py b/test/e2e/src/tests/test_node_metrics_e2e_workflow.py new file mode 100755 index 000000000..4346f89a8 --- /dev/null +++ b/test/e2e/src/tests/test_node_metrics_e2e_workflow.py @@ -0,0 +1,420 @@ +import pytest +import constants +import requests + +from arm_rest_utility import fetch_aad_token +from kubernetes import client, config +from kubernetes_pod_utility import get_pod_list +from results_utility import append_result_output +from datetime import datetime, timedelta + +pytestmark = pytest.mark.agentests + +# validation of node metrics e2e workflow +def test_node_metrics_e2e_workflow(env_dict): + print("Starting node metrics e2e workflow test.") + append_result_output("test_node_metrics_e2e_workflow start \n", + env_dict['TEST_AGENT_LOG_FILE']) + # Loading in-cluster kube-config + try: + config.load_incluster_config() + except Exception as e: + pytest.fail("Error loading the in-cluster config: " + str(e)) + + # query time interval for metric queries + metricQueryIntervalInMins = env_dict['DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES'] + if not metricQueryIntervalInMins: + pytest.fail( + "DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES should not be null or empty or 0") + + # get the cluster resource id from replicaset pod envvars + api_instance = client.CoreV1Api() + pod_list = get_pod_list(api_instance, constants.AGENT_RESOURCES_NAMESPACE, + constants.AGENT_DEPLOYMENT_PODS_LABEL_SELECTOR) + + if not pod_list: + pytest.fail("pod_list shouldnt be null or empty") + + if len(pod_list.items) <= 0: + pytest.fail("number of items in pod list should be greater than 0") + + envVars = pod_list.items[0].spec.containers[0].env + if not envVars: + pytest.fail( + "environment variables should be defined in the replicaset pod") + + clusterResourceId = '' + for env in envVars: + 
if env.name == "AKS_RESOURCE_ID": + clusterResourceId = env.value + print("cluster resource id: {}".format(clusterResourceId)) + + if not clusterResourceId: + pytest.fail( + "failed to get clusterResourceId from replicaset pod environment variables") + + # fetch AAD token for metric queries + tenant_id = env_dict.get('TENANT_ID') + authority_uri = env_dict.get('AZURE_ENDPOINTS').get( + 'activeDirectory') + tenant_id + client_id = env_dict.get('CLIENT_ID') + client_secret = env_dict.get('CLIENT_SECRET') + resourceManager = env_dict.get('AZURE_ENDPOINTS').get('resourceManager') + aad_token = fetch_aad_token( + client_id, client_secret, authority_uri, resourceManager) + if not aad_token: + pytest.fail("failed to fetch AAD token") + + access_token = aad_token.get('accessToken') + if not access_token: + pytest.fail("access_token shouldnt be null or empty") + + # validate metrics e2e workflow + now = datetime.utcnow() + endtime = now.isoformat()[:-3]+'Z' + starttime = (now - timedelta(hours=0, + minutes=constants.DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES)).isoformat()[:-3]+'Z' + Headers = { + "Authorization": str("Bearer " + access_token), + "Content-Type": "application/json", + "content-length": "0" + } + params = {} + # node metric - memoryRssBytes + custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( + resourceManager.rstrip("/"), + clusterResourceId, + starttime, + endtime, + constants.NODE_MEMORY_RSS_METRIC_NAME, + constants.NODE_METRIC_METRIC_AGGREGATION, + constants.NODE_METRICS_NAMESPACE, + constants.METRICS_API_VERSION) + + response = requests.get(custommetricsUrl, params=params, + headers=Headers, verify=False) + + if not response: + pytest.fail( + "response of the metrics query API shouldnt be null or empty") + + if response.status_code != 200: + pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) + + responseJSON = response.json() + if not responseJSON: + pytest.fail("response JSON shouldnt be null or empty") + + namespace = responseJSON['namespace'] + if namespace != constants.NODE_METRICS_NAMESPACE: + pytest.fail("got the namespace: {0} but expected namespace:{1} in the response".format( + namespace, constants.NODE_METRICS_NAMESPACE)) + + responseValues = responseJSON['value'] + if not responseValues: + pytest.fail("response JSON shouldnt be null or empty") + + if len(responseValues) <= 0: + pytest.fail("length of value array in the response should be greater than 0") + + for responseVal in responseValues: + metricName = responseVal['name']['value'] + if metricName != constants.NODE_MEMORY_RSS_METRIC_NAME: + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_MEMORY_RSS_METRIC_NAME)) + timeseries = responseVal['timeseries'] + if not timeseries: + pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( + constants.NODE_MEMORY_RSS_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + if len(timeseries) <= 0: + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_MEMORY_RSS_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + + # node metric - memoryRssPercentage + custommetricsUrl = 
'{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( + resourceManager.rstrip("/"), + clusterResourceId, + starttime, + endtime, + constants.NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME, + constants.NODE_METRIC_METRIC_AGGREGATION, + constants.NODE_METRICS_NAMESPACE, + constants.METRICS_API_VERSION) + + response = requests.get(custommetricsUrl, params=params, + headers=Headers, verify=False) + + if not response: + pytest.fail( + "response of the metrics query API shouldnt be null or empty") + + if response.status_code != 200: + pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) + + responseJSON = response.json() + if not responseJSON: + pytest.fail("response JSON shouldnt be null or empty") + + namespace = responseJSON['namespace'] + if namespace != constants.NODE_METRICS_NAMESPACE: + pytest.fail("got the namespace: {0} but expected namespace:{1} in the response".format( + namespace, constants.NODE_METRICS_NAMESPACE)) + + responseValues = responseJSON['value'] + if not responseValues: + pytest.fail("response JSON shouldnt be null or empty") + + if len(responseValues) <= 0: + pytest.fail("length of value array in the response should be greater than 0") + + for responseVal in responseValues: + metricName = responseVal['name']['value'] + if metricName != constants.NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME: + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME)) + timeseries = responseVal['timeseries'] + if not timeseries: + pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( + constants.NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + if len(timeseries) <= 0: + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + + # node metric - memoryWorkingSetBytes + custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( + resourceManager.rstrip("/"), + clusterResourceId, + starttime, + endtime, + constants.NODE_MEMORY_WS_METRIC_NAME, + constants.NODE_METRIC_METRIC_AGGREGATION, + constants.NODE_METRICS_NAMESPACE, + constants.METRICS_API_VERSION) + + response = requests.get(custommetricsUrl, params=params, + headers=Headers, verify=False) + + if not response: + pytest.fail("response of the metrics query API shouldnt be null or empty") + + if response.status_code != 200: + pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) + + responseJSON = response.json() + if not responseJSON: + pytest.fail("response JSON shouldnt be null or empty") + + namespace = responseJSON['namespace'] + if namespace != constants.NODE_METRICS_NAMESPACE: + pytest.fail("got the namespace: {0} but expected namespace:{1} in the response".format( + namespace, constants.NODE_METRICS_NAMESPACE)) + + responseValues = responseJSON['value'] + if not responseValues: + pytest.fail("response JSON shouldnt be null or empty") + + if len(responseValues) <= 0: + pytest.fail("length of value array in the response should be greater than 0") + + for responseVal in responseValues: + metricName = 
responseVal['name']['value']
+ if metricName != constants.NODE_MEMORY_WS_METRIC_NAME:
+ pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_MEMORY_WS_METRIC_NAME))
+ timeseries = responseVal['timeseries']
+ if not timeseries:
+ pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format(
+ constants.NODE_MEMORY_WS_METRIC_NAME, constants.NODE_METRICS_NAMESPACE))
+ if len(timeseries) <= 0:
+ pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_MEMORY_WS_METRIC_NAME, constants.NODE_METRICS_NAMESPACE))
+
+ # node metric - memoryWorkingSetPercentage
+ custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format(
+ resourceManager.rstrip("/"),
+ clusterResourceId,
+ starttime,
+ endtime,
+ constants.NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME,
+ constants.NODE_METRIC_METRIC_AGGREGATION,
+ constants.NODE_METRICS_NAMESPACE,
+ constants.METRICS_API_VERSION)
+
+ response = requests.get(custommetricsUrl, params=params,
+ headers=Headers, verify=False)
+
+ if not response:
+ pytest.fail("response of the metrics query API shouldnt be null or empty")
+
+ if response.status_code != 200:
+ pytest.fail("metrics query API failed with an error code: {}".format(
+ response.status_code))
+
+ responseJSON = response.json()
+ if not responseJSON:
+ pytest.fail("response JSON shouldnt be null or empty")
+
+ namespace = responseJSON['namespace']
+ if namespace != constants.NODE_METRICS_NAMESPACE:
+ pytest.fail("got the namespace: {0} but expected namespace:{1} in the response".format(
+ namespace, constants.NODE_METRICS_NAMESPACE))
+
+ responseValues = responseJSON['value']
+ if not responseValues:
+ pytest.fail("response JSON shouldnt be null or empty")
+
+ if len(responseValues) <= 0:
+ pytest.fail("length of value array in the response should be greater than 0")
+
+ for responseVal in responseValues:
+ metricName = responseVal['name']['value']
+ if metricName != constants.NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME:
+ pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME))
+ timeseries = responseVal['timeseries']
+ if not timeseries:
+ pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format(
+ constants.NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE))
+ if len(timeseries) <= 0:
+ pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE))
+
+ # node metric - cpuUsageMilliCores
+ custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format(
+ resourceManager.rstrip("/"),
+ clusterResourceId,
+ starttime,
+ endtime,
+ constants.NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME,
+ constants.NODE_METRIC_METRIC_AGGREGATION,
+ constants.NODE_METRICS_NAMESPACE,
+ constants.METRICS_API_VERSION)
+
+ response = requests.get(custommetricsUrl, params=params,
+ headers=Headers, verify=False)
+
+ if not response:
+ pytest.fail("response of the metrics query API shouldnt be null or empty")
+
+ if response.status_code != 200:
+
pytest.fail("metrics query API failed with an error code: {}".format(response.status_code)) + + responseJSON = response.json() + if not responseJSON: + pytest.fail("response JSON shouldnt be null or empty") + + namespace = responseJSON['namespace'] + if namespace != constants.NODE_METRICS_NAMESPACE: + pytest.fail("got the namespace: {0} but expected namespace:{1} in the response".format( + namespace, constants.NODE_METRICS_NAMESPACE)) + + responseValues = responseJSON['value'] + if not responseValues: + pytest.fail("response JSON shouldnt be null or empty") + + if len(responseValues) <= 0: + pytest.fail("length of value array in the response should be greater than 0") + + for responseVal in responseValues: + metricName = responseVal['name']['value'] + if metricName != constants.NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME: + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME)) + timeseries = responseVal['timeseries'] + if not timeseries: + pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( + constants.NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + if len(timeseries) <= 0: + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + + # node metric - cpuUsagePercentage + custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( + resourceManager.rstrip("/"), + clusterResourceId, + starttime, + endtime, + constants.NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME, + constants.NODE_METRIC_METRIC_AGGREGATION, + constants.NODE_METRICS_NAMESPACE, + constants.METRICS_API_VERSION) + + response = requests.get(custommetricsUrl, params=params, + headers=Headers, verify=False) + + if not response: + pytest.fail("response of the metrics query API shouldnt be null or empty") + + if response.status_code != 200: + pytest.fail("metrics query API failed with an error code: {}".format(response.status_code)) + + responseJSON = response.json() + if not responseJSON: + pytest.fail("response JSON shouldnt be null or empty") + + namespace = responseJSON['namespace'] + if namespace != constants.NODE_METRICS_NAMESPACE: + pytest.fail("got the namespace: {0} but expected namespace:{1} in the response".format( + namespace, constants.NODE_METRICS_NAMESPACE)) + + responseValues = responseJSON['value'] + if not responseValues: + pytest.fail("response JSON shouldnt be null or empty") + + if len(responseValues) <= 0: + pytest.fail("length of value array in the response should be greater than 0") + + for responseVal in responseValues: + metricName = responseVal['name']['value'] + if metricName != constants.NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME: + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME)) + timeseries = responseVal['timeseries'] + if not timeseries: + pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( + constants.NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + if len(timeseries) <= 0: + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace 
:{1}".format(constants.NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + + # node metric - nodesCount + custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( + resourceManager.rstrip("/"), + clusterResourceId, + starttime, + endtime, + constants.NODE_COUNT_METRIC_NAME, + constants.NODE_METRIC_METRIC_AGGREGATION, + constants.NODE_METRICS_NAMESPACE, + constants.METRICS_API_VERSION) + + response = requests.get(custommetricsUrl, params=params, + headers=Headers, verify=False) + + if not response: + pytest.fail("response of the metrics query API shouldnt be null or empty") + + if response.status_code != 200: + pytest.fail("metrics query API failed with an error code: {}".format(response.status_code)) + + responseJSON = response.json() + if not responseJSON: + pytest.fail("response JSON shouldnt be null or empty") + + namespace = responseJSON['namespace'] + if namespace != constants.NODE_METRICS_NAMESPACE: + pytest.fail("got the namespace: {0} but expected namespace:{1} in the response".format( + namespace, constants.NODE_METRICS_NAMESPACE)) + + responseValues = responseJSON['value'] + if not responseValues: + pytest.fail("response JSON shouldnt be null or empty") + + if len(responseValues) <= 0: + pytest.fail("length of value array in the response should be greater than 0") + + for responseVal in responseValues: + metricName = responseVal['name']['value'] + if metricName != constants.NODE_COUNT_METRIC_NAME: + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_COUNT_METRIC_NAME)) + timeseries = responseVal['timeseries'] + if not timeseries: + pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( + constants.NODE_COUNT_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + if len(timeseries) <= 0: + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_COUNT_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + + append_result_output("test_node_metrics_e2e_workflow end \n", + env_dict['TEST_AGENT_LOG_FILE']) + print("Successfully completed node metrics e2e workflow test.") diff --git a/test/e2e/src/tests/test_pod_metrics_e2e_workflow.py b/test/e2e/src/tests/test_pod_metrics_e2e_workflow.py new file mode 100755 index 000000000..cd4260f76 --- /dev/null +++ b/test/e2e/src/tests/test_pod_metrics_e2e_workflow.py @@ -0,0 +1,134 @@ +import pytest +import constants +import requests + +from arm_rest_utility import fetch_aad_token +from kubernetes import client, config +from kubernetes_pod_utility import get_pod_list +from results_utility import append_result_output +from datetime import datetime, timedelta + +pytestmark = pytest.mark.agentests + +# validation of pod metrics e2e workflows +def test_pod_metrics_e2e_workflow(env_dict): + print("Starting pod metrics e2e workflows test.") + append_result_output("test_pod_metrics_e2e_workflow start \n", + env_dict['TEST_AGENT_LOG_FILE']) + # Loading in-cluster kube-config + try: + config.load_incluster_config() + except Exception as e: + pytest.fail("Error loading the in-cluster config: " + str(e)) + + # query time interval for metrics queries + metricQueryIntervalInMins = env_dict['DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES'] + if not metricQueryIntervalInMins: + pytest.fail( + "DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES 
should not be null or empty or 0") + + # get the cluster resource id from replicaset pod envvars + api_instance = client.CoreV1Api() + pod_list = get_pod_list(api_instance, constants.AGENT_RESOURCES_NAMESPACE, + constants.AGENT_DEPLOYMENT_PODS_LABEL_SELECTOR) + + if not pod_list: + pytest.fail("pod_list shouldnt be null or empty") + + if len(pod_list.items) <= 0: + pytest.fail("number of items in pod list should be greater than 0") + + envVars = pod_list.items[0].spec.containers[0].env + if not envVars: + pytest.fail( + "environment variables should be defined in the replicaset pod") + + clusterResourceId = '' + for env in envVars: + if env.name == "AKS_RESOURCE_ID": + clusterResourceId = env.value + print("cluster resource id: {}".format(clusterResourceId)) + + if not clusterResourceId: + pytest.fail( + "failed to get clusterResourceId from replicaset pod environment variables") + + # fetch AAD token for metrics queries + tenant_id = env_dict.get('TENANT_ID') + authority_uri = env_dict.get('AZURE_ENDPOINTS').get( + 'activeDirectory') + tenant_id + client_id = env_dict.get('CLIENT_ID') + client_secret = env_dict.get('CLIENT_SECRET') + resourceManager = env_dict.get('AZURE_ENDPOINTS').get('resourceManager') + aad_token = fetch_aad_token( + client_id, client_secret, authority_uri, resourceManager) + if not aad_token: + pytest.fail("failed to fetch AAD token") + + access_token = aad_token.get('accessToken') + if not access_token: + pytest.fail("access_token shouldnt be null or empty") + + # validate metrics e2e workflow + now = datetime.utcnow() + endtime = now.isoformat()[:-3]+'Z' + starttime = (now - timedelta(hours=0, + minutes=constants.DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES)).isoformat()[:-3]+'Z' + Headers = { + "Authorization": str("Bearer " + access_token), + "Content-Type": "application/json", + "content-length": "0" + } + params = {} + # pod metric - PodCount + custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( + resourceManager.rstrip("/"), + clusterResourceId, + starttime, + endtime, + constants.POD_COUNT_METRIC_NAME, + constants.POD_METRIC_METRIC_AGGREGATION, + constants.POD_METRICS_NAMESPACE, + constants.METRICS_API_VERSION) + + response = requests.get(custommetricsUrl, params=params, + headers=Headers, verify=False) + + if not response: + pytest.fail( + "response of the metrics query API shouldnt be null or empty") + + if response.status_code != 200: + pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) + + responseJSON = response.json() + if not responseJSON: + pytest.fail("response JSON shouldnt be null or empty") + + namespace = responseJSON['namespace'] + if namespace != constants.POD_METRICS_NAMESPACE: + pytest.fail("got the namespace: {0} but expected namespace:{1} in the response".format( + namespace, constants.POD_METRICS_NAMESPACE)) + + responseValues = responseJSON['value'] + if not responseValues: + pytest.fail("response JSON shouldnt be null or empty") + + if len(responseValues) <= 0: + pytest.fail("length of value array in the response should be greater than 0") + + for responseVal in responseValues: + metricName = responseVal['name']['value'] + if metricName != constants.POD_COUNT_METRIC_NAME: + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.POD_COUNT_METRIC_NAME)) + timeseries = responseVal['timeseries'] + 
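+ # Rough shape of the metrics API response consumed below (illustrative, abbreviated):
+ #   { "namespace": "<metricNamespace>",
+ #     "value": [ { "name": { "value": "<metricName>" }, "timeseries": [ { "data": [...] } ] } ] }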
if not timeseries: + pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( + constants.POD_COUNT_METRIC_NAME, constants.POD_METRICS_NAMESPACE)) + if len(timeseries) <= 0: + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.POD_COUNT_METRIC_NAME, constants.POD_METRICS_NAMESPACE)) + + append_result_output("test_pod_metrics_e2e_workflow end \n", + env_dict['TEST_AGENT_LOG_FILE']) + print("Successfully completed e2e workflows test.") diff --git a/test/e2e/src/tests/test_resource_status.py b/test/e2e/src/tests/test_resource_status.py new file mode 100755 index 000000000..bb63dac7c --- /dev/null +++ b/test/e2e/src/tests/test_resource_status.py @@ -0,0 +1,43 @@ +import pytest +import constants + +from kubernetes import client, config +from results_utility import append_result_output +from helper import check_kubernetes_deployment_status +from helper import check_kubernetes_daemonset_status +from helper import check_kubernetes_pods_status + +pytestmark = pytest.mark.agentests + +# validate all the critical resources such as ds, rs, ds pods and rs pod etc. are up and running +def test_resource_status(env_dict): + print("Starting resource status check.") + append_result_output("test_resource_status start \n", + env_dict['TEST_AGENT_LOG_FILE']) + # Loading in-cluster kube-config + try: + config.load_incluster_config() + #config.load_kube_config() + except Exception as e: + pytest.fail("Error loading the in-cluster config: " + str(e)) + + # checking the deployment status + check_kubernetes_deployment_status( + constants.AGENT_RESOURCES_NAMESPACE, constants.AGENT_DEPLOYMENT_NAME, env_dict['TEST_AGENT_LOG_FILE']) + + # checking the daemonset status + check_kubernetes_daemonset_status( + constants.AGENT_RESOURCES_NAMESPACE, constants.AGENT_DAEMONSET_NAME, env_dict['TEST_AGENT_LOG_FILE']) + + expectedPodRestartCount = env_dict['AGENT_POD_EXPECTED_RESTART_COUNT'] + # checking deployment pod status + check_kubernetes_pods_status(constants.AGENT_RESOURCES_NAMESPACE, + constants.AGENT_DEPLOYMENT_PODS_LABEL_SELECTOR, expectedPodRestartCount, env_dict['TEST_AGENT_LOG_FILE']) + + # checking daemonset pod status + check_kubernetes_pods_status(constants.AGENT_RESOURCES_NAMESPACE, + constants.AGENT_DAEMON_SET_PODS_LABEL_SELECTOR, expectedPodRestartCount, env_dict['TEST_AGENT_LOG_FILE']) + + append_result_output("test_resource_status end \n", + env_dict['TEST_AGENT_LOG_FILE']) + print("Successfully checked resource status check.") diff --git a/test/e2e/src/tests/test_rs_workflows.py b/test/e2e/src/tests/test_rs_workflows.py new file mode 100755 index 000000000..aef422171 --- /dev/null +++ b/test/e2e/src/tests/test_rs_workflows.py @@ -0,0 +1,93 @@ +import pytest +import constants + +from kubernetes import client, config +from kubernetes_pod_utility import get_pod_list, get_log_file_content +from results_utility import append_result_output +from helper import check_kubernetes_deployment_status +from helper import check_kubernetes_daemonset_status +from helper import check_kubernetes_pods_status +from kubernetes.stream import stream + +pytestmark = pytest.mark.agentests + +# validation of replicaset agent workflows +def test_rs_workflows(env_dict): + print("Starting replicaset agent workflows test.") + append_result_output("test_rs_workflows start \n", + env_dict['TEST_AGENT_LOG_FILE']) + # Loading in-cluster kube-config + try: + config.load_incluster_config() + except Exception as e: + pytest.fail("Error loading 
the in-cluster config: " + str(e)) + + print("getting pod list") + api_instance = client.CoreV1Api() + pod_list = get_pod_list(api_instance, constants.AGENT_RESOURCES_NAMESPACE, + constants.AGENT_DEPLOYMENT_PODS_LABEL_SELECTOR) + if not pod_list: + pytest.fail("pod_list shouldnt be null or empty") + + if len(pod_list.items) <= 0: + pytest.fail("number of items in pod list should be greater than 0") + + rspodName = pod_list.items[0].metadata.name + if not rspodName: + pytest.fail("replicaset pod name should not be null or empty") + + logcontent = get_log_file_content( + api_instance, constants.AGENT_RESOURCES_NAMESPACE, rspodName, constants.AGENT_OMSAGENT_LOG_PATH) + if not logcontent: + pytest.fail("logcontent should not be null or empty for rs pod: {}".format(rspodName)) + loglines = logcontent.split("\n") + if len(loglines) <= 0: + pytest.fail("number of log lines should be greater than 0") + + IsKubePodInventorySuccessful = False + IsKubeNodeInventorySuccessful = False + IsKubeDeploymentInventorySuccessful = False + IsKubeContainerPerfInventorySuccessful = False + IsKubeServicesInventorySuccessful = False + IsContainerNodeInventorySuccessful = False + IsKubeEventsSuccessful = False + for line in loglines: + if line.find(constants.KUBE_POD_INVENTORY_EMIT_STREAM) >= 0: + IsKubePodInventorySuccessful = True + if line.find(constants.KUBE_NODE_INVENTORY_EMIT_STREAM) >= 0: + IsKubeNodeInventorySuccessful = True + if line.find(constants.KUBE_DEPLOYMENT_INVENTORY_EMIT_STREAM) >= 0: + IsKubeDeploymentInventorySuccessful = True + if line.find(constants.KUBE_CONTAINER_PERF_EMIT_STREAM) >= 0: + IsKubeContainerPerfInventorySuccessful = True + if line.find(constants.KUBE_SERVICES_EMIT_STREAM) >= 0: + IsKubeServicesInventorySuccessful = True + if line.find(constants.KUBE_CONTAINER_NODE_INVENTORY_EMIT_STREAM) >= 0: + IsContainerNodeInventorySuccessful = True + if line.find(constants.KUBE_EVENTS_EMIT_STREAM) >= 0: + IsKubeEventsSuccessful = True + + if IsKubePodInventorySuccessful == False: + pytest.fail("KubePodInventory stream not emitted successfully from pod:" + rspodName) + + if IsKubeNodeInventorySuccessful == False: + pytest.fail("KubeNodeInventory stream not emitted successfully from pod:" + rspodName) + + if IsKubeDeploymentInventorySuccessful == False: + pytest.fail("KubeDeploymentInventory stream not emitted successfully from pod:" + rspodName) + + if IsKubeContainerPerfInventorySuccessful == False: + pytest.fail("KubeContainerPerfInventory stream not emitted successfully from pod:" + rspodName) + + if IsKubeServicesInventorySuccessful == False: + pytest.fail("KubeServicesInventory stream not emitted successfully from pod:" + rspodName) + + if IsContainerNodeInventorySuccessful == False: + pytest.fail("ContainerNodeInventory stream not emitted successfully from pod:" + rspodName) + + if IsKubeEventsSuccessful == False: + pytest.fail("KubeEventsInventory stream not emitted successfully from rs pod:" + rspodName) + + append_result_output("test_rs_workflows end \n", + env_dict['TEST_AGENT_LOG_FILE']) + print("Successfully completed replicaset workflows test.") From 91f954f07ee7552673915570f53987457b7dcfc4 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Thu, 25 Feb 2021 09:08:28 -0800 Subject: [PATCH 075/301] scrape new kubelet pod count metric name (#508) --- build/linux/installer/conf/telegraf.conf | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/build/linux/installer/conf/telegraf.conf b/build/linux/installer/conf/telegraf.conf index 202ac9741..5a5bb2d8c 100644 --- 
a/build/linux/installer/conf/telegraf.conf +++ b/build/linux/installer/conf/telegraf.conf @@ -675,7 +675,9 @@ ## An array of urls to scrape metrics from. urls = ["$CADVISOR_METRICS_URL"] - fieldpass = ["kubelet_running_pod_count","volume_manager_total_volumes", "kubelet_node_config_error", "process_resident_memory_bytes", "process_cpu_seconds_total"] + # <= 1.18: metric name is kubelet_running_pod_count + # >= 1.19: metric name changed to kubelet_running_pods + fieldpass = ["kubelet_running_pod_count","kubelet_running_pods","volume_manager_total_volumes", "kubelet_node_config_error", "process_resident_memory_bytes", "process_cpu_seconds_total"] metric_version = 2 url_tag = "scrapeUrl" From 4a8ff2328210ace141834b0cacb616dfcee801e7 Mon Sep 17 00:00:00 2001 From: Nicolas Yuen Date: Sun, 21 Mar 2021 03:45:59 +0800 Subject: [PATCH 076/301] Adding explicit json output to az commands as the script fails if az is configured with Table output #409 (#513) --- scripts/onboarding/managed/disable-monitoring.sh | 2 +- scripts/onboarding/managed/enable-monitoring.sh | 10 +++++----- scripts/onboarding/managed/upgrade-monitoring.sh | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/scripts/onboarding/managed/disable-monitoring.sh b/scripts/onboarding/managed/disable-monitoring.sh index d43a79f51..29b755331 100644 --- a/scripts/onboarding/managed/disable-monitoring.sh +++ b/scripts/onboarding/managed/disable-monitoring.sh @@ -127,7 +127,7 @@ remove_monitoring_tags() # validate cluster identity for Azure Arc enabled Kubernetes cluster if [ "$isArcK8sCluster" = true ] ; then - identitytype=$(az resource show -g ${clusterResourceGroup} -n ${clusterName} --resource-type $resourceProvider --query identity.type) + identitytype=$(az resource show -g ${clusterResourceGroup} -n ${clusterName} --resource-type $resourceProvider --query identity.type -o json) identitytype=$(echo $identitytype | tr "[:upper:]" "[:lower:]" | tr -d '"') echo "cluster identity type:" $identitytype if [[ "$identitytype" != "systemassigned" ]]; then diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index 9d0c0aca5..1162ba0d3 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -339,7 +339,7 @@ validate_cluster_identity() { local rgName="$(echo ${1})" local clusterName="$(echo ${2})" - local identitytype=$(az resource show -g ${rgName} -n ${clusterName} --resource-type $resourceProvider --query identity.type) + local identitytype=$(az resource show -g ${rgName} -n ${clusterName} --resource-type $resourceProvider --query identity.type -o json) identitytype=$(echo $identitytype | tr "[:upper:]" "[:lower:]" | tr -d '"') echo "cluster identity type:" $identitytype @@ -454,7 +454,7 @@ create_default_log_analytics_workspace() { echo "using existing default workspace:"$workspaceName fi - workspaceResourceId=$(az resource show -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --query id) + workspaceResourceId=$(az resource show -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --query id -o json) workspaceResourceId=$(echo $workspaceResourceId | tr -d '"') echo "workspace resource Id: ${workspaceResourceId}" } @@ -477,12 +477,12 @@ get_workspace_guid_and_key() { local wsName="$(echo ${resourceId} | cut -d'/' -f9)" # get the workspace guid - workspaceGuid=$(az resource show -g $rgName -n $wsName --resource-type 
$workspaceResourceProvider --query properties.customerId) + workspaceGuid=$(az resource show -g $rgName -n $wsName --resource-type $workspaceResourceProvider --query properties.customerId -o json) workspaceGuid=$(echo $workspaceGuid | tr -d '"') echo "workspaceGuid:"$workspaceGuid echo "getting workspace primaryshared key" - workspaceKey=$(az rest --method post --uri $workspaceResourceId/sharedKeys?api-version=2015-11-01-preview --query primarySharedKey) + workspaceKey=$(az rest --method post --uri $workspaceResourceId/sharedKeys?api-version=2015-11-01-preview --query primarySharedKey -o json) workspaceKey=$(echo $workspaceKey | tr -d '"') } @@ -621,7 +621,7 @@ else set_azure_subscription $workspaceSubscriptionId fi - workspaceRegion=$(az resource show --ids ${workspaceResourceId} --query location) + workspaceRegion=$(az resource show --ids ${workspaceResourceId} --query location -o json) workspaceRegion=$(echo $workspaceRegion | tr -d '"') echo "Workspace Region:"$workspaceRegion fi diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 6d14dfa5f..e54822f74 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -202,7 +202,7 @@ validate_cluster_identity() { local rgName="$(echo ${1})" local clusterName="$(echo ${2})" - local identitytype=$(az resource show -g ${rgName} -n ${clusterName} --resource-type $resourceProvider --query identity.type) + local identitytype=$(az resource show -g ${rgName} -n ${clusterName} --resource-type $resourceProvider --query identity.type -o json) identitytype=$(echo $identitytype | tr "[:upper:]" "[:lower:]" | tr -d '"') echo "cluster identity type:" $identitytype @@ -216,7 +216,7 @@ validate_cluster_identity() { validate_monitoring_tags() { echo "get loganalyticsworkspaceResourceId tag on to cluster resource" - logAnalyticsWorkspaceResourceIdTag=$(az resource show --query tags.logAnalyticsWorkspaceResourceId -g $clusterResourceGroup -n $clusterName --resource-type $resourceProvider) + logAnalyticsWorkspaceResourceIdTag=$(az resource show --query tags.logAnalyticsWorkspaceResourceId -g $clusterResourceGroup -n $clusterName --resource-type $resourceProvider -o json) echo "configured log analytics workspace: ${logAnalyticsWorkspaceResourceIdTag}" echo "successfully got logAnalyticsWorkspaceResourceId tag on the cluster resource" if [ -z "$logAnalyticsWorkspaceResourceIdTag" ]; then From 512e5c0df258d67ba1c15c49d3650529a61ec9aa Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 22 Mar 2021 11:08:49 -0700 Subject: [PATCH 077/301] Gangams/arc proxy contract and token renewal updates (#511) * fix issue with crd status updates * handle renewal token delays * add proxy contract * updates for proxy cert for linux * remove proxycert related changes * fix whitespace issue * fix whitespace issue * remove proxy in arm template --- .../templates/omsagent-deployment.yaml | 2 +- .../templates/omsagent-rbac.yaml | 2 +- .../templates/omsagent-secret.yaml | 14 +++++- charts/azuremonitor-containers/values.yaml | 6 +++ .../existingClusterOnboarding.json | 12 +---- .../existingClusterParam.json | 3 -- .../plugins/ruby/arc_k8s_cluster_identity.rb | 45 ++++++++++++++----- 7 files changed, 56 insertions(+), 28 deletions(-) diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index 012dd2720..37b8faacc 100644 --- 
a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -157,5 +157,5 @@ spec: - name: omsagent-adx-secret secret: secretName: omsagent-adx-secret - optional: true + optional: true {{- end }} diff --git a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml index 5db5c2dab..c0a6e3722 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml @@ -28,7 +28,7 @@ rules: resources: ["healthstates"] verbs: ["get", "create", "patch"] - apiGroups: ["clusterconfig.azure.com"] - resources: ["azureclusteridentityrequests"] + resources: ["azureclusteridentityrequests", "azureclusteridentityrequests/status"] resourceNames: ["container-insights-clusteridentityrequest"] verbs: ["get", "create", "patch"] - nonResourceURLs: ["/metrics"] diff --git a/charts/azuremonitor-containers/templates/omsagent-secret.yaml b/charts/azuremonitor-containers/templates/omsagent-secret.yaml index 1a7f087ed..8c245338c 100644 --- a/charts/azuremonitor-containers/templates/omsagent-secret.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-secret.yaml @@ -13,7 +13,19 @@ data: WSID: {{ required "A valid workspace id is required!" .Values.omsagent.secret.wsid | b64enc | quote }} KEY: {{ required "A valid workspace key is required!" .Values.omsagent.secret.key | b64enc | quote }} DOMAIN: {{ .Values.omsagent.domain | b64enc | quote }} - {{- if ne .Values.omsagent.proxy "" }} + {{- $httpsProxyDict := urlParse .Values.Azure.proxySettings.httpsProxy -}} + {{- $httpProxyDict := urlParse .Values.Azure.proxySettings.httpProxy -}} + {{- if and (and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.httpsProxy)) ($httpsProxyDict.userinfo) }} + PROXY: {{ .Values.Azure.proxySettings.httpsProxy | b64enc | quote }} + {{- else if and (and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.httpsProxy)) (empty $httpsProxyDict.userinfo) }} + # adding arbitrary creds since omsagent expects arbitrary creds in case of no auth + PROXY: {{ urlJoin (dict "scheme" $httpsProxyDict.scheme "userinfo" "admin:secret" "host" $httpsProxyDict.host) | b64enc | quote }} + {{- else if and (and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.httpProxy)) ($httpProxyDict.userinfo) }} + PROXY: {{ .Values.Azure.proxySettings.httpProxy | b64enc | quote }} + {{- else if and (and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.httpProxy)) (empty $httpProxyDict.userinfo) }} + # adding arbitrary creds since omsagent expects arbitrary creds in case of no auth + PROXY: {{ urlJoin (dict "scheme" $httpProxyDict.scheme "userinfo" "admin:secret" "host" $httpProxyDict.host) | b64enc | quote }} + {{- else if ne .Values.omsagent.proxy "" }} PROXY: {{ .Values.omsagent.proxy | b64enc | quote }} {{- end }} {{- end }} diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 5831c9889..caf0217c3 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -12,6 +12,12 @@ Azure: Extension: Name: "" ResourceId: "" + proxySettings: + isProxyEnabled: false + httpProxy: "" + httpsProxy: "" + noProxy: "" + proxyCert: "" omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" diff --git 
a/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json index 8ebef232a..95e7ba5d0 100644 --- a/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json @@ -13,14 +13,7 @@ "metadata": { "description": "Location of the Azure Arc Connected Cluster Resource e.g. \"eastus\"" } - }, - "proxyEndpointUrl": { - "type": "string", - "defaultValue": "", - "metadata": { - "description": "If the cluster behind forward proxy, then specify Proxy Endpoint URL in this format: http(s)://:@:" - } - }, + }, "workspaceResourceId": { "type": "string", "metadata": { @@ -114,8 +107,7 @@ }, "configurationProtectedSettings": { "omsagent.secret.wsid": "[reference(parameters('workspaceResourceId'), '2015-03-20').customerId]", - "omsagent.secret.key": "[listKeys(parameters('workspaceResourceId'), '2015-03-20').primarySharedKey]" , - "omsagent.proxy": "[if(equals(parameters('proxyEndpointUrl'), ''), '', parameters('proxyEndpointUrl'))]" + "omsagent.secret.key": "[listKeys(parameters('workspaceResourceId'), '2015-03-20').primarySharedKey]" }, "autoUpgradeMinorVersion": true, "releaseTrain": "Stable", diff --git a/scripts/onboarding/templates/arc-k8s-extension/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension/existingClusterParam.json index b74b5ac95..6829d3d05 100644 --- a/scripts/onboarding/templates/arc-k8s-extension/existingClusterParam.json +++ b/scripts/onboarding/templates/arc-k8s-extension/existingClusterParam.json @@ -8,9 +8,6 @@ "clusterRegion": { "value": "" }, - "proxyEndpointUrl": { - "value": "" - }, "workspaceResourceId": { "value": "/subscriptions//resourcegroups//providers/microsoft.operationalinsights/workspaces/" }, diff --git a/source/plugins/ruby/arc_k8s_cluster_identity.rb b/source/plugins/ruby/arc_k8s_cluster_identity.rb index 7824f3d4e..552dafb1f 100644 --- a/source/plugins/ruby/arc_k8s_cluster_identity.rb +++ b/source/plugins/ruby/arc_k8s_cluster_identity.rb @@ -26,6 +26,7 @@ def initialize @log.info "initialize start @ #{Time.now.utc.iso8601}" @token_expiry_time = Time.now @cached_access_token = String.new + @isLastTokenRenewalUpdatePending = false @token_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/token" @cert_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" @kube_api_server_url = KubernetesApiClient.getKubeAPIServerUrl @@ -41,14 +42,20 @@ def initialize def get_cluster_identity_token() begin - # get the cluster msi identity token either if its empty or near expirty. Token is valid 24 hrs. + # get the cluster msi identity token either if its empty or near expiry. Token is valid 24 hrs. if @cached_access_token.to_s.empty? || (Time.now + 60 * 60 > @token_expiry_time) # Refresh token 1 hr from expiration # renew the token if its near expiry if !@cached_access_token.to_s.empty? 
&& (Time.now + 60 * 60 > @token_expiry_time) - @log.info "renewing the token since its near expiry @ #{Time.now.utc.iso8601}" - renew_near_expiry_token - # sleep 60 seconds to get the renewed token available - sleep 60 + if !@isLastTokenRenewalUpdatePending + @log.info "token expiry - @ #{@token_expiry_time}" + @log.info "renewing the token since token has near expiry @ #{Time.now.utc.iso8601}" + renew_near_expiry_token + # sleep 60 seconds to get the renewed token available + sleep 60 + @isLastTokenRenewalUpdatePending = true + else + @log.warn "last token renewal update still pending @ #{Time.now.utc.iso8601}" + end end @log.info "get token reference from crd @ #{Time.now.utc.iso8601}" tokenReference = get_token_reference_from_crd @@ -61,6 +68,7 @@ def get_cluster_identity_token() token = get_token_from_secret(token_secret_name, token_secret_data_name) if !token.nil? @cached_access_token = token + @isLastTokenRenewalUpdatePending = false else @log.warn "got token nil from secret: #{@token_secret_name}" end @@ -123,7 +131,17 @@ def get_token_reference_from_crd() tokenReference["expirationTime"] = status["expirationTime"] tokenReference["secretName"] = status["tokenReference"]["secretName"] tokenReference["dataName"] = status["tokenReference"]["dataName"] - end + elsif get_response.code.to_i == 404 # this might happen if the crd resource deleted by user accidently + @log.info "since crd resource doesnt exist hence creating crd resource : #{@@cluster_identity_resource_name} @ #{Time.now.utc.iso8601}" + crd_request_body = get_crd_request_body + crd_request_body_json = crd_request_body.to_json + create_request = Net::HTTP::Post.new(crd_request_uri) + create_request["Content-Type"] = "application/json" + create_request["Authorization"] = "Bearer #{@service_account_token}" + create_request.body = crd_request_body_json + create_response = @http_client.request(create_request) + @log.info "Got response of #{create_response.code} for POST #{crd_request_uri} @ #{Time.now.utc.iso8601}" + end rescue => err @log.warn "get_token_reference_from_crd call failed: #{err}" ApplicationInsightsUtility.sendExceptionTelemetry(err, { "FeatureArea" => "MDM" }) @@ -141,20 +159,23 @@ def renew_near_expiry_token() cluster_identity_resource_namespace: @@cluster_identity_resource_namespace, cluster_identity_resource_name: @@cluster_identity_resource_name, } - crd_request_body = get_crd_request_body - crd_request_body_json = crd_request_body.to_json - update_request = Net::HTTP::Patch.new(crd_request_uri) + update_crd_request_body = { 'status': {'expirationTime': ''} } + update_crd_request_body_json = update_crd_request_body.to_json + update_crd_request_uri = crd_request_uri + "/status" + update_request = Net::HTTP::Patch.new(update_crd_request_uri) update_request["Content-Type"] = "application/merge-patch+json" update_request["Authorization"] = "Bearer #{@service_account_token}" - update_request.body = crd_request_body_json + update_request.body = update_crd_request_body_json update_response = @http_client.request(update_request) - @log.info "Got response of #{update_response.code} for PATCH #{crd_request_uri} @ #{Time.now.utc.iso8601}" + @log.info "Got response of #{update_response.code} for PATCH #{update_crd_request_uri} @ #{Time.now.utc.iso8601}" if update_response.code.to_i == 404 @log.info "since crd resource doesnt exist hence creating crd resource : #{@@cluster_identity_resource_name} @ #{Time.now.utc.iso8601}" create_request = Net::HTTP::Post.new(crd_request_uri) create_request["Content-Type"] = 
"application/json" create_request["Authorization"] = "Bearer #{@service_account_token}" - create_request.body = crd_request_body_json + create_crd_request_body = get_crd_request_body + create_crd_request_body_json = create_crd_request_body.to_json + create_request.body = create_crd_request_body_json create_response = @http_client.request(create_request) @log.info "Got response of #{create_response.code} for POST #{crd_request_uri} @ #{Time.now.utc.iso8601}" end From 6b48b6a846184070cdd26cc733b650be4f4fb5ae Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 22 Mar 2021 16:06:47 -0700 Subject: [PATCH 078/301] doc updates for microsoft charts repo release (#512) * doc updates for microsoft charts repo release * wip --- ReleaseProcess.md | 36 +++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/ReleaseProcess.md b/ReleaseProcess.md index 2a3e6001a..c6f51bb65 100644 --- a/ReleaseProcess.md +++ b/ReleaseProcess.md @@ -43,13 +43,47 @@ This needs to be co-ordinated with Red hat and ARO-RP team for the release and Make PR against [AKS-Engine](https://github.com/Azure/aks-engine). Refer PR https://github.com/Azure/aks-engine/pull/2318 -## ARO v4, On-prem K8s, Azure Arc K8s and OpenShift v4 clusters +## ARO v4, Azure Arc K8s and OpenShift v4 clusters Make sure azuremonitor-containers chart yamls updates with all changes going with the release and also make sure to bump the chart version, imagetag and docker provider version etc. Similar to agent container image, build pipeline automatically push the chart to container insights prod acr for canary and prod repos accordingly. Both the agent and helm chart will be replicated to `mcr.microsoft.com`. The way, customers will be onboard the monitoring to these clusters using onboarding scripts under `onboarding\managed` directory so please bump chart version for prod release. Once we move to Arc K8s Monitoring extension Public preview, these will be taken care so at that point of time no manual changes like this required. +## Microsoft Charts Repo release for On-prem K8s + +Since HELM charts repo being deprecated, Microsoft charts repo being used for HELM chart release of on-prem K8s clusters. +To make chart release PR, fork [Microsoft-charts-repo]([https://github.com/microsoft/charts/tree/gh-pages) and make the PR against `gh-pages` branch of the upstream repo. + +Refer PR - https://github.com/microsoft/charts/pull/23 for example. +Once the PR merged, latest version of HELM chart should be available in couple of mins in https://microsoft.github.io/charts/repo and https://artifacthub.io/. + +Instructions to create PR +``` +# 1. create helm package for the release candidate + git clone git@github.com:microsoft/Docker-Provider.git + git checkout ci_prod + cd ~/Docker-Provider/charts/azuremonitor-containers # this path based on where you have cloned the repo + helm package . + +# 2. clone your fork repo and checkout gh_pages branch # gh_pages branch used as release branch + cd ~ + git clone + cd ~/charts # assumed the root dir of the clone is charts + git checkout gh_pages + +# 3. copy release candidate helm package + cd ~/charts/repo/azuremonitor-containers + # update chart version value with the version of chart being released + cp ~/Docker-Provider/charts/azuremonitor-containers/azuremonitor-containers-.tgz . + cd ~/charts/repo + # update repo index file + helm repo index . + +# 4. Review the changes and make PR. 
Please note, you may need to revert unrelated changes automatically added by `helm repo index .` command + +``` + # 4. Monitor agent roll-out status In Container Insights Agent (AKS) telemetry dashboard, update the agent roll status by region chart with released agent image and track rollout status. If you see any issues with agent rollout, reach out AKS on-call team for the help on investigation and understanding whats going on. From d93c680db71c7a562054faaa0854addb53c023a2 Mon Sep 17 00:00:00 2001 From: seenu433 Date: Mon, 22 Mar 2021 20:23:11 -0400 Subject: [PATCH 079/301] Update enable-monitoring.sh (#514) Line 314 and 343 seems to have trailing spaces for some subscriptions which is exiting the script even for valid scenarios Co-authored-by: Ganga Mahesh Siddem --- scripts/onboarding/managed/enable-monitoring.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index 1162ba0d3..a9560b5c5 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -311,7 +311,7 @@ parse_args() { validate_and_configure_supported_cloud() { echo "get active azure cloud name configured to azure cli" - azureCloudName=$(az cloud show --query name -o tsv | tr "[:upper:]" "[:lower:]") + azureCloudName=$(az cloud show --query name -o tsv | tr "[:upper:]" "[:lower:]" | tr -d "[:space:]") echo "active azure cloud name configured to azure cli: ${azureCloudName}" if [ "$isArcK8sCluster" = true ]; then if [ "$azureCloudName" != "azurecloud" -a "$azureCloudName" != "azureusgovernment" ]; then @@ -340,7 +340,7 @@ validate_cluster_identity() { local clusterName="$(echo ${2})" local identitytype=$(az resource show -g ${rgName} -n ${clusterName} --resource-type $resourceProvider --query identity.type -o json) - identitytype=$(echo $identitytype | tr "[:upper:]" "[:lower:]" | tr -d '"') + identitytype=$(echo $identitytype | tr "[:upper:]" "[:lower:]" | tr -d '"' | tr -d "[:space:]") echo "cluster identity type:" $identitytype if [[ "$identitytype" != "systemassigned" ]]; then From 4d386ce2b8c2b6acd56156150a623b8aabd1878c Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Thu, 25 Mar 2021 16:01:54 -0700 Subject: [PATCH 080/301] Prometheus scraping from sidecar and OSM changes (#515) --- README.md | 1 + .../scripts/tomlparser-prom-customconfig.rb | 423 ++++++++++++++++++ .../installer/conf/prometheus-side-car.conf | 4 + .../conf/td-agent-bit-prom-side-car.conf | 28 ++ .../conf/telegraf-prom-side-car.conf | 162 +++++++ build/linux/installer/conf/telegraf-rs.conf | 23 +- .../installer/datafiles/base_container.data | 28 +- .../linux/installer/scripts/livenessprobe.sh | 27 +- .../scripts/tomlparser-osm-config.rb | 168 +++++++ .../scripts/tomlparser-prom-customconfig.rb | 267 ----------- build/windows/installer/conf/fluent-bit.conf | 9 + build/windows/installer/conf/telegraf.conf | 162 +++++++ .../templates/omsagent-daemonset-windows.yaml | 6 + .../templates/omsagent-deployment.yaml | 13 +- kubernetes/container-azm-ms-agentconfig.yaml | 11 + kubernetes/container-azm-ms-osmconfig.yaml | 17 + kubernetes/linux/Dockerfile | 2 +- kubernetes/linux/defaultpromenvvariables-rs | 19 +- .../linux/defaultpromenvvariables-sidecar | 9 + kubernetes/linux/main.sh | 209 ++++++--- kubernetes/linux/setup.sh | 8 +- kubernetes/omsagent.yaml | 72 +++ kubernetes/windows/Dockerfile | 4 + kubernetes/windows/main.ps1 | 113 +++-- .../setdefaulttelegrafenvvariables.ps1 | 17 + 
kubernetes/windows/setup.ps1 | 16 + .../windows/install-build-pre-requisites.ps1 | 6 +- source/plugins/go/src/oms.go | 2 +- source/plugins/go/src/telemetry.go | 157 +++++-- source/plugins/ruby/in_kube_nodes.rb | 6 + 30 files changed, 1548 insertions(+), 441 deletions(-) create mode 100644 build/common/installer/scripts/tomlparser-prom-customconfig.rb create mode 100644 build/linux/installer/conf/prometheus-side-car.conf create mode 100644 build/linux/installer/conf/td-agent-bit-prom-side-car.conf create mode 100644 build/linux/installer/conf/telegraf-prom-side-car.conf create mode 100644 build/linux/installer/scripts/tomlparser-osm-config.rb delete mode 100644 build/linux/installer/scripts/tomlparser-prom-customconfig.rb create mode 100644 build/windows/installer/conf/telegraf.conf create mode 100644 kubernetes/container-azm-ms-osmconfig.yaml create mode 100644 kubernetes/linux/defaultpromenvvariables-sidecar create mode 100644 kubernetes/windows/setdefaulttelegrafenvvariables.ps1 diff --git a/README.md b/README.md index 3564345ee..555234c61 100644 --- a/README.md +++ b/README.md @@ -67,6 +67,7 @@ The general directory structure is: │ │ ├── acrworkflows/ - acr work flows for the Linux Agent container image │ │ ├── defaultpromenvvariables - default environment variables for Prometheus scraping │ │ ├── defaultpromenvvariables-rs - cluster level default environment variables for Prometheus scraping +│ │ ├── defaultpromenvvariables-sidecar - cluster level default environment variables for Prometheus scraping in sidecar │ ├── windows/ - scripts to build the Docker image for Windows Agent │ │ ├── dockerbuild - script to build the code and docker imag, and publish docker image │ │ ├── acrworkflows/ - acr work flows for the Windows Agent container image diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb new file mode 100644 index 000000000..819c1956f --- /dev/null +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -0,0 +1,423 @@ +#!/usr/local/bin/ruby + +#this should be require relative in Linux and require in windows, since it is a gem install on windows +@os_type = ENV["OS_TYPE"] +if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 + require "tomlrb" +else + require_relative "tomlrb" +end +# require_relative "tomlrb" +require_relative "ConfigParseErrorLogger" +require "fileutils" + +@promConfigMapMountPath = "/etc/config/settings/prometheus-data-collection-settings" +@replicaset = "replicaset" +@daemonset = "daemonset" +@promSideCar = "prometheussidecar" +@windows = "windows" +@configSchemaVersion = "" +@defaultDsInterval = "1m" +@defaultDsPromUrls = [] +@defaultDsFieldPass = [] +@defaultDsFieldDrop = [] +@defaultRsInterval = "1m" +@defaultRsPromUrls = [] +@defaultRsFieldPass = [] +@defaultRsFieldDrop = [] +@defaultRsK8sServices = [] +# @defaultRsMonitorPods = false +@defaultCustomPrometheusInterval = "1m" +@defaultCustomPrometheusFieldPass = [] +@defaultCustomPrometheusFieldDrop = [] +@defaultCustomPrometheusMonitorPods = false +@defaultCustomPrometheusLabelSelectors = "" +@defaultCustomPrometheusFieldSelectors = "" + +#Configurations to be used for the auto-generated input prometheus plugins for namespace filtering +@metricVersion = 2 +@monitorKubernetesPodsVersion = 2 +@urlTag = "scrapeUrl" +@bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" +@responseTimeout = "15s" +@tlsCa = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +@insecureSkipVerify = true + +# Checking to see if this is the daemonset or replicaset to parse config accordingly +@controller = ENV["CONTROLLER_TYPE"] +@containerType = ENV["CONTAINER_TYPE"] +@sidecarScrapingEnabled = ENV["SIDECAR_SCRAPING_ENABLED"] + +# Use parser to parse the configmap toml file to a ruby structure +def parseConfigMap + begin + # Check to see if config map is created + if (File.file?(@promConfigMapMountPath)) + puts "config::configmap container-azm-ms-agentconfig for settings mounted, parsing values for prometheus config map" + parsedConfig = Tomlrb.load_file(@promConfigMapMountPath, symbolize_keys: true) + puts "config::Successfully parsed mounted prometheus config map" + return parsedConfig + else + puts "config::configmap container-azm-ms-agentconfig for settings not mounted, using defaults for prometheus scraping" + return nil + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config map for prometheus config: #{errorStr}, using defaults, please check config map for errors") + return nil + end +end + +def checkForTypeArray(arrayValue, arrayType) + if (arrayValue.nil? || (arrayValue.kind_of?(Array) && ((arrayValue.length == 0) || (arrayValue.length > 0 && arrayValue[0].kind_of?(arrayType))))) + return true + else + return false + end +end + +def checkForType(variable, varType) + if variable.nil? || variable.kind_of?(varType) + return true + else + return false + end +end + +def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + begin + puts "config::Starting to substitute the placeholders in telegraf conf copy file with no namespace filters" + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE", ("pod_scrape_scope = \"#{(@controller.casecmp(@replicaset) == 0) ? 
"cluster" : "node"}\"")) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", ("kubernetes_label_selector = \"#{kubernetesLabelSelectors}\"")) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", ("kubernetes_field_selector = \"#{kubernetesFieldSelectors}\"")) + rescue => errorStr + puts "Exception while replacing default pod monitor settings for custom prometheus scraping: #{errorStr}" + end + return new_contents +end + +def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) + begin + puts "config::Starting to substitute the placeholders in telegraf conf copy file with namespace filters" + + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS") + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR") + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR") + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE") + + pluginConfigsWithNamespaces = "" + monitorKubernetesPodsNamespaces.each do |namespace| + if !namespace.nil? + #Stripping namespaces to remove leading and trailing whitespaces + namespace.strip! + if namespace.length > 0 + pluginConfigsWithNamespaces += "\n[[inputs.prometheus]] + interval = \"#{interval}\" + monitor_kubernetes_pods = true + pod_scrape_scope = \"#{(@controller.casecmp(@replicaset) == 0) ? "cluster" : "node"}\" + monitor_kubernetes_pods_namespace = \"#{namespace}\" + kubernetes_label_selector = \"#{kubernetesLabelSelectors}\" + kubernetes_field_selector = \"#{kubernetesFieldSelectors}\" + fieldpass = #{fieldPassSetting} + fielddrop = #{fieldDropSetting} + metric_version = #{@metricVersion} + url_tag = \"#{@urlTag}\" + bearer_token = \"#{@bearerToken}\" + response_timeout = \"#{@responseTimeout}\" + tls_ca = \"#{@tlsCa}\" + insecure_skip_verify = #{@insecureSkipVerify}\n" + end + end + end + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER", pluginConfigsWithNamespaces) + return new_contents + rescue => errorStr + puts "Exception while creating prometheus input plugins to filter namespaces for custom prometheus: #{errorStr}, using defaults" + replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + end +end + +# Use the ruby structure created after config parsing to set the right values to be used as environment variables +def populateSettingValuesFromConfigMap(parsedConfig) + if !@controller.nil? + if !parsedConfig.nil? && !parsedConfig[:prometheus_data_collection_settings].nil? + if @controller.casecmp(@replicaset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? 
+ #Get prometheus replicaset custom config settings + begin + interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] + fieldPass = parsedConfig[:prometheus_data_collection_settings][:cluster][:fieldpass] + fieldDrop = parsedConfig[:prometheus_data_collection_settings][:cluster][:fielddrop] + urls = parsedConfig[:prometheus_data_collection_settings][:cluster][:urls] + kubernetesServices = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_services] + + # Remove below 4 lines after phased rollout + monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] + monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces] + kubernetesLabelSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_label_selector] + kubernetesFieldSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_field_selector] + + # Check for the right datatypes to enforce right setting values + if checkForType(interval, String) && + checkForTypeArray(fieldPass, String) && + checkForTypeArray(fieldDrop, String) && + checkForTypeArray(kubernetesServices, String) && + checkForTypeArray(urls, String) && + # Remove below check after phased rollout + checkForType(kubernetesLabelSelectors, String) && + checkForType(kubernetesFieldSelectors, String) && + (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) # Checking for Boolean type, since 'Boolean' is not defined as a type in ruby + puts "config::Successfully passed typecheck for config settings for replicaset" + #if setting is nil assign default values + interval = (interval.nil?) ? @defaultRsInterval : interval + fieldPass = (fieldPass.nil?) ? @defaultRsFieldPass : fieldPass + fieldDrop = (fieldDrop.nil?) ? @defaultRsFieldDrop : fieldDrop + kubernetesServices = (kubernetesServices.nil?) ? @defaultRsK8sServices : kubernetesServices + urls = (urls.nil?) ? @defaultRsPromUrls : urls + # Remove below lines after phased rollout + monitorKubernetesPods = (monitorKubernetesPods.nil?) ? @defaultRsMonitorPods : monitorKubernetesPods + kubernetesLabelSelectors = (kubernetesLabelSelectors.nil?) ? @defaultCustomPrometheusLabelSelectors : kubernetesLabelSelectors + kubernetesFieldSelectors = (kubernetesFieldSelectors.nil?) ? @defaultCustomPrometheusFieldSelectors : kubernetesFieldSelectors + + file_name = "/opt/telegraf-test-rs.conf" + # Copy the telegraf config file to a temp file to run telegraf in test mode with this config + FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf", file_name) + + puts "config::Starting to substitute the placeholders in telegraf conf copy file for replicaset" + #Replace the placeholder config values with values from custom config + text = File.read(file_name) + new_contents = text.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL", interval) + fieldPassSetting = (fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]" + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS", fieldPassSetting) + fieldDropSetting = (fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]" + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP", fieldDropSetting) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_URLS", ((urls.length > 0) ? 
("[\"" + urls.join("\",\"") + "\"]") : "[]")) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_K8S_SERVICES", ((kubernetesServices.length > 0) ? ("[\"" + kubernetesServices.join("\",\"") + "\"]") : "[]")) + + # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces + # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - + # - to use defaults in case of nil settings + # Remove below block after phased rollout + if (@sidecarScrapingEnabled.nil? || (!@sidecarScrapingEnabled.nil? && (@sidecarScrapingEnabled.casecmp("false") == 0))) + monitorKubernetesPodsNSConfig = [] + if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? && checkForTypeArray(monitorKubernetesPodsNamespaces, String) + # Adding a check to see if an empty array is passed for kubernetes namespaces + if (monitorKubernetesPodsNamespaces.length > 0) + new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length + monitorKubernetesPodsNSConfig = monitorKubernetesPodsNamespaces + else + new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = 0 + end + else + new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = 0 + end + # Label and field selectors are passed as strings. For field selectors, split by commas to get the number of key-value pairs. + # Label selectors can be formatted as "app in (app1, app2, app3)", so split by commas only outside parentheses to get the number of key-value pairs. + kubernetesLabelSelectorsLength = kubernetesLabelSelectors.split(/,\s*(?=[^()]*(?:\(|$))/).length + kubernetesFieldSelectorsLength = kubernetesFieldSelectors.split(",").length + end + + File.open(file_name, "w") { |file| file.puts new_contents } + puts "config::Successfully substituted the placeholders in telegraf conf file for replicaset" + #Set environment variables for telemetry + file = File.open("telemetry_prom_config_env_var", "w") + if !file.nil? + file.write("export TELEMETRY_RS_PROM_INTERVAL=\"#{interval}\"\n") + #Setting array lengths as environment variables for telemetry purposes + file.write("export TELEMETRY_RS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") + file.write("export TELEMETRY_RS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") + file.write("export TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH=#{kubernetesServices.length}\n") + file.write("export TELEMETRY_RS_PROM_URLS_LENGTH=#{urls.length}\n") + # Remove below block after phased rollout + if (@sidecarScrapingEnabled.nil? || (!@sidecarScrapingEnabled.nil? 
&& (@sidecarScrapingEnabled.casecmp("false") == 0))) + file.write("export TELEMETRY_RS_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") + file.write("export TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") + file.write("export TELEMETRY_RS_PROM_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectorsLength}\"\n") + file.write("export TELEMETRY_RS_PROM_FIELD_SELECTOR_LENGTH=\"#{kubernetesFieldSelectorsLength}\"\n") + end + + # Close file after writing all environment variables + file.close + puts "config::Successfully created telemetry file for replicaset" + end + else + ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for replicaset, using defaults, please use right types for all settings") + end # end of type check condition + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for replicaset: #{errorStr}, using defaults") + setRsPromDefaults + puts "****************End Prometheus Config Processing********************" + end + elsif @controller.casecmp(@daemonset) == 0 && + ((!@containerType.nil? && @containerType.casecmp(@promSideCar) == 0) || + (!@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0) && @sidecarScrapingEnabled.strip.casecmp("true") == 0) && + !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? + #Get prometheus custom config settings for monitor kubernetes pods + begin + interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] + fieldPass = parsedConfig[:prometheus_data_collection_settings][:cluster][:fieldpass] + fieldDrop = parsedConfig[:prometheus_data_collection_settings][:cluster][:fielddrop] + monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] + monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces] + kubernetesLabelSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_label_selector] + kubernetesFieldSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_field_selector] + + # Check for the right datatypes to enforce right setting values + if checkForType(interval, String) && + checkForType(kubernetesLabelSelectors, String) && + checkForType(kubernetesFieldSelectors, String) && + checkForTypeArray(fieldPass, String) && + checkForTypeArray(fieldDrop, String) && + (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) #Checking for Boolean type, since 'Boolean' is not defined as a type in ruby + puts "config::Successfully passed typecheck for config settings for custom prometheus scraping" + #if setting is nil assign default values + interval = (interval.nil?) ? @defaultCustomPrometheusInterval : interval + fieldPass = (fieldPass.nil?) ? @defaultCustomPrometheusFieldPass : fieldPass + fieldDrop = (fieldDrop.nil?) ? @defaultCustomPrometheusFieldDrop : fieldDrop + monitorKubernetesPods = (monitorKubernetesPods.nil?) ? @defaultCustomPrometheusMonitorPods : monitorKubernetesPods + kubernetesLabelSelectors = (kubernetesLabelSelectors.nil?) ? @defaultCustomPrometheusLabelSelectors : kubernetesLabelSelectors + kubernetesFieldSelectors = (kubernetesFieldSelectors.nil?) ? @defaultCustomPrometheusFieldSelectors : kubernetesFieldSelectors + + if !@os_type.nil? && !@os_type.empty?
&& @os_type.strip.casecmp("windows") == 0 + file_name = "/etc/telegraf/telegraf.conf" + else + file_name = "/opt/telegraf-test-prom-side-car.conf" + # Copy the telegraf config file to a temp file to run telegraf in test mode with this config + FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf", file_name) + end + puts "config::Starting to substitute the placeholders in telegraf conf copy file for linux or conf file for windows for custom prometheus scraping" + #Replace the placeholder config values with values from custom config + text = File.read(file_name) + new_contents = text.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL", interval) + fieldPassSetting = (fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]" + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS", fieldPassSetting) + fieldDropSetting = (fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]" + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP", fieldDropSetting) + + # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces + # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accommodate for other settings to be able - + # - to use defaults in case of nil settings + monitorKubernetesPodsNSConfig = [] + if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? && checkForTypeArray(monitorKubernetesPodsNamespaces, String) + # Adding a check to see if an empty array is passed for kubernetes namespaces + if (monitorKubernetesPodsNamespaces.length > 0) + new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length + monitorKubernetesPodsNSConfig = monitorKubernetesPodsNamespaces + else + new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = 0 + end + else + new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = 0 + end + + # Label and field selectors are passed as strings. For field selectors, split by commas to get the number of key-value pairs. + # Label selectors can be formatted as "app in (app1, app2, app3)", so split by commas only outside parentheses to get the number of key-value pairs. + kubernetesLabelSelectorsLength = kubernetesLabelSelectors.split(/,\s*(?=[^()]*(?:\(|$))/).length + kubernetesFieldSelectorsLength = kubernetesFieldSelectors.split(",").length + + File.open(file_name, "w") { |file| file.puts new_contents } + puts "config::Successfully substituted the placeholders in telegraf conf file for custom prometheus scraping" + #Set environment variables for telemetry in the sidecar container + if (!@containerType.nil? && @containerType.casecmp(@promSideCar) == 0) + file = File.open("telemetry_prom_config_env_var", "w") + if !file.nil?
+ #Setting array lengths as environment variables for telemetry purposes + file.write("export TELEMETRY_CUSTOM_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") + file.write("export TELEMETRY_CUSTOM_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") + file.write("export TELEMETRY_CUSTOM_PROM_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectorsLength}\"\n") + file.write("export TELEMETRY_CUSTOM_PROM_FIELD_SELECTOR_LENGTH=\"#{kubernetesFieldSelectorsLength}\"\n") + + # Close file after writing all environment variables + file.close + puts "config::Successfully created telemetry file for prometheus sidecar" + end + end + else + ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for prometheus side car, using defaults, please use right types for all settings") + end # end of type check condition + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for prometheus side car: #{errorStr}, using defaults") + puts "****************End Prometheus Config Processing********************" + end + elsif @controller.casecmp(@daemonset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:node].nil? + #Get prometheus daemonset custom config settings + begin + interval = parsedConfig[:prometheus_data_collection_settings][:node][:interval] + fieldPass = parsedConfig[:prometheus_data_collection_settings][:node][:fieldpass] + fieldDrop = parsedConfig[:prometheus_data_collection_settings][:node][:fielddrop] + urls = parsedConfig[:prometheus_data_collection_settings][:node][:urls] + + # Check for the right datatypes to enforce right setting values + if checkForType(interval, String) && + checkForTypeArray(fieldPass, String) && + checkForTypeArray(fieldDrop, String) && + checkForTypeArray(urls, String) + puts "config::Successfully passed typecheck for config settings for daemonset" + + #if setting is nil assign default values + interval = (interval.nil?) ? @defaultDsInterval : interval + fieldPass = (fieldPass.nil?) ? @defaultDsFieldPass : fieldPass + fieldDrop = (fieldDrop.nil?) ? @defaultDsFieldDrop : fieldDrop + urls = (urls.nil?) ? @defaultDsPromUrls : urls + + file_name = "/opt/telegraf-test.conf" + # Copy the telegraf config file to a temp file to run telegraf in test mode with this config + FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf.conf", file_name) + + puts "config::Starting to substitute the placeholders in telegraf conf copy file for daemonset" + #Replace the placeholder config values with values from custom config + text = File.read(file_name) + new_contents = text.gsub("$AZMON_DS_PROM_INTERVAL", interval) + new_contents = new_contents.gsub("$AZMON_DS_PROM_FIELDPASS", ((fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]")) + new_contents = new_contents.gsub("$AZMON_DS_PROM_FIELDDROP", ((fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]")) + new_contents = new_contents.gsub("$AZMON_DS_PROM_URLS", ((urls.length > 0) ? ("[\"" + urls.join("\",\"") + "\"]") : "[]")) + File.open(file_name, "w") { |file| file.puts new_contents } + puts "config::Successfully substituted the placeholders in telegraf conf file for daemonset" + + #Set environment variables for telemetry + file = File.open("telemetry_prom_config_env_var", "w") + if !file.nil?
+ file.write("export TELEMETRY_DS_PROM_INTERVAL=\"#{interval}\"\n") + #Setting array lengths as environment variables for telemetry purposes + file.write("export TELEMETRY_DS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") + file.write("export TELEMETRY_DS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") + file.write("export TELEMETRY_DS_PROM_URLS_LENGTH=#{urls.length}\n") + # Close file after writing all environment variables + file.close + puts "config::Successfully created telemetry file for daemonset" + end + else + ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for daemonset, using defaults, please use right types for all settings") + end # end of type check condition + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for daemonset: #{errorStr}, using defaults, please check correctness of configmap") + puts "****************End Prometheus Config Processing********************" + end + end # end of controller type check + end + else + ConfigParseErrorLogger.logError("Controller undefined while processing prometheus config, using defaults") + end +end + +@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] +puts "****************Start Prometheus Config Processing********************" +if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it + configMapSettings = parseConfigMap + if !configMapSettings.nil? + populateSettingValuesFromConfigMap(configMapSettings) + end +else + if (File.file?(@promConfigMapMountPath)) + ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported version") + else + puts "config::No configmap mounted for prometheus custom config, using defaults" + end +end +puts "****************End Prometheus Config Processing********************" diff --git a/build/linux/installer/conf/prometheus-side-car.conf b/build/linux/installer/conf/prometheus-side-car.conf new file mode 100644 index 000000000..fd40910d9 --- /dev/null +++ b/build/linux/installer/conf/prometheus-side-car.conf @@ -0,0 +1,4 @@ + + + + diff --git a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf new file mode 100644 index 000000000..720f54820 --- /dev/null +++ b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf @@ -0,0 +1,28 @@ +[SERVICE] + #Default service flush interval is 15 seconds + Flush 15 + HTTP_Server Off + Daemon Off + storage.path /var/opt/microsoft/docker-cimprov/state/flbstore/ + storage.sync normal + storage.checksum off + storage.backlog.mem_limit 10M + Log_Level info + Parsers_File /etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf + Log_File /var/opt/microsoft/docker-cimprov/log/fluent-bit.log + +[INPUT] + Name tcp + Tag oms.container.perf.telegraf.* + Listen 0.0.0.0 + Port 25229 + Chunk_Size 1m + Buffer_Size 1m + Mem_Buf_Limit 20m + +[OUTPUT] + Name oms + EnableTelemetry true + Retry_Limit 10 + TelemetryPushIntervalSeconds 300 + Match oms.container.* \ No newline at end of file diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf new file mode 100644 index 000000000..b3b4ba1d3 --- /dev/null +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -0,0 +1,162 @@ +# Telegraf Configuration +# +# Telegraf is 
entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. +# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply prepend +# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), +# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) + + +# Global tags can be specified here in key="value" format. +[global_tags] + hostName = "placeholder_hostname" + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "60s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 3000 + + ## For failed writes, telegraf will cache metric_buffer_limit metrics for each + ## output, and will flush this buffer on a successful write. Oldest metrics + ## are dropped first when this buffer fills. + ## This buffer only fills when writes fail to output plugin(s). + metric_buffer_limit = 60000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. You shouldn't set this below + ## interval. Maximum flush_interval will be flush_interval + flush_jitter + flush_interval = "15s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Logging configuration: + ## Run telegraf with debug log messages. + debug = false + ## Run telegraf in quiet mode (error log messages only). + quiet = true + ## Specify the log file name. The empty string means to log to stderr. + logfile = "" + ## Override default hostname, if empty use os.Hostname() + #hostname = "placeholder_hostname" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = true + + +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + +# Generic socket writer capable of handling multiple socket types. 
+[[outputs.socket_writer]] + ## URL to connect to + address = "tcp://0.0.0.0:25229" + # address = "tcp://example.com:http" + # address = "tcp4://127.0.0.1:8094" + # address = "tcp6://127.0.0.1:8094" + # address = "tcp6://[2001:db8::1]:8094" + # address = "udp://127.0.0.1:8094" + # address = "udp4://127.0.0.1:8094" + # address = "udp6://127.0.0.1:8094" + # address = "unix:///tmp/telegraf.sock" + # address = "unixgram:///tmp/telegraf.sock" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Period between keep alive probes. + ## Only applies to TCP sockets. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. + # keep_alive_period = "5m" + + ## Data format to generate. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "json" + namedrop = ["agent_telemetry"] + +############################################################################### +# PROCESSOR PLUGINS # +############################################################################### + +[[processors.converter]] + [processors.converter.fields] + float = ["*"] + +#Prometheus Custom Metrics +[[inputs.prometheus]] + interval = "$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL" + + ## Scrape Kubernetes pods for the following prometheus annotations: + ## - prometheus.io/scrape: Enable scraping for this pod + ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + ## set this to `https` & most likely set the tls config. + ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. + ## - prometheus.io/port: If port is not 9102 use this annotation + $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS + $AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE + + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR + + fieldpass = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS + fielddrop = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP + + metric_version = 2 + url_tag = "scrapeUrl" + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" + + ## Use bearer token for authorization. ('bearer_token' takes priority) + bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" + ## OR + # bearer_token_string = "abc_123" + + ## Specify timeout duration for slower prometheus clients (default is 3s) + response_timeout = "15s" + + ## Optional TLS Config + tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + ## Use TLS but skip chain & host verification + insecure_skip_verify = true + +$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER + +## OSM Prometheus configuration +$AZMON_TELEGRAF_OSM_PROM_PLUGINS diff --git a/build/linux/installer/conf/telegraf-rs.conf b/build/linux/installer/conf/telegraf-rs.conf index d81196330..ee1cf8819 100644 --- a/build/linux/installer/conf/telegraf-rs.conf +++ b/build/linux/installer/conf/telegraf-rs.conf @@ -540,13 +540,13 @@ #Prometheus Custom Metrics [[inputs.prometheus]] - interval = "$AZMON_RS_PROM_INTERVAL" + interval = "$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL" ## An array of urls to scrape metrics from. - urls = $AZMON_RS_PROM_URLS + urls = $AZMON_TELEGRAF_CUSTOM_PROM_URLS ## An array of Kubernetes services to scrape metrics from. 
- kubernetes_services = $AZMON_RS_PROM_K8S_SERVICES + kubernetes_services = $AZMON_TELEGRAF_CUSTOM_PROM_K8S_SERVICES ## Scrape Kubernetes pods for the following prometheus annotations: ## - prometheus.io/scrape: Enable scraping for this pod @@ -554,10 +554,15 @@ ## set this to `https` & most likely set the tls config. ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation - $AZMON_RS_PROM_MONITOR_PODS + $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS - fieldpass = $AZMON_RS_PROM_FIELDPASS - fielddrop = $AZMON_RS_PROM_FIELDDROP + $AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE + + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR + + fieldpass = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS + fielddrop = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP metric_version = 2 url_tag = "scrapeUrl" @@ -581,7 +586,11 @@ insecure_skip_verify = true #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] -$AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER +$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER + +## OSM Prometheus configuration +$AZMON_TELEGRAF_OSM_PROM_PLUGINS + # [[inputs.exec]] # ## Commands array # interval = "15m" diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index c680f0eea..df8fbc3da 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -110,24 +110,28 @@ MAINTAINER: 'Microsoft Corporation' /opt/tomlrb/string_utils.rb; source/toml-parser/tomlrb/string_utils.rb; 644; root; root /opt/tomlrb/version.rb; source/toml-parser/tomlrb/version.rb; 644; root; root -/opt/td-agent-bit/bin/out_oms.so; intermediate/${{BUILD_CONFIGURATION}}/out_oms.so; 755; root; root -/etc/opt/microsoft/docker-cimprov/td-agent-bit.conf; build/linux/installer/conf/td-agent-bit.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf; build/linux/installer/conf/td-agent-bit-rs.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf; build/linux/installer/conf/azm-containers-parser.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/out_oms.conf; build/linux/installer/conf/out_oms.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/telegraf.conf; build/linux/installer/conf/telegraf.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf; build/linux/installer/conf/telegraf-rs.conf; 644; root; root -/opt/microsoft/docker-cimprov/bin/TelegrafTCPErrorTelemetry.sh; build/linux/installer/scripts/TelegrafTCPErrorTelemetry.sh; 755; root; root -/opt/livenessprobe.sh; build/linux/installer/scripts/livenessprobe.sh; 755; root; root -/opt/tomlparser-prom-customconfig.rb; build/linux/installer/scripts/tomlparser-prom-customconfig.rb; 755; root; root -/opt/tomlparser-mdm-metrics-config.rb; build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb; 755; root; root -/opt/tomlparser-metric-collection-config.rb; build/linux/installer/scripts/tomlparser-metric-collection-config.rb; 755; root; root +/opt/td-agent-bit/bin/out_oms.so; intermediate/${{BUILD_CONFIGURATION}}/out_oms.so; 755; root; root +/etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf; build/linux/installer/conf/prometheus-side-car.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/td-agent-bit.conf; 
build/linux/installer/conf/td-agent-bit.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/td-agent-bit-prom-side-car.conf; build/linux/installer/conf/td-agent-bit-prom-side-car.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf; build/linux/installer/conf/td-agent-bit-rs.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf; build/linux/installer/conf/azm-containers-parser.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/out_oms.conf; build/linux/installer/conf/out_oms.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/telegraf.conf; build/linux/installer/conf/telegraf.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf; build/linux/installer/conf/telegraf-prom-side-car.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf; build/linux/installer/conf/telegraf-rs.conf; 644; root; root +/opt/microsoft/docker-cimprov/bin/TelegrafTCPErrorTelemetry.sh; build/linux/installer/scripts/TelegrafTCPErrorTelemetry.sh; 755; root; root +/opt/livenessprobe.sh; build/linux/installer/scripts/livenessprobe.sh; 755; root; root +/opt/tomlparser-prom-customconfig.rb; build/common/installer/scripts/tomlparser-prom-customconfig.rb; 755; root; root +/opt/tomlparser-mdm-metrics-config.rb; build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb; 755; root; root +/opt/tomlparser-metric-collection-config.rb; build/linux/installer/scripts/tomlparser-metric-collection-config.rb; 755; root; root /opt/tomlparser-agent-config.rb; build/linux/installer/scripts/tomlparser-agent-config.rb; 755; root; root /opt/tomlparser.rb; build/common/installer/scripts/tomlparser.rb; 755; root; root /opt/td-agent-bit-conf-customizer.rb; build/common/installer/scripts/td-agent-bit-conf-customizer.rb; 755; root; root /opt/ConfigParseErrorLogger.rb; build/common/installer/scripts/ConfigParseErrorLogger.rb; 755; root; root /opt/tomlparser-npm-config.rb; build/linux/installer/scripts/tomlparser-npm-config.rb; 755; root; root +/opt/tomlparser-osm-config.rb; build/linux/installer/scripts/tomlparser-osm-config.rb; 755; root; root /opt/microsoft/omsagent/plugin/filter_cadvisor_health_container.rb; source/plugins/ruby/filter_cadvisor_health_container.rb; 644; root; root diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index e3f9fb475..a82fa28eb 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -26,15 +26,22 @@ then exit 1 fi -if [ ! -s "inotifyoutput.txt" ] +if [ -s "inotifyoutput.txt" ] then - # inotifyoutput file is empty and the grep commands for omsagent and td-agent-bit succeeded - exit 0 -else - if [ -s "inotifyoutput.txt" ] - then - # inotifyoutput file has data(config map was applied) - echo "inotifyoutput.txt has been updated - config changed" > /dev/termination-log - exit 1 - fi + # inotifyoutput file has data(config map was applied) + echo "inotifyoutput.txt has been updated - config changed" > /dev/termination-log + exit 1 fi + +# Perform the following check only for prometheus sidecar that does OSM scraping or for replicaset when sidecar scraping is disabled +if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || + ( ( -e "/etc/config/kube.conf" ) && ( ( ! 
-z "${SIDECAR_SCRAPING_ENABLED}" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ) ]]; then + if [ -s "inotifyoutput-osm.txt" ] + then + # inotifyoutput-osm file has data(config map was applied) + echo "inotifyoutput-osm.txt has been updated - config changed" > /dev/termination-log + exit 1 + fi +fi + +exit 0 diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb new file mode 100644 index 000000000..096064db8 --- /dev/null +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -0,0 +1,168 @@ +#!/usr/local/bin/ruby + +require_relative "tomlrb" +require "fileutils" +require_relative "ConfigParseErrorLogger" + +@controllerType = ENV["CONTROLLER_TYPE"] +@containerType = ENV["CONTAINER_TYPE"] +@sidecarScrapingEnabled = ENV["SIDECAR_SCRAPING_ENABLED"] + +@replicaset = "replicaset" +@prometheusSidecar = "prometheussidecar" + +if !@controllerType.nil? && !@controllerType.empty? && @controllerType.strip.casecmp(@replicaset) == 0 && + (@sidecarScrapingEnabled.nil? || (!@sidecarScrapingEnabled.nil? && !@sidecarScrapingEnabled.empty? && @sidecarScrapingEnabled.strip.casecmp("false") == 0)) + @tgfConfigFile = "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" + @tgfTestConfigFile = "/opt/telegraf-test-rs.conf" +elsif !@containerType.nil? && !@containerType.empty? && @containerType.strip.casecmp(@prometheusSidecar) == 0 + @tgfConfigFile = "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" + @tgfTestConfigFile = "/opt/telegraf-test-prom-side-car.conf" +end + +@configMapMountPath = "/etc/config/osm-settings/osm-metric-collection-configuration" +@configSchemaVersion = "" +# @tgfConfigFileSidecar = "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" +# @tgfTestConfigFile = "/opt/telegraf-test-prom-side-car.conf" +@osmMetricNamespaces = [] + +#Configurations to be used for the auto-generated input prometheus plugins for namespace filtering +@metricVersion = 2 +@monitorKubernetesPodsVersion = 2 +#@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" +@fieldPassSetting = "[\"envoy_cluster_upstream_cx_total\", \"envoy_cluster_upstream_cx_connect_fail\", \"envoy_cluster_upstream_rq\", \"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_upstream_rq_time_bucket\", \"envoy_cluster_upstream_cx_rx_bytes_total\", \"envoy_cluster_upstream_cx_tx_bytes_total\", \"envoy_cluster_upstream_cx_active\"]" +@scrapeInterval = "1m" +@urlTag = "scrapeUrl" +@bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" +@responseTimeout = "15s" +@tlsCa = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +@insecureSkipVerify = true + +# Use parser to parse the configmap toml file to a ruby structure +def parseConfigMap + begin + # Check to see if config map is created + if (File.file?(@configMapMountPath)) + puts "config::configmap container-azm-ms-osmconfig for osm metrics found, parsing values" + parsedConfig = Tomlrb.load_file(@configMapMountPath, symbolize_keys: true) + puts "config::Successfully parsed mounted config map for osm metrics" + return parsedConfig + else + puts "config::configmap container-azm-ms-osmconfig for osm metrics not mounted, using defaults" + return nil + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config map for osm metrics: #{errorStr}, using defaults, please check config map for errors") + return nil + end +end + +def checkForTypeArray(arrayValue, arrayType) + if 
(arrayValue.nil? || (arrayValue.kind_of?(Array) && ((arrayValue.length == 0) || (arrayValue.length > 0 && arrayValue[0].kind_of?(arrayType))))) + return true + else + return false + end +end + +# Use the ruby structure created after config parsing to set the right values to be used as environment variables +def populateSettingValuesFromConfigMap(parsedConfig) + begin + if !parsedConfig.nil? && + !parsedConfig[:osm_metric_collection_configuration].nil? && + !parsedConfig[:osm_metric_collection_configuration][:settings].nil? + osmPromMetricNamespaces = parsedConfig[:osm_metric_collection_configuration][:settings][:monitor_namespaces] + puts "config::osm::got:osm_metric_collection_configuration.settings.monitor_namespaces='#{osmPromMetricNamespaces}'" + + # Check to see if osm_metric_collection_configuration.settings has a valid setting for monitor_namespaces to enable scraping for specific namespaces + # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accommodate for other settings to be able - + # - to use defaults in case of nil settings + if !osmPromMetricNamespaces.nil? && checkForTypeArray(osmPromMetricNamespaces, String) + # Adding a check to see if an empty array is passed for kubernetes namespaces + if (osmPromMetricNamespaces.length > 0) + @osmMetricNamespaces = osmPromMetricNamespaces + end + end + end + rescue => errorStr + puts "config::osm::error:Exception while reading config settings for osm configuration settings - #{errorStr}, using defaults" + @osmMetricNamespaces = [] + end +end + +def replaceOsmTelegrafConfigPlaceHolders + begin + #replace place holders in configuration file + tgfConfig = File.read(@tgfTestConfigFile) #read returns only after closing the file + + if @osmMetricNamespaces.length > 0 + osmPluginConfigsWithNamespaces = "" + @osmMetricNamespaces.each do |namespace| + if !namespace.nil? + #Stripping namespaces to remove leading and trailing whitespaces + namespace.strip! + if namespace.length > 0 + osmPluginConfigsWithNamespaces += "\n[[inputs.prometheus]] + name_prefix=\"container.azm.ms.osm/\" + interval = \"#{@scrapeInterval}\" + monitor_kubernetes_pods = true + pod_scrape_scope = \"#{(@controllerType.casecmp(@replicaset) == 0) ? "cluster" : "node"}\" + monitor_kubernetes_pods_namespace = \"#{namespace}\" + fieldpass = #{@fieldPassSetting} + metric_version = #{@metricVersion} + url_tag = \"#{@urlTag}\" + bearer_token = \"#{@bearerToken}\" + response_timeout = \"#{@responseTimeout}\" + tls_ca = \"#{@tlsCa}\" + insecure_skip_verify = #{@insecureSkipVerify}\n" + end + end + end + tgfConfig = tgfConfig.gsub("$AZMON_TELEGRAF_OSM_PROM_PLUGINS", osmPluginConfigsWithNamespaces) + else + puts "Using defaults for OSM configuration since there was an error in OSM config map or no namespaces were set" + tgfConfig = tgfConfig.gsub("$AZMON_TELEGRAF_OSM_PROM_PLUGINS", "") + end + File.open(@tgfTestConfigFile, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope + puts "config::osm::Successfully substituted the OSM placeholders in #{@tgfTestConfigFile} file in sidecar container" + rescue => errorStr + # TODO: test this scenario out + puts "config::osm::error:Exception while replacing telegraf configuration settings for osm - #{errorStr}, using defaults" + end +end + +@osmConfigSchemaVersion = ENV["AZMON_OSM_CFG_SCHEMA_VERSION"] +puts "****************Start OSM Config Processing********************" +if !@osmConfigSchemaVersion.nil? && !@osmConfigSchemaVersion.empty?
&& @osmConfigSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it + configMapSettings = parseConfigMap + if !configMapSettings.nil? + populateSettingValuesFromConfigMap(configMapSettings) + # Check to see if the prometheus custom config parser has created a test config file so that we can replace the settings in the test file and run it, If not create + # a test config file by copying contents of the actual telegraf config file. + if (!File.exist?(@tgfTestConfigFile)) + # Copy the telegraf config file to a temp file to run telegraf in test mode with this config + puts "test telegraf config file #{@tgfTestConfigFile} does not exist, creating new one" + FileUtils.cp(@tgfConfigFile, @tgfTestConfigFile) + end + + replaceOsmTelegrafConfigPlaceHolders() + + # Write the telemetry to file, so that they can be set as environment variables + telemetryFile = File.open("integration_osm_config_env_var", "w") + + if !telemetryFile.nil? + telemetryFile.write("export TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT=#{@osmMetricNamespaces.length}\n") + # Close file after writing all environment variables + telemetryFile.close + else + puts "config::osm::Exception while opening file for writing OSM telemetry environment variables" + end + end +else + if (File.file?(@configMapMountPath)) + ConfigParseErrorLogger.logError("config::osm::unsupported/missing config schema version - '#{@osmConfigSchemaVersion}' , using defaults, please use supported schema version") + else + puts "config::No configmap mounted for OSM config, using defaults" + end +end +puts "****************End OSM Config Processing********************" diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb deleted file mode 100644 index 7aad580ee..000000000 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ /dev/null @@ -1,267 +0,0 @@ -#!/usr/local/bin/ruby - -require_relative "tomlrb" -require_relative "ConfigParseErrorLogger" -require "fileutils" - -@promConfigMapMountPath = "/etc/config/settings/prometheus-data-collection-settings" -@replicaset = "replicaset" -@daemonset = "daemonset" -@configSchemaVersion = "" -@defaultDsInterval = "1m" -@defaultDsPromUrls = [] -@defaultDsFieldPass = [] -@defaultDsFieldDrop = [] -@defaultRsInterval = "1m" -@defaultRsPromUrls = [] -@defaultRsFieldPass = [] -@defaultRsFieldDrop = [] -@defaultRsK8sServices = [] -@defaultRsMonitorPods = false - -#Configurations to be used for the auto-generated input prometheus plugins for namespace filtering -@metricVersion = 2 -@urlTag = "scrapeUrl" -@bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" -@responseTimeout = "15s" -@tlsCa = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" -@insecureSkipVerify = true - -# Use parser to parse the configmap toml file to a ruby structure -def parseConfigMap - begin - # Check to see if config map is created - if (File.file?(@promConfigMapMountPath)) - puts "config::configmap container-azm-ms-agentconfig for settings mounted, parsing values for prometheus config map" - parsedConfig = Tomlrb.load_file(@promConfigMapMountPath, symbolize_keys: true) - puts "config::Successfully parsed mounted prometheus config map" - return parsedConfig - else - puts "config::configmap container-azm-ms-agentconfig for settings not mounted, using defaults for prometheus scraping" - return nil - end - rescue => errorStr - ConfigParseErrorLogger.logError("Exception while parsing config 
map for prometheus config: #{errorStr}, using defaults, please check config map for errors") - return nil - end -end - -def checkForTypeArray(arrayValue, arrayType) - if (arrayValue.nil? || (arrayValue.kind_of?(Array) && ((arrayValue.length == 0) || (arrayValue.length > 0 && arrayValue[0].kind_of?(arrayType))))) - return true - else - return false - end -end - -def checkForType(variable, varType) - if variable.nil? || variable.kind_of?(varType) - return true - else - return false - end -end - -def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods) - begin - new_contents = new_contents.gsub("$AZMON_RS_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) - new_contents = new_contents.gsub("$AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") - rescue => errorStr - puts "Exception while replacing default pod monitor settings: #{errorStr}" - end - return new_contents -end - -def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting) - begin - new_contents = new_contents.gsub("$AZMON_RS_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_RS_PROM_MONITOR_PODS") - pluginConfigsWithNamespaces = "" - monitorKubernetesPodsNamespaces.each do |namespace| - if !namespace.nil? - #Stripping namespaces to remove leading and trailing whitespaces - namespace.strip! - if namespace.length > 0 - pluginConfigsWithNamespaces += "\n[[inputs.prometheus]] - interval = \"#{interval}\" - monitor_kubernetes_pods = true - monitor_kubernetes_pods_namespace = \"#{namespace}\" - fieldpass = #{fieldPassSetting} - fielddrop = #{fieldDropSetting} - metric_version = #{@metricVersion} - url_tag = \"#{@urlTag}\" - bearer_token = \"#{@bearerToken}\" - response_timeout = \"#{@responseTimeout}\" - tls_ca = \"#{@tlsCa}\" - insecure_skip_verify = #{@insecureSkipVerify}\n" - end - end - end - new_contents = new_contents.gsub("$AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER", pluginConfigsWithNamespaces) - return new_contents - rescue => errorStr - puts "Exception while creating prometheus input plugins to filter namespaces: #{errorStr}, using defaults" - replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods) - end -end - -# Use the ruby structure created after config parsing to set the right values to be used as environment variables -def populateSettingValuesFromConfigMap(parsedConfig) - # Checking to see if this is the daemonset or replicaset to parse config accordingly - controller = ENV["CONTROLLER_TYPE"] - if !controller.nil? - if !parsedConfig.nil? && !parsedConfig[:prometheus_data_collection_settings].nil? - if controller.casecmp(@replicaset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? 
- #Get prometheus replicaset custom config settings - begin - interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] - fieldPass = parsedConfig[:prometheus_data_collection_settings][:cluster][:fieldpass] - fieldDrop = parsedConfig[:prometheus_data_collection_settings][:cluster][:fielddrop] - urls = parsedConfig[:prometheus_data_collection_settings][:cluster][:urls] - kubernetesServices = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_services] - monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] - monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces] - - # Check for the right datattypes to enforce right setting values - if checkForType(interval, String) && - checkForTypeArray(fieldPass, String) && - checkForTypeArray(fieldDrop, String) && - checkForTypeArray(kubernetesServices, String) && - checkForTypeArray(urls, String) && - (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) #Checking for Boolean type, since 'Boolean' is not defined as a type in ruby - puts "config::Successfully passed typecheck for config settings for replicaset" - #if setting is nil assign default values - interval = (interval.nil?) ? @defaultRsInterval : interval - fieldPass = (fieldPass.nil?) ? @defaultRsFieldPass : fieldPass - fieldDrop = (fieldDrop.nil?) ? @defaultRsFieldDrop : fieldDrop - kubernetesServices = (kubernetesServices.nil?) ? @defaultRsK8sServices : kubernetesServices - urls = (urls.nil?) ? @defaultRsPromUrls : urls - monitorKubernetesPods = (monitorKubernetesPods.nil?) ? @defaultRsMonitorPods : monitorKubernetesPods - - file_name = "/opt/telegraf-test-rs.conf" - # Copy the telegraf config file to a temp file to run telegraf in test mode with this config - FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf", file_name) - - puts "config::Starting to substitute the placeholders in telegraf conf copy file for replicaset" - #Replace the placeholder config values with values from custom config - text = File.read(file_name) - new_contents = text.gsub("$AZMON_RS_PROM_INTERVAL", interval) - fieldPassSetting = (fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]" - new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDPASS", fieldPassSetting) - fieldDropSetting = (fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]" - new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDDROP", fieldDropSetting) - new_contents = new_contents.gsub("$AZMON_RS_PROM_URLS", ((urls.length > 0) ? ("[\"" + urls.join("\",\"") + "\"]") : "[]")) - new_contents = new_contents.gsub("$AZMON_RS_PROM_K8S_SERVICES", ((kubernetesServices.length > 0) ? ("[\"" + kubernetesServices.join("\",\"") + "\"]") : "[]")) - - # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces - # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - - # - to use defaults in case of nil settings - if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? 
&& checkForTypeArray(monitorKubernetesPodsNamespaces, String) - new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting) - monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length - else - new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods) - monitorKubernetesPodsNamespacesLength = 0 - end - - File.open(file_name, "w") { |file| file.puts new_contents } - puts "config::Successfully substituted the placeholders in telegraf conf file for replicaset" - #Set environment variables for telemetry - file = File.open("telemetry_prom_config_env_var", "w") - if !file.nil? - file.write("export TELEMETRY_RS_PROM_INTERVAL=\"#{interval}\"\n") - #Setting array lengths as environment variables for telemetry purposes - file.write("export TELEMETRY_RS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") - file.write("export TELEMETRY_RS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") - file.write("export TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH=#{kubernetesServices.length}\n") - file.write("export TELEMETRY_RS_PROM_URLS_LENGTH=#{urls.length}\n") - file.write("export TELEMETRY_RS_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") - file.write("export TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") - - # Close file after writing all environment variables - file.close - puts "config::Successfully created telemetry file for replicaset" - end - else - ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for replicaset, using defaults, please use right types for all settings") - end # end of type check condition - rescue => errorStr - ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for replicaset: #{errorStr}, using defaults") - setRsPromDefaults - puts "****************End Prometheus Config Processing********************" - end - elsif controller.casecmp(@daemonset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:node].nil? - #Get prometheus daemonset custom config settings - begin - interval = parsedConfig[:prometheus_data_collection_settings][:node][:interval] - fieldPass = parsedConfig[:prometheus_data_collection_settings][:node][:fieldpass] - fieldDrop = parsedConfig[:prometheus_data_collection_settings][:node][:fielddrop] - urls = parsedConfig[:prometheus_data_collection_settings][:node][:urls] - - # Check for the right datattypes to enforce right setting values - if checkForType(interval, String) && - checkForTypeArray(fieldPass, String) && - checkForTypeArray(fieldDrop, String) && - checkForTypeArray(urls, String) - puts "config::Successfully passed typecheck for config settings for daemonset" - - #if setting is nil assign default values - interval = (interval.nil?) ? @defaultDsInterval : interval - fieldPass = (fieldPass.nil?) ? @defaultDsFieldPass : fieldPass - fieldDrop = (fieldDrop.nil?) ? @defaultDsFieldDrop : fieldDrop - urls = (urls.nil?) ? 
@defaultDsPromUrls : urls - - file_name = "/opt/telegraf-test.conf" - # Copy the telegraf config file to a temp file to run telegraf in test mode with this config - FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf.conf", file_name) - - puts "config::Starting to substitute the placeholders in telegraf conf copy file for daemonset" - #Replace the placeholder config values with values from custom config - text = File.read(file_name) - new_contents = text.gsub("$AZMON_DS_PROM_INTERVAL", interval) - new_contents = new_contents.gsub("$AZMON_DS_PROM_FIELDPASS", ((fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]")) - new_contents = new_contents.gsub("$AZMON_DS_PROM_FIELDDROP", ((fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]")) - new_contents = new_contents.gsub("$AZMON_DS_PROM_URLS", ((urls.length > 0) ? ("[\"" + urls.join("\",\"") + "\"]") : "[]")) - File.open(file_name, "w") { |file| file.puts new_contents } - puts "config::Successfully substituted the placeholders in telegraf conf file for daemonset" - - #Set environment variables for telemetry - file = File.open("telemetry_prom_config_env_var", "w") - if !file.nil? - file.write("export TELEMETRY_DS_PROM_INTERVAL=\"#{interval}\"\n") - #Setting array lengths as environment variables for telemetry purposes - file.write("export TELEMETRY_DS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") - file.write("export TELEMETRY_DS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") - file.write("export TELEMETRY_DS_PROM_URLS_LENGTH=#{urls.length}\n") - # Close file after writing all environment variables - file.close - puts "config::Successfully created telemetry file for daemonset" - end - else - ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for daemonset, using defaults, please use right types for all settings") - end # end of type check condition - rescue => errorStr - ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for daemonset: #{errorStr}, using defaults, please check correctness of configmap") - puts "****************End Prometheus Config Processing********************" - end - end # end of controller type check - end - else - ConfigParseErrorLogger.logError("Controller undefined while processing prometheus config, using defaults") - end -end - -@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] -puts "****************Start Prometheus Config Processing********************" -if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it - configMapSettings = parseConfigMap - if !configMapSettings.nil? 
- populateSettingValuesFromConfigMap(configMapSettings) - end -else - if (File.file?(@promConfigMapMountPath)) - ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported version") - else - puts "config::No configmap mounted for prometheus custom config, using defaults" - end -end -puts "****************End Prometheus Config Processing********************" diff --git a/build/windows/installer/conf/fluent-bit.conf b/build/windows/installer/conf/fluent-bit.conf index 879ee4810..1eebe5fd6 100644 --- a/build/windows/installer/conf/fluent-bit.conf +++ b/build/windows/installer/conf/fluent-bit.conf @@ -12,6 +12,15 @@ Chunk_Size 32 Buffer_Size 64 +[INPUT] + Name tcp + Tag oms.container.perf.telegraf.* + Listen 0.0.0.0 + Port 25229 + Chunk_Size 32 + Buffer_Size 64 + Mem_Buf_Limit 5m + [OUTPUT] Name oms EnableTelemetry true diff --git a/build/windows/installer/conf/telegraf.conf b/build/windows/installer/conf/telegraf.conf new file mode 100644 index 000000000..5f4d2364e --- /dev/null +++ b/build/windows/installer/conf/telegraf.conf @@ -0,0 +1,162 @@ +# Telegraf Configuration +# +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. +# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply prepend +# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), +# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) + + +# Global tags can be specified here in key="value" format. +[global_tags] + hostName = "placeholder_hostname" + + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "60s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## For failed writes, telegraf will cache metric_buffer_limit metrics for each + ## output, and will flush this buffer on a successful write. Oldest metrics + ## are dropped first when this buffer fills. + ## This buffer only fills when writes fail to output plugin(s). + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. You shouldn't set this below + ## interval. Maximum flush_interval will be flush_interval + flush_jitter + flush_interval = "15s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. 
+ ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Logging configuration: + ## Run telegraf with debug log messages. + debug = false + ## Run telegraf in quiet mode (error log messages only). + quiet = true + ## Specify the log file name. The empty string means to log to stderr. + logfile = "" + ## Override default hostname, if empty use os.Hostname() + #hostname = "placeholder_hostname" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = true + + +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + +# Generic socket writer capable of handling multiple socket types. +[[outputs.socket_writer]] + ## URL to connect to + address = "tcp://0.0.0.0:25229" + # address = "tcp://example.com:http" + # address = "tcp4://127.0.0.1:8094" + # address = "tcp6://127.0.0.1:8094" + # address = "tcp6://[2001:db8::1]:8094" + # address = "udp://127.0.0.1:8094" + # address = "udp4://127.0.0.1:8094" + # address = "udp6://127.0.0.1:8094" + # address = "unix:///tmp/telegraf.sock" + # address = "unixgram:///tmp/telegraf.sock" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Period between keep alive probes. + ## Only applies to TCP sockets. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. + # keep_alive_period = "5m" + + ## Data format to generate. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "json" + namedrop = ["agent_telemetry"] + #tagdrop = ["AgentVersion","AKS_RESOURCE_ID", "ACS_RESOURCE_NAME", "Region","ClusterName","ClusterType", "Computer", "ControllerType"] + +############################################################################### +# PROCESSOR PLUGINS # +############################################################################### + +[[processors.converter]] + [processors.converter.fields] + float = ["*"] + +#Prometheus Custom Metrics +[[inputs.prometheus]] + interval = "$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL" + + ## Scrape Kubernetes pods for the following prometheus annotations: + ## - prometheus.io/scrape: Enable scraping for this pod + ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + ## set this to `https` & most likely set the tls config. + ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. 
+ ## - prometheus.io/port: If port is not 9102 use this annotation + $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS + $AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR + + fieldpass = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS + fielddrop = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP + + metric_version = 2 + url_tag = "scrapeUrl" + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" + + ## Use bearer token for authorization. ('bearer_token' takes priority) + bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" + ## OR + # bearer_token_string = "abc_123" + + ## Specify timeout duration for slower prometheus clients (default is 3s) + response_timeout = "15s" + + ## Optional TLS Config + tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + #tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + insecure_skip_verify = true + +$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index 82d210f3d..8868b86bb 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -81,6 +81,12 @@ spec: valueFrom: fieldRef: fieldPath: status.hostIP + - name: PODNAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SIDECAR_SCRAPING_ENABLED + value: "false" volumeMounts: - mountPath: C:\ProgramData\docker\containers name: docker-windows-containers diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index 37b8faacc..9b6656e9c 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -72,7 +72,9 @@ spec: value: {{ .Values.Azure.Extension.Name | quote }} {{- end }} - name: USER_ASSIGNED_IDENTITY_CLIENT_ID - value: "" + value: "" + - name: SIDECAR_SCRAPING_ENABLED + value: "false" - name: ISTEST value: {{ .Values.omsagent.ISTEST | quote }} securityContext: @@ -109,6 +111,9 @@ spec: - mountPath: /etc/config/settings/adx name: omsagent-adx-secret readOnly: true + - mountPath: /etc/config/osm-settings + name: osm-settings-vol-config + readOnly: true livenessProbe: exec: command: @@ -157,5 +162,9 @@ spec: - name: omsagent-adx-secret secret: secretName: omsagent-adx-secret - optional: true + optional: true + - name: osm-settings-vol-config + configMap: + name: container-azm-ms-osmconfig + optional: true {{- end }} diff --git a/kubernetes/container-azm-ms-agentconfig.yaml b/kubernetes/container-azm-ms-agentconfig.yaml index aec1bb456..e38d9b4ab 100644 --- a/kubernetes/container-azm-ms-agentconfig.yaml +++ b/kubernetes/container-azm-ms-agentconfig.yaml @@ -76,6 +76,17 @@ data: ## ex: monitor_kubernetes_pods_namespaces = ["default1", "default2", "default3"] # monitor_kubernetes_pods_namespaces = ["default1"] + ## Label selector to target pods which have the specified label + ## This will take effect when monitor_kubernetes_pods is set to true + ## Reference the docs at https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + # kubernetes_label_selector = "env=dev,app=nginx" + + ## Field selector to target pods 
which have the specified field + ## This will take effect when monitor_kubernetes_pods is set to true + ## Reference the docs at https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/ + ## eg. To scrape pods on a specific node + # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" + [prometheus_data_collection_settings.node] # Node level scrape endpoint(s). These metrics will be scraped from agent's DaemonSet running in every node in the cluster # Any errors related to prometheus scraping can be found in the KubeMonAgentEvents table in the Log Analytics workspace that the cluster is sending data to. diff --git a/kubernetes/container-azm-ms-osmconfig.yaml b/kubernetes/container-azm-ms-osmconfig.yaml new file mode 100644 index 000000000..05b7ac3ed --- /dev/null +++ b/kubernetes/container-azm-ms-osmconfig.yaml @@ -0,0 +1,17 @@ +kind: ConfigMap +apiVersion: v1 +data: + schema-version: + #string.used by agent to parse OSM config. supported versions are {v1}. Configs with other schema versions will be rejected by the agent. + v1 + config-version: + #string.used by OSM addon team to keep track of this config file's version in their source control/repository (max allowed 10 chars, other chars will be truncated) + ver1 + osm-metric-collection-configuration: |- + # OSM metric collection settings + [osm_metric_collection_configuration.settings] + # Namespaces to monitor + # monitor_namespaces = ["namespace1", "namespace2"] +metadata: + name: container-azm-ms-osmconfig + namespace: kube-system diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index bee718a31..bcdc31330 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -17,7 +17,7 @@ ENV KUBE_CLIENT_BACKOFF_BASE 1 ENV KUBE_CLIENT_BACKOFF_DURATION 0 ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python-ctypes init-system-helpers net-tools rsyslog cron vim dmidecode apt-transport-https gnupg && rm -rf /var/lib/apt/lists/* -COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs mdsd.xml envmdsd $tmpdir/ +COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs defaultpromenvvariables-sidecar mdsd.xml envmdsd $tmpdir/ WORKDIR ${tmpdir} # copy docker provider shell bundle to use the agent image diff --git a/kubernetes/linux/defaultpromenvvariables-rs b/kubernetes/linux/defaultpromenvvariables-rs index 1346e62b9..920f4e90e 100644 --- a/kubernetes/linux/defaultpromenvvariables-rs +++ b/kubernetes/linux/defaultpromenvvariables-rs @@ -1,7 +1,12 @@ -export AZMON_RS_PROM_INTERVAL="1m" -export AZMON_RS_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" -export AZMON_RS_PROM_FIELDPASS="[]" -export AZMON_RS_PROM_FIELDDROP="[]" -export AZMON_RS_PROM_URLS="[]" -export AZMON_RS_PROM_K8S_SERVICES="[]" -export AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" +export AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL="1m" +export AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" +export AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE="pod_scrape_scope = 'cluster'" +export AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_URLS="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_K8S_SERVICES="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" +export AZMON_TELEGRAF_OSM_PROM_PLUGINS="" +export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR="kubernetes_label_selector = ''" +export 
AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR="kubernetes_field_selector = ''" + diff --git a/kubernetes/linux/defaultpromenvvariables-sidecar b/kubernetes/linux/defaultpromenvvariables-sidecar new file mode 100644 index 000000000..3301488d8 --- /dev/null +++ b/kubernetes/linux/defaultpromenvvariables-sidecar @@ -0,0 +1,9 @@ +export AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL="1m" +export AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" +export AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE="pod_scrape_scope = 'node'" +export AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" +export AZMON_TELEGRAF_OSM_PROM_PLUGINS="" +export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR="kubernetes_label_selector = ''" +export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR="kubernetes_field_selector = ''" diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index c4067f25e..71e46875b 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -2,7 +2,17 @@ if [ -e "/etc/config/kube.conf" ]; then cat /etc/config/kube.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf +elif [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then + echo "setting omsagent conf file for prometheus sidecar" + cat /etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf + # omsadmin.sh replaces %MONITOR_AGENT_PORT% and %SYSLOG_PORT% in the monitor.conf and syslog.conf with default ports 25324 and 25224. + # Since we are running 2 omsagents in the same pod, we need to use a different port for the sidecar, + # else we will see the Address already in use - bind(2) for 0.0.0.0:253(2)24 error. + # Look into omsadmin.sh scripts's configure_monitor_agent()/configure_syslog() and find_available_port() methods for more info. + sed -i -e 's/port %MONITOR_AGENT_PORT%/port 25326/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/monitor.conf + sed -i -e 's/port %SYSLOG_PORT%/port 25226/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/syslog.conf else + echo "setting omsagent conf file for daemonset" sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf fi sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/syslog.conf @@ -28,6 +38,12 @@ sudo setfacl -m user:omsagent:rwx /var/opt/microsoft/docker-cimprov/log #Run inotify as a daemon to track changes to the mounted configmap. inotifywait /etc/config/settings --daemon --recursive --outfile "/opt/inotifyoutput.txt" --event create,delete --format '%e : %T' --timefmt '+%s' +#Run inotify as a daemon to track changes to the mounted configmap for OSM settings. +if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || + ( ( -e "/etc/config/kube.conf" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ]]; then + inotifywait /etc/config/osm-settings --daemon --recursive --outfile "/opt/inotifyoutput-osm.txt" --event create,delete --format '%e : %T' --timefmt '+%s' +fi + #resourceid override for loganalytics data. if [ -z $AKS_RESOURCE_ID ]; then echo "not setting customResourceId" @@ -68,6 +84,24 @@ if [ -e "/etc/config/settings/config-version" ] && [ -s "/etc/config/settings/ echo "AZMON_AGENT_CFG_FILE_VERSION:$AZMON_AGENT_CFG_FILE_VERSION" fi +#set OSM config schema version +if [[ ( ( ! 
-e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || + ( ( -e "/etc/config/kube.conf" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ]]; then + if [ -e "/etc/config/osm-settings/schema-version" ] && [ -s "/etc/config/osm-settings/schema-version" ]; then + #trim + osm_config_schema_version="$(cat /etc/config/osm-settings/schema-version | xargs)" + #remove all spaces + osm_config_schema_version="${osm_config_schema_version//[[:space:]]/}" + #take first 10 characters + osm_config_schema_version="$(echo $osm_config_schema_version| cut -c1-10)" + + export AZMON_OSM_CFG_SCHEMA_VERSION=$osm_config_schema_version + echo "export AZMON_OSM_CFG_SCHEMA_VERSION=$osm_config_schema_version" >> ~/.bashrc + source ~/.bashrc + echo "AZMON_OSM_CFG_SCHEMA_VERSION:$AZMON_OSM_CFG_SCHEMA_VERSION" + fi +fi + export PROXY_ENDPOINT="" # Check for internet connectivity or workspace deletion @@ -193,71 +227,58 @@ echo "export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey" >> ~/.bashrc source ~/.bashrc +if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then + #Parse the configmap to set the right environment variables. + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser.rb -#Parse the configmap to set the right environment variables. -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser.rb - -cat config_env_var | while read line; do - #echo $line - echo $line >> ~/.bashrc -done -source config_env_var - + cat config_env_var | while read line; do + echo $line >> ~/.bashrc + done + source config_env_var +fi #Parse the configmap to set the right environment variables for agent config. #Note > tomlparser-agent-config.rb has to be parsed first before td-agent-bit-conf-customizer.rb for fbit agent settings -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-agent-config.rb +if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-agent-config.rb -cat agent_config_env_var | while read line; do - #echo $line - echo $line >> ~/.bashrc -done -source agent_config_env_var + cat agent_config_env_var | while read line; do + #echo $line + echo $line >> ~/.bashrc + done + source agent_config_env_var -#Parse the configmap to set the right environment variables for network policy manager (npm) integration. -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-npm-config.rb + #Parse the configmap to set the right environment variables for network policy manager (npm) integration. + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-npm-config.rb -cat integration_npm_config_env_var | while read line; do - #echo $line - echo $line >> ~/.bashrc -done -source integration_npm_config_env_var + cat integration_npm_config_env_var | while read line; do + #echo $line + echo $line >> ~/.bashrc + done + source integration_npm_config_env_var +fi #Replace the placeholders in td-agent-bit.conf file for fluentbit with custom/default values in daemonset -if [ ! -e "/etc/config/kube.conf" ]; then +if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then /opt/microsoft/omsagent/ruby/bin/ruby td-agent-bit-conf-customizer.rb fi #Parse the prometheus configmap to create a file with new custom settings. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-prom-customconfig.rb -#If config parsing was successful, a copy of the conf file with replaced custom settings file is created -if [ ! 
-e "/etc/config/kube.conf" ]; then - if [ -e "/opt/telegraf-test.conf" ]; then - echo "****************Start Telegraf in Test Mode**************************" - /opt/telegraf --config /opt/telegraf-test.conf -test - if [ $? -eq 0 ]; then - mv "/opt/telegraf-test.conf" "/etc/opt/microsoft/docker-cimprov/telegraf.conf" - fi - echo "****************End Telegraf Run in Test Mode**************************" - fi -else - if [ -e "/opt/telegraf-test-rs.conf" ]; then - echo "****************Start Telegraf in Test Mode**************************" - /opt/telegraf --config /opt/telegraf-test-rs.conf -test - if [ $? -eq 0 ]; then - mv "/opt/telegraf-test-rs.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" - fi - echo "****************End Telegraf Run in Test Mode**************************" - fi -fi - #Setting default environment variables to be used in any case of failure in the above steps if [ ! -e "/etc/config/kube.conf" ]; then - cat defaultpromenvvariables | while read line; do - echo $line >> ~/.bashrc - done - source defaultpromenvvariables + if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then + cat defaultpromenvvariables-sidecar | while read line; do + echo $line >> ~/.bashrc + done + source defaultpromenvvariables-sidecar + else + cat defaultpromenvvariables | while read line; do + echo $line >> ~/.bashrc + done + source defaultpromenvvariables + fi else cat defaultpromenvvariables-rs | while read line; do echo $line >> ~/.bashrc @@ -273,21 +294,37 @@ if [ -e "telemetry_prom_config_env_var" ]; then source telemetry_prom_config_env_var fi + #Parse the configmap to set the right environment variables for MDM metrics configuration for Alerting. -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-mdm-metrics-config.rb +if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-mdm-metrics-config.rb -cat config_mdm_metrics_env_var | while read line; do - echo $line >> ~/.bashrc -done -source config_mdm_metrics_env_var + cat config_mdm_metrics_env_var | while read line; do + echo $line >> ~/.bashrc + done + source config_mdm_metrics_env_var -#Parse the configmap to set the right environment variables for metric collection settings -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-metric-collection-config.rb + #Parse the configmap to set the right environment variables for metric collection settings + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-metric-collection-config.rb -cat config_metric_collection_env_var | while read line; do - echo $line >> ~/.bashrc -done -source config_metric_collection_env_var + cat config_metric_collection_env_var | while read line; do + echo $line >> ~/.bashrc + done + source config_metric_collection_env_var +fi + +# OSM scraping to be done in replicaset if sidecar car scraping is disabled and always do the scraping from the sidecar (It will always be either one of the two) +if [[ ( ( ! 
-e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || + ( ( -e "/etc/config/kube.conf" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ]]; then + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb + + if [ -e "integration_osm_config_env_var" ]; then + cat integration_osm_config_env_var | while read line; do + echo $line >> ~/.bashrc + done + source integration_osm_config_env_var + fi +fi #Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request echo "Making wget request to cadvisor endpoint with port 10250" @@ -511,7 +548,7 @@ fi #start oneagent -if [ ! -e "/etc/config/kube.conf" ]; then +if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then if [ ! -z $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE ]; then echo "container logs configmap route is $AZMON_CONTAINER_LOGS_ROUTE" echo "container logs effective route is $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" @@ -552,18 +589,56 @@ if [ ! -e "/etc/config/kube.conf" ]; then fi echo "************end oneagent log routing checks************" +#If config parsing was successful, a copy of the conf file with replaced custom settings file is created +if [ ! -e "/etc/config/kube.conf" ]; then + if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ] && [ -e "/opt/telegraf-test-prom-side-car.conf" ]; then + echo "****************Start Telegraf in Test Mode**************************" + /opt/telegraf --config /opt/telegraf-test-prom-side-car.conf -test + if [ $? -eq 0 ]; then + mv "/opt/telegraf-test-prom-side-car.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" + fi + echo "****************End Telegraf Run in Test Mode**************************" + else + if [ -e "/opt/telegraf-test.conf" ]; then + echo "****************Start Telegraf in Test Mode**************************" + /opt/telegraf --config /opt/telegraf-test.conf -test + if [ $? -eq 0 ]; then + mv "/opt/telegraf-test.conf" "/etc/opt/microsoft/docker-cimprov/telegraf.conf" + fi + echo "****************End Telegraf Run in Test Mode**************************" + fi + fi +else + if [ -e "/opt/telegraf-test-rs.conf" ]; then + echo "****************Start Telegraf in Test Mode**************************" + /opt/telegraf --config /opt/telegraf-test-rs.conf -test + if [ $? -eq 0 ]; then + mv "/opt/telegraf-test-rs.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" + fi + echo "****************End Telegraf Run in Test Mode**************************" + fi +fi + #telegraf & fluentbit requirements if [ ! 
-e "/etc/config/kube.conf" ]; then - if [ "$CONTAINER_RUNTIME" == "docker" ]; then - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & - telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then + echo "starting fluent-bit and setting telegraf conf file for prometheus sidecar" + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-prom-side-car.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" else - echo "since container run time is $CONTAINER_RUNTIME update the container log fluentbit Parser to cri from docker" - sed -i 's/Parser.docker*/Parser cri/' /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & - telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + echo "starting fluent-bit and setting telegraf conf file for daemonset" + if [ "$CONTAINER_RUNTIME" == "docker" ]; then + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + else + echo "since container run time is $CONTAINER_RUNTIME update the container log fluentbit Parser to cri from docker" + sed -i 's/Parser.docker*/Parser cri/' /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + fi fi else + echo "starting fluent-bit and setting telegraf conf file for replicaset" /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e /opt/td-agent-bit/bin/out_oms.so & telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" fi diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index fe6c0565a..218e3c717 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -60,7 +60,13 @@ sudo apt-get install libcap2-bin -y #service telegraf stop -wget https://github.com/microsoft/Docker-Provider/releases/download/5.0.0.0/telegraf +#wget https://github.com/microsoft/Docker-Provider/releases/download/5.0.0.0/telegraf + +#1.18 pre-release +wget https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0_linux_amd64.tar.gz +tar -zxvf telegraf-1.18.0_linux_amd64.tar.gz + +mv /opt/telegraf-1.18.0/usr/bin/telegraf /opt/telegraf chmod 777 /opt/telegraf diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index ebf0257af..c25b9bfd4 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -443,6 +443,59 @@ spec: - /opt/livenessprobe.sh initialDelaySeconds: 60 periodSeconds: 60 +#Only in sidecar scraping mode + - name: omsagent-prometheus + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020" + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 500m + memory: 400Mi + requests: + cpu: 75m + memory: 225Mi + env: + # azure devops pipeline uses AKS_RESOURCE_ID and AKS_REGION hence ensure to uncomment these + - name: AKS_RESOURCE_ID + value: "VALUE_AKS_RESOURCE_ID_VALUE" + - name: AKS_REGION + value: "VALUE_AKS_RESOURCE_REGION_VALUE" + #Uncomment below two lines for ACS clusters 
and set the cluster names manually. Also comment out the above two lines for ACS clusters + #- name: ACS_RESOURCE_NAME + # value: "my_acs_cluster_name" + - name: CONTAINER_TYPE + value: "PrometheusSidecar" + - name: CONTROLLER_TYPE + value: "DaemonSet" + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + # Update this with the user assigned msi client id for omsagent + - name: USER_ASSIGNED_IDENTITY_CLIENT_ID + value: "" + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/kubernetes/host + name: azure-json-path + - mountPath: /etc/omsagent-secret + name: omsagent-secret + readOnly: true + - mountPath: /etc/config/settings + name: settings-vol-config + readOnly: true + - mountPath: /etc/config/osm-settings + name: osm-settings-vol-config + readOnly: true + livenessProbe: + exec: + command: + - /bin/bash + - -c + - /opt/livenessprobe.sh + initialDelaySeconds: 60 + periodSeconds: 60 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -502,6 +555,10 @@ spec: secret: secretName: omsagent-adx-secret optional: true + - name: osm-settings-vol-config + configMap: + name: container-azm-ms-osmconfig + optional: true --- apiVersion: apps/v1 kind: Deployment @@ -559,6 +616,9 @@ spec: # Update this with the user assigned msi client id for omsagent - name: USER_ASSIGNED_IDENTITY_CLIENT_ID value: "" + # Add the below environment variable to true only in sidecar enabled regions, else set it to false + - name: SIDECAR_SCRAPING_ENABLED + value: "true" securityContext: privileged: true ports: @@ -586,6 +646,8 @@ spec: readOnly: true - mountPath: /etc/config/settings/adx name: omsagent-adx-secret + - mountPath: /etc/config/osm-settings + name: osm-settings-vol-config readOnly: true livenessProbe: exec: @@ -658,6 +720,10 @@ spec: secret: secretName: omsagent-adx-secret optional: true + - name: osm-settings-vol-config + configMap: + name: container-azm-ms-osmconfig + optional: true --- apiVersion: apps/v1 kind: DaemonSet @@ -711,10 +777,16 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + - name: PODNAME + valueFrom: + fieldRef: + fieldPath: metadata.name - name: NODE_IP valueFrom: fieldRef: fieldPath: status.hostIP + - name: SIDECAR_SCRAPING_ENABLED + value: "true" volumeMounts: - mountPath: C:\ProgramData\docker\containers name: docker-windows-containers diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index d4f118449..c0bebcc93 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -47,6 +47,7 @@ RUN ./setup.ps1 COPY main.ps1 /opt/omsagentwindows/scripts/powershell COPY ./omsagentwindows/installer/scripts/filesystemwatcher.ps1 /opt/omsagentwindows/scripts/powershell COPY ./omsagentwindows/installer/scripts/livenessprobe.cmd /opt/omsagentwindows/scripts/cmd/ +COPY setdefaulttelegrafenvvariables.ps1 /opt/omsagentwindows/scripts/powershell # copy ruby scripts to /opt folder COPY ./omsagentwindows/installer/scripts/*.rb /opt/omsagentwindows/scripts/ruby/ @@ -62,6 +63,9 @@ COPY ./omsagentwindows/installer/conf/fluent-docker-parser.conf /etc/fluent/ COPY ./omsagentwindows/installer/conf/fluent-bit.conf /etc/fluent-bit COPY ./omsagentwindows/installer/conf/out_oms.conf /etc/omsagentwindows +# copy telegraf conf file +COPY ./omsagentwindows/installer/conf/telegraf.conf /etc/telegraf/ + # copy keepcert alive ruby scripts COPY ./omsagentwindows/installer/scripts/rubyKeepCertificateAlive/*.rb /etc/fluent/plugin/ diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 
722392157..95cba2579 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -273,9 +273,9 @@ function Get-ContainerRuntime { return $containerRuntime } -function Start-Fluent { +function Start-Fluent-Telegraf { - # Run fluent-bit service first so that we do not miss any logs being forwarded by the fluentd service. + # Run fluent-bit service first so that we do not miss any logs being forwarded by the fluentd service and telegraf service. # Run fluent-bit as a background job. Switch this to a windows service once fluent-bit supports natively running as a windows service Start-Job -ScriptBlock { Start-Process -NoNewWindow -FilePath "C:\opt\fluent-bit\bin\fluent-bit.exe" -ArgumentList @("-c", "C:\etc\fluent-bit\fluent-bit.conf", "-e", "C:\opt\omsagentwindows\out_oms.so") } @@ -289,35 +289,99 @@ function Start-Fluent { (Get-Content -Path C:/etc/fluent/fluent.conf -Raw) -replace 'fluent-docker-parser.conf','fluent-cri-parser.conf' | Set-Content C:/etc/fluent/fluent.conf } + # Start telegraf only in sidecar scraping mode + $sidecarScrapingEnabled = [System.Environment]::GetEnvironmentVariable('SIDECAR_SCRAPING_ENABLED') + if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabled.ToLower() -eq 'true') + { + Write-Host "Starting telegraf..." + Start-Telegraf + } + fluentd --reg-winsvc i --reg-winsvc-auto-start --winsvc-name fluentdwinaks --reg-winsvc-fluentdopt '-c C:/etc/fluent/fluent.conf -o C:/etc/fluent/fluent.log' Notepad.exe | Out-Null } -function Generate-Certificates { - Write-Host "Generating Certificates" - C:\\opt\\omsagentwindows\\certgenerator\\certificategenerator.exe -} +function Start-Telegraf { + # Set default telegraf environment variables for prometheus scraping + Write-Host "**********Setting default environment variables for telegraf prometheus plugin..." + .\setdefaulttelegrafenvvariables.ps1 + + # run prometheus custom config parser + Write-Host "**********Running config parser for custom prometheus scraping**********" + ruby /opt/omsagentwindows/scripts/ruby/tomlparser-prom-customconfig.rb + Write-Host "**********End running config parser for custom prometheus scraping**********" + + + # Set required environment variable for telegraf prometheus plugin to run properly + Write-Host "Setting required environment variables for telegraf prometheus input plugin to run properly..." + $kubernetesServiceHost = [System.Environment]::GetEnvironmentVariable("KUBERNETES_SERVICE_HOST", "process") + if (![string]::IsNullOrEmpty($kubernetesServiceHost)) { + [System.Environment]::SetEnvironmentVariable("KUBERNETES_SERVICE_HOST", $kubernetesServiceHost, "machine") + Write-Host "Successfully set environment variable KUBERNETES_SERVICE_HOST - $($kubernetesServiceHost) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable KUBERNETES_SERVICE_HOST for target 'machine' since it is either null or empty" + } + + $kubernetesServicePort = [System.Environment]::GetEnvironmentVariable("KUBERNETES_SERVICE_PORT", "process") + if (![string]::IsNullOrEmpty($kubernetesServicePort)) { + [System.Environment]::SetEnvironmentVariable("KUBERNETES_SERVICE_PORT", $kubernetesServicePort, "machine") + Write-Host "Successfully set environment variable KUBERNETES_SERVICE_PORT - $($kubernetesServicePort) for target 'machine'..." 
+ } + else { + Write-Host "Failed to set environment variable KUBERNETES_SERVICE_PORT for target 'machine' since it is either null or empty" + } + + $nodeIp = [System.Environment]::GetEnvironmentVariable("NODE_IP", "process") + if (![string]::IsNullOrEmpty($nodeIp)) { + [System.Environment]::SetEnvironmentVariable("NODE_IP", $nodeIp, "machine") + Write-Host "Successfully set environment variable NODE_IP - $($nodeIp) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable NODE_IP for target 'machine' since it is either null or empty" + } -function Bootstrap-CACertificates { + Write-Host "Installing telegraf service" + C:\opt\telegraf\telegraf.exe --service install --config "C:\etc\telegraf\telegraf.conf" + + # Setting delay auto start for telegraf since there have been known issues with windows server and telegraf - + # https://github.com/influxdata/telegraf/issues/4081 + # https://github.com/influxdata/telegraf/issues/3601 try { - # This is required when the root CA certs are different for some clouds. - $caCerts=Invoke-WebRequest 'http://168.63.129.16/machine?comp=acmspackage&type=cacertificates&ext=json' -UseBasicParsing | ConvertFrom-Json - if (![string]::IsNullOrEmpty($caCerts)) { - $certificates = $caCerts.Certificates - for ($index = 0; $index -lt $certificates.Length ; $index++) { - $name=$certificates[$index].Name - $certificates[$index].CertBody > $name - Write-Host "name: $($name)" - Import-Certificate -FilePath .\$name -CertStoreLocation 'Cert:\LocalMachine\Root' -Verbose - } + $serverName = [System.Environment]::GetEnvironmentVariable("PODNAME", "process") + if (![string]::IsNullOrEmpty($serverName)) { + sc.exe \\$serverName config telegraf start= delayed-auto + Write-Host "Successfully set delayed start for telegraf" + + } else { + Write-Host "Failed to get environment variable PODNAME to set delayed telegraf start" } } catch { - $e = $_.Exception - Write-Host $e - Write-Host "exception occured in Bootstrap-CACertificates..." + $e = $_.Exception + Write-Host $e + Write-Host "exception occured in delayed telegraf start.. continuing without exiting" } + Write-Host "Running telegraf service in test mode" + C:\opt\telegraf\telegraf.exe --config "C:\etc\telegraf\telegraf.conf" --test + Write-Host "Starting telegraf service" + C:\opt\telegraf\telegraf.exe --service start + + # Trying to start telegraf again if it did not start due to fluent bit not being ready at startup + Get-Service telegraf | findstr Running + if ($? -eq $false) + { + Write-Host "trying to start telegraf in again in 30 seconds, since fluentbit might not have been ready..." + Start-Sleep -s 30 + C:\opt\telegraf\telegraf.exe --service start + Get-Service telegraf + } +} + +function Generate-Certificates { + Write-Host "Generating Certificates" + C:\\opt\\omsagentwindows\\certgenerator\\certificategenerator.exe } function Test-CertificatePath { @@ -346,16 +410,9 @@ Remove-WindowsServiceIfItExists "fluentdwinaks" Set-EnvironmentVariables Start-FileSystemWatcher -#Bootstrapping CA certs for non public clouds and AKS clusters -$aksResourceId = [System.Environment]::GetEnvironmentVariable("AKS_RESOURCE_ID") -if (![string]::IsNullOrEmpty($aksResourceId) -and $aksResourceId.ToLower().Contains("/microsoft.containerservice/managedclusters/")) -{ - Bootstrap-CACertificates -} - Generate-Certificates Test-CertificatePath -Start-Fluent +Start-Fluent-Telegraf # List all powershell processes running. 
This should have main.ps1 and filesystemwatcher.ps1 Get-WmiObject Win32_process | Where-Object { $_.Name -match 'powershell' } | Format-Table -Property Name, CommandLine, ProcessId diff --git a/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 b/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 new file mode 100644 index 000000000..269894139 --- /dev/null +++ b/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 @@ -0,0 +1,17 @@ +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL", "1m", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL", "1m", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", "monitor_kubernetes_pods = false", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", "monitor_kubernetes_pods = false", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE", "pod_scrape_scope = 'node'", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE", "pod_scrape_scope = 'node'", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS", "[]", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS", "[]", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP", "[]", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP", "[]", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER", " ", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER", " ", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", "kubernetes_label_selector = ''", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", "kubernetes_label_selector = ''", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", "kubernetes_field_selector = ''", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", "kubernetes_field_selector = ''", "machine") + diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index dd6d52a11..25aad5e16 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -8,10 +8,12 @@ Write-Host ('Creating folder structure') New-Item -Type Directory -Path /opt/fluent-bit New-Item -Type Directory -Path /opt/scripts/ruby + New-Item -Type Directory -Path /opt/telegraf New-Item -Type Directory -Path /etc/fluent-bit New-Item -Type Directory -Path /etc/fluent New-Item -Type Directory -Path /etc/omsagentwindows + New-Item -Type Directory -Path /etc/telegraf New-Item -Type Directory -Path /etc/config/settings/ New-Item -Type Directory -Path /etc/config/adx/ @@ -32,6 +34,20 @@ Write-Host ('Installing Fluent Bit'); } Write-Host ('Finished Installing Fluentbit') +Write-Host ('Installing Telegraf'); +try { + $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0_windows_amd64.zip' + Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf.zip + Expand-Archive -Path /installation/telegraf.zip -Destination /installation/telegraf + Move-Item -Path /installation/telegraf/*/* -Destination 
/opt/telegraf/ -ErrorAction SilentlyContinue +} +catch { + $ex = $_.Exception + Write-Host "exception while downloading telegraf for windows" + Write-Host $ex + exit 1 +} +Write-Host ('Finished downloading Telegraf') Write-Host ('Installing Visual C++ Redistributable Package') $vcRedistLocation = 'https://aka.ms/vs/16/release/vc_redist.x64.exe' diff --git a/scripts/build/windows/install-build-pre-requisites.ps1 b/scripts/build/windows/install-build-pre-requisites.ps1 index b5e6e2d18..3bb56ac2a 100755 --- a/scripts/build/windows/install-build-pre-requisites.ps1 +++ b/scripts/build/windows/install-build-pre-requisites.ps1 @@ -21,7 +21,7 @@ function Install-Go { # install go lang Write-Host("installing go ...") - Start-Process msiexec.exe -Wait -ArgumentList '/I ' + $output + '/quiet' + Start-Process msiexec.exe -Wait -ArgumentList '/I ', $output, '/quiet' Write-Host("installing go completed") Write-Host "updating PATH variable" @@ -102,7 +102,7 @@ function Install-DotNetCoreSDK() { # install dotNet core sdk Write-Host("installing .net core sdk 3.1 ...") - Start-Process msiexec.exe -Wait -ArgumentList '/I ' + $output + '/quiet' + Start-Process msiexec.exe -Wait -ArgumentList '/I ', $output, '/quiet' Write-Host("installing .net core sdk 3.1 completed") } @@ -129,7 +129,7 @@ function Install-Docker() { # install docker Write-Host("installing docker for desktop ...") - Start-Process msiexec.exe -Wait -ArgumentList '/I ' + $output + '/quiet' + Start-Process msiexec.exe -Wait -ArgumentList '/I ', $output, '/quiet' Write-Host("installing docker for desktop completed") } diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 0bd983297..d35acad3d 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -1491,4 +1491,4 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { Log("Running in replicaset. 
Disabling container enrichment caching & updates \n") } -} +} \ No newline at end of file diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index 3d30ac5aa..48f82a9ab 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -10,9 +10,9 @@ import ( "strings" "time" + "github.com/fluent/fluent-bit-go/output" "github.com/microsoft/ApplicationInsights-Go/appinsights" "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts" - "github.com/fluent/fluent-bit-go/output" ) var ( @@ -44,33 +44,45 @@ var ( ContainerLogsMDSDClientCreateErrors float64 //Tracks the number of write/send errors to ADX for containerlogs (uses ContainerLogTelemetryTicker) ContainerLogsSendErrorsToADXFromFluent float64 - //Tracks the number of ADX client create errors for containerlogs (uses ContainerLogTelemetryTicker) + //Tracks the number of ADX client create errors for containerlogs (uses ContainerLogTelemetryTicker) ContainerLogsADXClientCreateErrors float64 + //Tracks the number of OSM namespaces and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) + OSMNamespaceCount int + //Tracks whether monitor kubernetes pods is set to true and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) + PromMonitorPods string + //Tracks the number of monitor kubernetes pods namespaces and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) + PromMonitorPodsNamespaceLength int + //Tracks the number of monitor kubernetes pods label selectors and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) + PromMonitorPodsLabelSelectorLength int + //Tracks the number of monitor kubernetes pods field selectors and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) + PromMonitorPodsFieldSelectorLength int ) const ( - clusterTypeACS = "ACS" - clusterTypeAKS = "AKS" - envAKSResourceID = "AKS_RESOURCE_ID" - envACSResourceName = "ACS_RESOURCE_NAME" - envAppInsightsAuth = "APPLICATIONINSIGHTS_AUTH" - envAppInsightsEndpoint = "APPLICATIONINSIGHTS_ENDPOINT" - metricNameAvgFlushRate = "ContainerLogAvgRecordsFlushedPerSec" - metricNameAvgLogGenerationRate = "ContainerLogsGeneratedPerSec" - metricNameLogSize = "ContainerLogsSize" - metricNameAgentLogProcessingMaxLatencyMs = "ContainerLogsAgentSideLatencyMs" - metricNameNumberofTelegrafMetricsSentSuccessfully = "TelegrafMetricsSentCount" - metricNameNumberofSendErrorsTelegrafMetrics = "TelegrafMetricsSendErrorCount" - metricNameNumberofSend429ErrorsTelegrafMetrics = "TelegrafMetricsSend429ErrorCount" - metricNameErrorCountContainerLogsSendErrorsToMDSDFromFluent = "ContainerLogs2MdsdSendErrorCount" - metricNameErrorCountContainerLogsMDSDClientCreateError = "ContainerLogsMdsdClientCreateErrorCount" - metricNameErrorCountContainerLogsSendErrorsToADXFromFluent = "ContainerLogs2ADXSendErrorCount" - metricNameErrorCountContainerLogsADXClientCreateError = "ContainerLogsADXClientCreateErrorCount" + clusterTypeACS = "ACS" + clusterTypeAKS = "AKS" + envAKSResourceID = "AKS_RESOURCE_ID" + envACSResourceName = "ACS_RESOURCE_NAME" + envAppInsightsAuth = "APPLICATIONINSIGHTS_AUTH" + envAppInsightsEndpoint = "APPLICATIONINSIGHTS_ENDPOINT" + metricNameAvgFlushRate = "ContainerLogAvgRecordsFlushedPerSec" + metricNameAvgLogGenerationRate = "ContainerLogsGeneratedPerSec" + metricNameLogSize = "ContainerLogsSize" + metricNameAgentLogProcessingMaxLatencyMs = "ContainerLogsAgentSideLatencyMs" + 
 metricNameNumberofTelegrafMetricsSentSuccessfully = "TelegrafMetricsSentCount"
+ metricNameNumberofSendErrorsTelegrafMetrics = "TelegrafMetricsSendErrorCount"
+ metricNameNumberofSend429ErrorsTelegrafMetrics = "TelegrafMetricsSend429ErrorCount"
+ metricNameErrorCountContainerLogsSendErrorsToMDSDFromFluent = "ContainerLogs2MdsdSendErrorCount"
+ metricNameErrorCountContainerLogsMDSDClientCreateError = "ContainerLogsMdsdClientCreateErrorCount"
+ metricNameErrorCountContainerLogsSendErrorsToADXFromFluent = "ContainerLogs2ADXSendErrorCount"
+ metricNameErrorCountContainerLogsADXClientCreateError = "ContainerLogsADXClientCreateErrorCount"
 defaultTelemetryPushIntervalSeconds = 300
- eventNameContainerLogInit = "ContainerLogPluginInitialized"
- eventNameDaemonSetHeartbeat = "ContainerLogDaemonSetHeartbeatEvent"
+ eventNameContainerLogInit = "ContainerLogPluginInitialized"
+ eventNameDaemonSetHeartbeat = "ContainerLogDaemonSetHeartbeatEvent"
+ eventNameCustomPrometheusSidecarHeartbeat = "CustomPrometheusSidecarHeartbeatEvent"
+ eventNameWindowsFluentBitHeartbeat = "WindowsFluentBitHeartbeatEvent"
 )
 // SendContainerLogPluginMetrics is a go-routine that flushes the data periodically (every 5 mins to App Insights)
@@ -100,6 +112,11 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) {
 containerLogsMDSDClientCreateErrors := ContainerLogsMDSDClientCreateErrors
 containerLogsSendErrorsToADXFromFluent := ContainerLogsSendErrorsToADXFromFluent
 containerLogsADXClientCreateErrors := ContainerLogsADXClientCreateErrors
+ osmNamespaceCount := OSMNamespaceCount
+ promMonitorPods := PromMonitorPods
+ promMonitorPodsNamespaceLength := PromMonitorPodsNamespaceLength
+ promMonitorPodsLabelSelectorLength := PromMonitorPodsLabelSelectorLength
+ promMonitorPodsFieldSelectorLength := PromMonitorPodsFieldSelectorLength
 TelegrafMetricsSentCount = 0.0
 TelegrafMetricsSendErrorCount = 0.0
@@ -118,17 +135,39 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) {
 ContainerLogTelemetryMutex.Unlock()
 if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 {
- SendEvent(eventNameDaemonSetHeartbeat, make(map[string]string))
- flushRateMetric := appinsights.NewMetricTelemetry(metricNameAvgFlushRate, flushRate)
- TelemetryClient.Track(flushRateMetric)
- logRateMetric := appinsights.NewMetricTelemetry(metricNameAvgLogGenerationRate, logRate)
- logSizeMetric := appinsights.NewMetricTelemetry(metricNameLogSize, logSizeRate)
- TelemetryClient.Track(logRateMetric)
- Log("Log Size Rate: %f\n", logSizeRate)
- TelemetryClient.Track(logSizeMetric)
- logLatencyMetric := appinsights.NewMetricTelemetry(metricNameAgentLogProcessingMaxLatencyMs, logLatencyMs)
- logLatencyMetric.Properties["Container"] = logLatencyMsContainer
- TelemetryClient.Track(logLatencyMetric)
+ if strings.Compare(strings.ToLower(os.Getenv("CONTAINER_TYPE")), "prometheussidecar") == 0 {
+ telemetryDimensions := make(map[string]string)
+ telemetryDimensions["CustomPromMonitorPods"] = promMonitorPods
+ if promMonitorPodsNamespaceLength > 0 {
+ telemetryDimensions["CustomPromMonitorPodsNamespaceLength"] = strconv.Itoa(promMonitorPodsNamespaceLength)
+ }
+ if promMonitorPodsLabelSelectorLength > 0 {
+ telemetryDimensions["CustomPromMonitorPodsLabelSelectorLength"] = strconv.Itoa(promMonitorPodsLabelSelectorLength)
+ }
+ if promMonitorPodsFieldSelectorLength > 0 {
+ telemetryDimensions["CustomPromMonitorPodsFieldSelectorLength"] = strconv.Itoa(promMonitorPodsFieldSelectorLength)
+ }
+ if osmNamespaceCount > 0 {
+ telemetryDimensions["OsmNamespaceCount"] = strconv.Itoa(osmNamespaceCount)
+ }
+
+ SendEvent(eventNameCustomPrometheusSidecarHeartbeat, telemetryDimensions)
+
+ } else if strings.Compare(strings.ToLower(os.Getenv("OS_TYPE")), "windows") == 0 {
+ SendEvent(eventNameWindowsFluentBitHeartbeat, make(map[string]string))
+ } else {
+ SendEvent(eventNameDaemonSetHeartbeat, make(map[string]string))
+ flushRateMetric := appinsights.NewMetricTelemetry(metricNameAvgFlushRate, flushRate)
+ TelemetryClient.Track(flushRateMetric)
+ logRateMetric := appinsights.NewMetricTelemetry(metricNameAvgLogGenerationRate, logRate)
+ logSizeMetric := appinsights.NewMetricTelemetry(metricNameLogSize, logSizeRate)
+ TelemetryClient.Track(logRateMetric)
+ Log("Log Size Rate: %f\n", logSizeRate)
+ TelemetryClient.Track(logSizeMetric)
+ logLatencyMetric := appinsights.NewMetricTelemetry(metricNameAgentLogProcessingMaxLatencyMs, logLatencyMs)
+ logLatencyMetric.Properties["Container"] = logLatencyMsContainer
+ TelemetryClient.Track(logLatencyMetric)
+ }
 }
 TelemetryClient.Track(appinsights.NewMetricTelemetry(metricNameNumberofTelegrafMetricsSentSuccessfully, telegrafMetricsSentCount))
 if telegrafMetricsSendErrorCount > 0.0 {
@@ -255,12 +294,60 @@ func InitializeTelemetryClient(agentVersion string) (int, error) {
 }
 if isProxyConfigured == true {
- CommonProperties["IsProxyConfigured"] = "true"
+ CommonProperties["IsProxyConfigured"] = "true"
 } else {
- CommonProperties["IsProxyConfigured"] = "false"
- }
+ CommonProperties["IsProxyConfigured"] = "false"
+ }
+
+ // Adding container type to telemetry
+ if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 {
+ if strings.Compare(strings.ToLower(os.Getenv("CONTAINER_TYPE")), "prometheussidecar") == 0 {
+ CommonProperties["ContainerType"] = "prometheussidecar"
+ }
+ }
 TelemetryClient.Context().CommonProperties = CommonProperties
+
+ // Getting the namespace count, monitor kubernetes pods values and namespace count once at start because it wont change unless the configmap is applied and the container is restarted
+
+ OSMNamespaceCount = 0
+ osmNsCount := os.Getenv("TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT")
+ if osmNsCount != "" {
+ OSMNamespaceCount, err = strconv.Atoi(osmNsCount)
+ if err != nil {
+ Log("OSM namespace count string to int conversion error %s", err.Error())
+ }
+ }
+
+ PromMonitorPods = os.Getenv("TELEMETRY_CUSTOM_PROM_MONITOR_PODS")
+
+ PromMonitorPodsNamespaceLength = 0
+ promMonPodsNamespaceLength := os.Getenv("TELEMETRY_CUSTOM_PROM_MONITOR_PODS_NS_LENGTH")
+ if promMonPodsNamespaceLength != "" {
+ PromMonitorPodsNamespaceLength, err = strconv.Atoi(promMonPodsNamespaceLength)
+ if err != nil {
+ Log("Custom prometheus monitor kubernetes pods namespace count string to int conversion error %s", err.Error())
+ }
+ }
+
+ PromMonitorPodsLabelSelectorLength = 0
+ promLabelSelectorLength := os.Getenv("TELEMETRY_CUSTOM_PROM_LABEL_SELECTOR_LENGTH")
+ if promLabelSelectorLength != "" {
+ PromMonitorPodsLabelSelectorLength, err = strconv.Atoi(promLabelSelectorLength)
+ if err != nil {
+ Log("Custom prometheus label selector count string to int conversion error %s", err.Error())
+ }
+ }
+
+ PromMonitorPodsFieldSelectorLength = 0
+ promFieldSelectorLength := os.Getenv("TELEMETRY_CUSTOM_PROM_FIELD_SELECTOR_LENGTH")
+ if promFieldSelectorLength != "" {
+ PromMonitorPodsFieldSelectorLength, err = strconv.Atoi(promFieldSelectorLength)
+ if err != nil {
+ Log("Custom prometheus field selector count string to int conversion error %s", err.Error())
+ }
+ }
+
 return 0, nil
 }
diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb
index c803c0fa2..c057f7c2c 100644
--- a/source/plugins/ruby/in_kube_nodes.rb
+++ b/source/plugins/ruby/in_kube_nodes.rb
@@ -19,7 +19,10 @@ class Kube_nodeInventory_Input < Input
 @@rsPromUrlCount = ENV["TELEMETRY_RS_PROM_URLS_LENGTH"]
 @@rsPromMonitorPods = ENV["TELEMETRY_RS_PROM_MONITOR_PODS"]
 @@rsPromMonitorPodsNamespaceLength = ENV["TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH"]
+ @@rsPromMonitorPodsLabelSelectorLength = ENV["TELEMETRY_RS_PROM_LABEL_SELECTOR_LENGTH"]
+ @@rsPromMonitorPodsFieldSelectorLength = ENV["TELEMETRY_RS_PROM_FIELD_SELECTOR_LENGTH"]
 @@collectAllKubeEvents = ENV["AZMON_CLUSTER_COLLECT_ALL_KUBE_EVENTS"]
+ @@osmNamespaceCount = ENV["TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT"]
 def initialize
 super
@@ -296,6 +299,9 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601)
 properties["rsPromUrl"] = @@rsPromUrlCount
 properties["rsPromMonPods"] = @@rsPromMonitorPods
 properties["rsPromMonPodsNs"] = @@rsPromMonitorPodsNamespaceLength
+ properties["rsPromMonPodsLabelSelectorLength"] = @@rsPromMonitorPodsLabelSelectorLength
+ properties["rsPromMonPodsFieldSelectorLength"] = @@rsPromMonitorPodsFieldSelectorLength
+ properties["osmNamespaceCount"] = @@osmNamespaceCount
 end
 ApplicationInsightsUtility.sendMetricTelemetry("NodeCoreCapacity", capacityInfo["cpu"], properties)
 telemetrySent = true

From 16936aa90a3950c878a9f5f9182d3d9db46c28a8 Mon Sep 17 00:00:00 2001
From: Vishwanath
Date: Fri, 26 Mar 2021 10:54:01 -0700
Subject: [PATCH 081/301] add liveness timeout for exec (#518)

---
 .../azuremonitor-containers/templates/omsagent-daemonset.yaml | 1 +
 .../azuremonitor-containers/templates/omsagent-deployment.yaml | 1 +
 kubernetes/omsagent.yaml | 3 +++
 3 files changed, 5 insertions(+)

diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml
index 615cd0485..7201ee6ae 100644
--- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml
+++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml
@@ -131,6 +131,7 @@ spec:
 - "/opt/livenessprobe.sh"
 initialDelaySeconds: 60
 periodSeconds: 60
+ timeoutSeconds: 15
 {{- with .Values.omsagent.daemonset.affinity }}
 affinity: {{- toYaml . | nindent 8 }}
 {{- end }}
diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml
index 9b6656e9c..fdc520cba 100644
--- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml
+++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml
@@ -122,6 +122,7 @@ spec:
 - "/opt/livenessprobe.sh"
 initialDelaySeconds: 60
 periodSeconds: 60
+ timeoutSeconds: 15
 {{- with .Values.omsagent.deployment.affinity }}
 affinity: {{- toYaml . | nindent 8 }}
 {{- end }}
diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml
index c25b9bfd4..4044c90e2 100644
--- a/kubernetes/omsagent.yaml
+++ b/kubernetes/omsagent.yaml
@@ -443,6 +443,7 @@ spec:
 - /opt/livenessprobe.sh
 initialDelaySeconds: 60
 periodSeconds: 60
+ timeoutSeconds: 15
 #Only in sidecar scraping mode
 - name: omsagent-prometheus
 image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020"
@@ -496,6 +497,7 @@ spec:
 - /opt/livenessprobe.sh
 initialDelaySeconds: 60
 periodSeconds: 60
+ timeoutSeconds: 15
 affinity:
 nodeAffinity:
 requiredDuringSchedulingIgnoredDuringExecution:
@@ -657,6 +659,7 @@ spec:
 - /opt/livenessprobe.sh
 initialDelaySeconds: 60
 periodSeconds: 60
+ timeoutSeconds: 15
 affinity:
 nodeAffinity:
 # affinity to schedule on to ephemeral os node if its available

From 12964be1ebf5e108f5861d82a5ce87634ec59912 Mon Sep 17 00:00:00 2001
From: rashmichandrashekar
Date: Fri, 26 Mar 2021 13:01:28 -0700
Subject: [PATCH 082/301] chart and other updates (#519)

---
 ReleaseNotes.md | 17 +++++++++++++++++
 build/version | 4 ++--
 charts/azuremonitor-containers/Chart.yaml | 2 +-
 charts/azuremonitor-containers/values.yaml | 6 +++---
 kubernetes/linux/Dockerfile | 2 +-
 kubernetes/omsagent.yaml | 14 +++++++-------
 kubernetes/windows/Dockerfile | 2 +-
 .../onboarding/managed/enable-monitoring.ps1 | 2 +-
 scripts/onboarding/managed/enable-monitoring.sh | 2 +-
 .../onboarding/managed/upgrade-monitoring.sh | 2 +-
 10 files changed, 35 insertions(+), 18 deletions(-)

diff --git a/ReleaseNotes.md b/ReleaseNotes.md
index 80d6f188d..04bd7c6e5 100644
--- a/ReleaseNotes.md
+++ b/ReleaseNotes.md
@@ -10,6 +10,23 @@ additional questions or comments.
 ## Release History
 Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates)
+
+### 03/26/2021 -
+##### Version microsoft/oms:ciprod03262021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021 (linux)
+##### Version microsoft/oms:win-ciprod03262021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03262021 (windows)
+##### Code change log
+- Started collecting new metric - kubelet running pods count
+- Onboarding script fixes to add explicit json output
+- Proxy and token updates for ARC
+- Doc updates for Microsoft charts repo release
+- Bug fixes for trailing whitespaces in enable-monitoring.sh script
+- Support for higher volume of prometheus metrics by scraping metrics from sidecar
+- Update to get new version of telegraf - 1.18
+- Add label and field selectors for prometheus scraping using annotations
+- Support for OSM integration
+- Removed wireserver calls to get CA certs since access is removed
+- Added liveness timeout for exec for linux containers
+
 ### 02/23/2021 -
 ##### Version microsoft/oms:ciprod02232021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod02232021 (linux)
 ##### Version microsoft/oms:win-ciprod02232021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod02232021 (windows)
diff --git a/build/version b/build/version
index 2da3efa39..83a0a174b 100644
--- a/build/version
+++ b/build/version
@@ -2,11 +2,11 @@
 # Build Version Information
-CONTAINER_BUILDVERSION_MAJOR=13
+CONTAINER_BUILDVERSION_MAJOR=14
 CONTAINER_BUILDVERSION_MINOR=0
 CONTAINER_BUILDVERSION_PATCH=0
 CONTAINER_BUILDVERSION_BUILDNR=0
-CONTAINER_BUILDVERSION_DATE=20210223
+CONTAINER_BUILDVERSION_DATE=20210326
 CONTAINER_BUILDVERSION_STATUS=Developer_Build
 #-------------------------------- End of File -----------------------------------
diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml
index ce64fd1ce..9c8014ed0 100644
--- a/charts/azuremonitor-containers/Chart.yaml
+++ b/charts/azuremonitor-containers/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v1
 appVersion: 7.0.0-1
 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes
 name: azuremonitor-containers
-version: 2.8.1
+version: 2.8.2
 kubeVersion: "^1.10.0-0"
 keywords:
 - monitoring
diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml
index caf0217c3..4b539546b 100644
--- a/charts/azuremonitor-containers/values.yaml
+++ b/charts/azuremonitor-containers/values.yaml
@@ -21,10 +21,10 @@ Azure:
 omsagent:
 image:
 repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod"
- tag: "ciprod02232021"
- tagWindows: "win-ciprod02232021"
+ tag: "ciprod03262021"
+ tagWindows: "win-ciprod03262021"
 pullPolicy: IfNotPresent
- dockerProviderVersion: "13.0.0-0"
+ dockerProviderVersion: "14.0.0-0"
 agentVersion: "1.10.0.1"
 # The priority used by the omsagent priority class for the daemonset pods
diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile
index bcdc31330..76b8622b4 100644
--- a/kubernetes/linux/Dockerfile
+++ b/kubernetes/linux/Dockerfile
@@ -2,7 +2,7 @@ FROM ubuntu:18.04
 MAINTAINER OMSContainers@microsoft.com
 LABEL vendor=Microsoft\ Corp \
 com.microsoft.product="Azure Monitor for containers"
-ARG IMAGE_TAG=ciprod02232021
+ARG IMAGE_TAG=ciprod03262021
 ENV AGENT_VERSION ${IMAGE_TAG}
 ENV tmpdir /opt
 ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi
diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml
index 4044c90e2..206d9a8f0 100644
--- a/kubernetes/omsagent.yaml
+++ b/kubernetes/omsagent.yaml
@@ -358,7 +358,7 @@ spec:
 tier: node
 annotations:
 agentVersion: "1.10.0.1"
- dockerProviderVersion: "13.0.0-0"
+ dockerProviderVersion: "14.0.0-0"
 schema-versions: "v1"
 spec:
 serviceAccountName: omsagent
@@ -368,7 +368,7 @@ spec:
 value: "3"
 containers:
 - name: omsagent
- image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod02232021"
+ image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021"
 imagePullPolicy: IfNotPresent
 resources:
 limits:
@@ -446,7 +446,7 @@ spec:
 timeoutSeconds: 15
 #Only in sidecar scraping mode
 - name: omsagent-prometheus
- image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020"
+ image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021"
 imagePullPolicy: IfNotPresent
 resources:
 limits:
@@ -583,13 +583,13 @@ spec:
 rsName: "omsagent-rs"
 annotations:
 agentVersion: "1.10.0.1"
- dockerProviderVersion: "13.0.0-0"
+ dockerProviderVersion: "14.0.0-0"
 schema-versions: "v1"
 spec:
 serviceAccountName: omsagent
 containers:
 - name: omsagent
- image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod02232021"
+ image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021"
 imagePullPolicy: IfNotPresent
 resources:
 limits:
@@ -750,7 +750,7 @@ spec:
 tier: node-win
 annotations:
 agentVersion: "1.10.0.1"
- dockerProviderVersion: "13.0.0-0"
+ dockerProviderVersion: "14.0.0-0"
 schema-versions: "v1"
 spec:
 serviceAccountName: omsagent
@@ -760,7 +760,7 @@ spec:
 value: "3"
 containers:
 - name: omsagent-win
- image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod02232021"
+ image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03262021"
 imagePullPolicy: IfNotPresent
 resources:
 limits:
diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile
index c0bebcc93..e4ace417a 100644
--- a/kubernetes/windows/Dockerfile
+++ b/kubernetes/windows/Dockerfile
@@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com
 LABEL vendor=Microsoft\ Corp \
 com.microsoft.product="Azure Monitor for containers"
-ARG IMAGE_TAG=win-ciprod02232021
+ARG IMAGE_TAG=win-ciprod03262021
 # Do not split this into multiple RUN!
 # Docker creates a layer for every RUN-Statement
diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1
index db035b13d..baf547497 100644
--- a/scripts/onboarding/managed/enable-monitoring.ps1
+++ b/scripts/onboarding/managed/enable-monitoring.ps1
@@ -64,7 +64,7 @@ $isUsingServicePrincipal = $false
 # released chart version in mcr
 $mcr = "mcr.microsoft.com"
-$mcrChartVersion = "2.8.1"
+$mcrChartVersion = "2.8.2"
 $mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers"
 $helmLocalRepoName = "."
 $omsAgentDomainName="opinsights.azure.com"
diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh
index a9560b5c5..9747d932d 100644
--- a/scripts/onboarding/managed/enable-monitoring.sh
+++ b/scripts/onboarding/managed/enable-monitoring.sh
@@ -44,7 +44,7 @@ defaultAzureCloud="AzureCloud"
 omsAgentDomainName="opinsights.azure.com"
 # released chart version in mcr
-mcrChartVersion="2.8.1"
+mcrChartVersion="2.8.2"
 mcr="mcr.microsoft.com"
 mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers"
 helmLocalRepoName="."
diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh
index e54822f74..1cf7b5c97 100644
--- a/scripts/onboarding/managed/upgrade-monitoring.sh
+++ b/scripts/onboarding/managed/upgrade-monitoring.sh
@@ -20,7 +20,7 @@ set -e
 set -o pipefail
 # released chart version for Azure Arc enabled Kubernetes public preview
-mcrChartVersion="2.8.1"
+mcrChartVersion="2.8.2"
 mcr="mcr.microsoft.com"
 mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers"

From 73548c0053c96a175a70dc2e7ff9e9ef1d0c7f0a Mon Sep 17 00:00:00 2001
From: saaror <31900410+saaror@users.noreply.github.com>
Date: Mon, 5 Apr 2021 15:20:14 -0700
Subject: [PATCH 083/301] Saaror osmdoc (#523)

* Create ReadMe.md
* Update ReadMe.md
* Update ReadMe.md
* Update ReadMe.md
* Update ReadMe.md
* Add files via upload
* Update ReadMe.md
* Update ReadMe.md
* Update ReadMe.md
* Update ReadMe.md
* Update ReadMe.md
* Update ReadMe.md
---
 Documentation/OSMPrivatePreview/Image1.jpg | Bin 0 -> 120932 bytes
 Documentation/OSMPrivatePreview/ReadMe.md | 68 +++++++++++++++++++++
 2 files changed, 68 insertions(+)
 create mode 100644 Documentation/OSMPrivatePreview/Image1.jpg
 create mode 100644 Documentation/OSMPrivatePreview/ReadMe.md

diff --git a/Documentation/OSMPrivatePreview/Image1.jpg b/Documentation/OSMPrivatePreview/Image1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..04cd03ab127c94aec072d56c418da612b934604a
GIT binary patch
literal 120932
zcmeFZXH=8jw=Wt6K`EkC5f!CN6{Luih)Nfc-h{jgNDYx1N+1f-n}Cp)E=@!Tp?4C2
z00JUXBb|hz^n@Bn2siIJdz^dDe~-P-z4ycYu*X?>)=0*9)|1RR*PL_Bx#n->^w;SE
zy?ln@@|jaCKo9^pbB<2!KLr0E|Px3ILbwy|||a(?CF>gMk2=N|wK3<{2T7a0{D6C0PB_8~n3@pooc zVNr3(r_#@5UutUW>KhuHnp--%u-!eq-}?GTM#sh{aK9#}@Px&s<(1!mR@X>7yLurkU-VFgzaA@*EY<*ax8MP;|S6TWgh ze^wvxV9&}0uKgMZuR8^F*>S|JFR0RPk*^9~Q;+8)2N(s9`@ae>mN&P}RG;~9>&FTH;s1k-`G32URtZtEOWbX{&A`u)2@lUh>Q0#P5*r!OK78sjK%Pni8l8e ziNdzgh0@Kp#Px$%a^fQ9LZBKJIDO+L*$t*R&uJSPzA{fyUTFKw1*`s|zpEwj-Lq?X z{oA+t00)fhDIkv0IjEbt**Lg)n1|4Q0eebLE2Lf{`&S0mVxi@wt~Ug#vrWgmGWomP zF5iNNeqp^Pp$^dBkjg#D1iB3|h5Zs__M5*E$ZQ@8>ar~#xW>n6JK#wnq!gvth*wqt@HH5%*dMy$!~)_t@4yfh)CFVm)FxT^Pi6+ z_sC0{aijOc!#>!{bch|?Z;{F*zaW)%%nQ_6jkWO+hS&pJpT<%fTZaAh4`M=(GPmfpGwd!v9)bs`C`EPy(uVny)tA&|W+R z#7vlU%OVS{D{NmHv=*tPeS81F+R52cpC@MMmLAFvl?7iwVQQ6YDbL{@YBb?MxvpxB z9~g~d+oXKY{NNoAo5ki!$p<-)pTX-oo2!!y60%MK5nc_u^&5UKO#KT><}2r{cD7(8 zrvS#uQ$V;JOyi^o?lLI6D89)GE~A>06G=~J)wL9l^N@ZiHg-Q4zQzfqsK_T2aZa+n z|H3V&&b(C~`}#q`5BsgTVW5HbYypOAj}` zCv&k+U;BK_N;|)M3OM(KY`iGrvpxz(7MVu=wCP$dWKS@XThjV8D zRT058LR$)R}XjgE$F;tsMwdrs`zKVv3I$~mv+ITQ@0L)6y8GL~Aw`}1Z|G6Srp7HC8|gTPuHrk#1!cbS=p@%stz_d^zFl z_yKMMLl+3s!c^N@^5&$ zPZArZwDT%HbkVv_m9F?X-;d+x^5Y&de2#dde)|+4xaFI~aTXRVZ##6cRQkHKjEdXS z+ua9VUYtjweR6u=aF_e5&9LU=zeZAb4!pCd=0)ow7IXmxkae+e;*s3sAwfKW2Z22W z2-KZGdvONV`CO_Wpm`njYq5^jM|6q*f%rs? zOwL|90#kZsca?*t(COnUJP$v($@%-1Ec2+AkVRsVeEYHYljpo6;Dppxs+~F??cx)% zl@iV^(G!Hp^^m({zk5dvKMCeHfQ>r*{zaK@P856jt839NZ8q*a2X%|*a zs?{6WGm^bBG$`GYT9Bo*-K70H`7^2nW<(L5UI$e(H50u~0oX8c(!2Zl(GW$;i8lSv zgH6-TPx{IOidgooiuH)3vvMV2KzN6Gn9v%io<15hDRmG$PH6Ha-o@UZk5M;de3K&h z>yw*w`YB+>XV`JL7wOyi!QDKMy|v6hVRLKK>=Ymxw>u_kX^uDM2me)!JlT%RrBHC7 zQOzp4qY3jv>K<5=(ycbAu{cFDK zejMmvGi{#gdkW|aP#b`dWd#$_l5SU%<25;H{igsfO(Ck>s7Q(cw&%$2NRC%oa>vd# zNTF^`IlpD{O4m76hChPu;6zglBkkvWRb!myJ@Qq7T+80hES&;)Na37bLt_TcGGkM1 z^1JTe!Y|L_zk1h?M+IBaiDxW7snNWIp>m1iyEh!}5;J0*7f@_dzdT6x?suk@(|sAW z#CKEv=AK*oaY69jYaL&ZFuX%i(GclDyRc934?OOfBaD8{)5ymn!uWM#u&JFHh{$5? 
zo3N$~k;S$r_mOT14!zSO@feZkAKFIsK_$i69#3;0opT?TF!0bH)JBPAhe0iCag}|) z4o?A-a2jioKgD(p$R^z;NL1^&KSq;n=cUQCj>?LmN7c^>%o;KpdHZFw+E3lXc3*$N zl7vx3(G5KO8O-n}nw>TcBt8T31ZJBy4DYcscMpWXLtaliHpa^Co9gdtyt$V8fM2Y= zoW?#$oXv=!bDv8N@%CVRJy<^LjzD;QIg@Ad`T1l1WZxPITwXu3JM}v`ouUlWI{Ao} zrp@pS8pKxqq+*Q z)__4s#x&K@Pl=nv>weR1?eQtmj)>tx7s0=Uph6B;`OxERS1 zq9cGV90Gv@9S6rSt2~MEg>h|keu193;?E9YJ4D-#f~7w1y+s{UR<>)QKs=o_RF_7` z!Ik)f$TZWYYI2xg*Jp!KtmZgtMHFv?#04h!y({72TAqI`th99&yV-3K z4m_v&<6}pS&)w$cYq$cz>Tjc=+@BWqHX65`U(K!Dv)H`#umU}=;L%?29S!lCniZ`+ zKt%%=S{$U>$>30uxb?~3VIqI>nQ4PEJ=uibb{pEyvs`Rx)6#1A#+2}~+^gpHUlTjg zf79n0LYq1T5Q(5kT^(}nDd0SgDg>OuSq02bVy1wQownF4nK?*0FBJSkna$`9HtvSr+P#_0>`1~fmmzJvkq)U=OL2|a zD-G>(ko2t4@g?foh5LQU z9mC|j13L}AjfXa{V%>|?n|Jeeo(IWP7W!}TCna%GKC%qbZEgu;VLscSi4iS$*VZsd ztQF#19vC|O_x((0F%j%9QiRtBW$IOwxSA(gxHxUKgTL%r&VL^d8Cy154XEGyL(PW> zkHVc%ze4c9jThbour5C7Epy}s>@S%u|0{pBW!S0QiB&edlJd6>OTQ2IyRTOmQp3E* zF$;BhH_6%MfrZ-(x*L>5V4?#P(FmI{g^81?xAq%w%ahMM_rdsDs{Q-o>t#`@%wGKZ zN2Rn2N(2b+dKmW!D(oCgjTBZ&;=m)`?HN*x&v=;itpxFL>3Va^Ke{Pkpr1G|#wE{Z zarV*kQvmgHQ74?28cMl2>52xNRJ8$##pK}Qo$I!97nf??n1Scr`=Xs)#khx2tK5U)+mThGZ!Y6X=u0yt526k?@_O9XRQ4?dQ^ z&Gv{`d%&N3AWYX*Q`%^YeB z(0>DpSt!ouRM#NP!fmQf@@S0if|F$*R}a~}S~l1EM6_n3O}E*;gSJIp@F*i(Pz~@` zaIzScuDj6gcw-)5@BGx=-gOT)di3E)#`Nf#jjZfmsyvMDe_r!j(zk;j6@_u+NNO>r za!S!y@=4S9p>E1sd@?Kb`8XVZi}*Wcn;ZEdZuAE!DAkcAp|<62e!9q;sGHZuw6q8} zOHi$1Y14UYb0|b+$$WdXxKea>a^R;4DhjS0B1Z8aqulKP5+8#m^YQQ`7+u#`scNTy zcOlptoMYg94P_e67L)oogU{;Oy7Z4!hDK(ewD8wjD~+_<@D6d!^RQs@DS#js3%mrA zoV!Q5h`HY0B8Cc*nsR&nh($-JG1A6FOX7J3+jMT#0}(542qlopg_)=eRURPVx{$_Z zq;oTt_rV$Y1XjE0?9~!yb8ABEN>ujHAG_W!6C}djXcJySWkL6A02!vh^u;O$o}o~^jJ=If>7n`=Ct#7Y6TKHHGs@c zmNd-{u(aB>8V;J>XgyZ;1roJU7gjMBFbkhS_2S`Z-WenD4w>`QGHth^qJt%sMa@=D zUf5Lg__`nZQk+XU44|@cykHfyLoWOjP!ru50*nS;@CR|sJ#zoFMcUlglT%`U>M7Yi z-gRq0{z&HmOOgBMX8;rw>0ENpto`U^I%OL5r!7?>sW#WpF0;{V4phGTLYv#1*MuY< zyEo7DCv7blUEo;a*6>mB5SmtT>EQ(cZ@?{Gk&DKrQp=zu!TO@h2@c{as)m)6}K8+RWuyV^aXu^^rCf^onLijXK= za99C-u4uwzeCFiP5;r5kX{5g$Fi*zl`xlb7i{}qCthT7T1|(gKlzdDBFAOom9-owH zN(D+SV0z@DhVaQOFwJT-P94kL^J3W+bW7()tQMT?KS zOhCkvvB%*K-HJG&<%zkaCt{87?sax?Rk&wrKYND+1fSy>8Y1G1o5-c$MF!34lX8y| zqaqOov!<&-Q8tB{nMjo*_B*a0F2n`Jm3+8k`ByN@f0-?sqv?YSFK?<(brcsg~vZef^pBdW^5d; zd`SKLsxeiSUrQWN%^+3>citw8)4eO|SJx0c3eK?yWSyPWT^bC%lDcwiy~B}#yL< zjeYy3R#sx2g@qAots*f_#+Y%$eN?Y8V?FWC|Gpu}rVTd!LY-hjo|L$*qC z^nuFFrEZAeynK%`4<@Y_Zc3dZho6*Y#Xr2;3N0T1Gc%+;9M+kYgUF0F&TX%CtWAf4 z;ps@Yp;WN&I}&IEPF(L5tiwdn?zrdOpjcr+=YOHIi|KJaq}jN}t|`+-J^lT|BR(#l zKgL72x54|)^56`4g!4xc9)Ln1J>y}kKdFe4%~;^uIh_afJ8f3QAeYlUK*eLJEh#k&%LOFC?o#ZH%W886Qj7g-Nvy6!s!Z=plA z!(4avhEJw}h)3T}0Up3#W_YX3!2;J~NH>s0JkE2EHM*th(P#TfrM9F3>!`*I<(qF5 zE(pKv^1F16VxV$Y$ zR~24*T(}*Jxm-+gKoQ|xLluK6Q=-a^DwdYDdQS~C>J*CE^p{rdratw}I!t`DP#USk z5zz?ez*JSpsPl!C3>5YiTbKUYocN(PB_7FW_r%>K=7=RF@4V93u)#puUm;m7b@HmbkPqiwGqZXx`*1Z3k7c8)nA?pdIgjkgev5JNNH+@z;KAbG(5Cx zh%D~1p^93)!9jK_^O!sP^JYd!@&)UlsrGKF3KvJcB(h6ygj%Dwe7)iQ`Tiu`pWLRO zbDFc}3KW=DiQWiUaw9Ygx8_4s#L2*j!0Xj0}S+5#@g18o$V(osazSU&h=SJ>3wTISZ}PL zGYWIsSA7jFB`D_sW0ud>fH0{ z89UJ$->lvI$pPAhT%yW>F1hSWAPztDMj9qZFnW1G#>)%5?Nw|nI%{8)#4qYvXxd+T zxiI;|#q_N)v(}a0>-EReRTa%a_aTrwDBwu}-6lcNzRHgJ@?Aqyck1}IuElZiGr!YLpse~Knb;^}K0lLO*~ z#=*4V+W_84p8i52Gt0*it8wmUBVd(Lh}8DsWN4?Y!z>&%cC>zALzk`v_1sAuP1u+) z9~H((kAyw2e7we``u-=4pXa>ZPF_!Q?#9Lr=tQge_Bt4avTE}@H7d^6vXS|xz7zGT zG{P^Y4J01JXE18zkTs~ds&vaUe8=~*a8XCF~p(e!Vz0{yHe1gskN>_JBk(i)?{1ehs$F%e#5&CIT9~F zwZNk^Mai)XC;_U{DAk^jXi5#7TeoaV9Wbk_N?I9VY_O4$aSCx2GQ0oTB{X82@0rar zu8lb$Sql%0ztk-l2b06kbM|^wq{xIxLHbb7@iV)<3(n$u#LwnpU3E{Y6HzL8iw z#R5|$3b{i0q=IB~TF!l}%kVcXm(pI|kmY`VCE<;CR^_X$W%Rr+Rk3k~QWIo4494Ke 
z>(Je;dJ^ay)ro}Yfe4W845~=uJ(-_8?r+BzW>B8gROi`jddg*}J&s+9((AbtDOVkT z+jBX_KMl{x3Xna4UPFn>DQ_<2 z?j3;|AJLZ^PZXPbaPOwng1w!VE&}VoPW(|zxGu{B3;kj(TQ@AiT)m52%DWL9H2JN^ zBbo%k`jS)@p%Li2s{u7@S)uJ)WuM3fYbB|vvIM+)sPPZ_D2 zjwtc6^?w_@$h_5OCoI& zGJ@__*dGypTf<|F7OlgCR~w;xygL0oQugY|h&h=-L>lM7QqP~N6!kRk`_7F%LHB5i zRDB|_)0jCh9Hten{-|6_|&{3)jWt*TqH<&eK(zn%qIe7-a3Ni`= zEhI;%b0FP=Gve`rY;ISR8m_pjSlXKN?t->n4*tF`;iiC1YsyK>YW5}$bs@6s(-Y8w z!=g2#V27TO*^QU3NbsCISJ&0_PMzU5kq-c$JFa{w+tz_)Dc8}qyW#c|t*c9shO-}3 z>kZ++=&i}s2DIl<_#&1jOyuYW#eps%_cuWnn2|7k*W-~@h3To#xHZ$PG)c{GjB~d4 z72k-&VZSZZ%68iu#{s#hV4_OrA?GAjuZc9U9(HEJpX}u&?YB((-c;9+WDSwYnB!*m z+?1WI4Vw8I4R3G#(@R&pNtjWohtWq@WQ7~=%19^m7uL(O;ARB3DR?GE>*kmDs;f46 z{NytWz;o6fye%r>FyZ1Ua@5_6B5XRuFOJ6J2Vi?P#y`#S!O^=OR)a5k$wQnwm zL3VAAf)&d`kg6WU`5*nPrvPxUKc?>&S-Zr&1@zW)+Jws#0!VU3eBD5>l(4^M;`e0LnFb{j9UtIA5X*dx!Pk>JgTM;7% z-`!7+-Sl)G7pEu{8ZxrDr14K_8kcG)^xw?@9OE{pNjK*4;u}ST`Af5pWU%*r>;xr+ z81x4HS%jAM&Sm-peeLV$ks_rLI1=`g6SGc!G*`~@WH?WIu)xb3lD`y|{VB@#(HBYe zfjbj%?_5`Y>Ep24K4nS)Y0%~P8Vquhid)p>CF~@Oe7OBm{OQFU`=Z-YjjH97ntg&G z+D{PC()haD4Y}xtM4fsMZ<&M+)V5PiV=C`dP-NKC+{|c=4~MAHI2sHr8vn3%XHnI0 zpn0ta^Knhx=Gz@rdvzZ5>q-I?!@5+TMzMqafQ^7aj_#l+f3rKsvo=q;PY`nIIeF-j zZOhFNxh6aW;|5|}g|oC-EP5n*YT@%4_2InDP|@Nwzk_NQyZ6J~yB9kX6Td(ccxL$Z zN1Dh(_;PPN!^x)_RI>A>KQe5b1}$%uN9@nCl$}REeoD!ACo=gw018u`Y8&fLko%{A zw+dynA@V38l&fd`{EtcLkCF2h2@E?QT!xVo4>+jUBW9>(5QHRdL;M|~l+Dgbvj>`; zq{CD{2YPuNZAK0qJeqYkbODFKy8jwtN_NGmN1gLJ<2)6R-y z3izlv(?4)+^JM7?dWmJ#_|OBFv6H&_DtY97CrJ*A#`|sR160obJS%Ern*@3?sxGi7cx|0m^EUBSedrh4bxj#G z?jSVlp6#@0CP%ghMC7uO7_pTJ z*Zc-7eP#O_Ba?6#nx~cw#Y19%_0eBLC2@r4_%{6lax@+jD`)jQx*MuIl^nO^v3bK~ z63P~1Fb@`qpIgWBRSpoD)XqEwRT4f z(+1lblKQ`|1#BS{g5=vLU2HOJy7Qde9v}j8@)>`fN1p(9;+CJY*4Z zyN4f(8>k^}En^g~sAfl{1>6R%G^})+XLll%#-`zyd%Hb`&dbHvS?8=F0!&r~J&Y9HH`N6 z2abH%E4jUyC#6b8A$X(w*ftqFGY);rntZo6kEQLWrxNN?Ux@dOQjPgc*}tbEr~8!W zGbTXueZr3$b6|P4=h0PZWGDQ(+daQ+x60!q+oi4M#e>3<)xa{l<-qlZ+$^_HiZ*>o zb<-3kcMt_S!-RfPUl33+(5$3#Xa#w_>wu=%Mhrknr+kd_km5ECPRdce-bT zd+4=&Lh?RvV|!6Xm>~ES#I&AuZUPxv=sqP=GM?B|E@c=hEyMD`Z8la;R=zp@TEo+) zz83vBdKNuuapbyj5Wu>D%U?JJaOl2jy`Ii(l}{U)uXBi{=i~D5%j=-F-AduAs)k)l z!3KN%rs7vtosa*vbV>PU{y0EG-u6Y+zBiBK!vx4}H4*1n>`2#0dhk$&yp?Awsc2C< zzmxsOPgcW35A6N1Pos~AWv+ylTQ>CsR0ise!A1<`MO<0AehRXY1xNz~rJ+r}Vlt@v zYTaSh{t_Jsl@{ni5t68AQ*upF%xIi|>J_XsrfPBD`UI7AhG&S!{`9c5cXv2LlJ@Nq zLo8yFZ>ZI9TuZ6FF5YG9<{5tesBm=Ai)sj!p!*Grbqtdxn=Oov?(1u9nrM53G?3kI z_uAB*s`p56^mTlfs5@}Ya7X8j#C82+JxX zt;3+!mxfUL^D(x%QEe7Qe-H>A88eVwRMneD!r5=nG9%JWnZ|GK==k;+Y2#6`Q{r(i zM%1&LrMK$WZ6Kb0Ck9jYDkgbX;-parhLK7B=baLimjqzgA1Z9rzJfsJF3d%shv3`z z;=tW*2Z=TkQ#8^&mE8qlYGHsuML0Gut<}PN^vGX$3p7eVo znJ~n;PE#6#X$KMKEznu`Hv2D+gbgh05x!(Dptkq`}>oFkL=w;PqevBFYgG_v7m5HnhI^i_7o7Ej8)rY)ei?< z>Vwdu%)eI7RZUDAM=}_)Rx{n?j}^(-yJl4kb0QjJ<)Y~Ib(jS~3KTyll=oX}cx(TP zBjWK$Tl0-84%(L8Rrf?6+*4X;2}2YFYOIjJy>Xhi1L0WP#$R+}oe!GOTKBPyFju7Vyq$ zSH2wdX%K4EJ5Y3U+}7Z7iNI`#$X-0`2Q}$fW_x&s+RL1XsPg;L++Tro*|eR|_bK-+ zcRy&f=de0?7bX#yPjHD)w?743u&kTfUx`O`yFmO7-cPHfiKUm?Z%A=|$Z)+(+q z%3HXRo>6V=T&OvH%U`pI$D&bWsd9EM)dO12(nrE2*3C!Fv5v#EDrS&*X@ygUAy75H zB#E)wYyRR4Z&bzOCQxE0O~_B#2aY60QgLcm$#b`3b44nHWR z(;07sefp{^@K!1;k>w>LBklIhrQ+r_;dB4AQP;0)|Rsl@EK_RQo@fwg{JKszDJNBW)oX| zj&vkTdHyV0%OMFoQpy!`*{x!mVTfSyS>%o0+^j`(S(Lw5T8r&QVh2M)X<y{iC^hN$kL&Yq-e%B;pfk!>yMv1MtMs{&VxyzTQGB%#=OS8+)vMw^$U^!}KZ?K~!mY6yV zyLta2cm3I|JvUnw?`4y5!#^z&g=nYa*Tt`kaL_tlQW|o@0rK~WF2{bnSY=x4vca)^tBS|8(_DL{KSHqHIHBJ7#yVP=IFU^^ zX59`5hkCI6x7u2nAlN|Lth=RLE>8X{IkCv1>s>%>?uYVFW;_SqY8a5%9#vmBKed+i zwCwE6l>M~c4XK!4P6{bwR&U*cnQ|%CFvC{jhDr+z#7t!Xmuf^I3Z8$6#wrYYrhVPj z=bg>?Szz@3R+lo)u}%orPQKd_Y}|!nwf>XEo9j}%de!rxfSsZBJMRbWuen1~^|$oJ 
z)9fdwI(sgap7$Bkcg3WaS3)O>W+z3Fd0z&iMKUCPbM=R^vLRN~zR4Zg4-;kfjU^LF z+oo|6s9epMWtsxjqJ}I-+@gTmqC&dPB(WMwd1vkml-a)wpSm@>ls?Q9c&>&HO!_?` zc#$Rpdz>)uPRBGl5;f))dLI*e(w&2N-rVGg^N9ae5|q#AoN|jn^UNBcf2mSyBuEAs1~`Ez0&46SDr35GGc5QOXVUNcY`jXMOHO0k(Pg? zRoakKK1FxYBu4SdOgLUjne|aps6%$2H0c>bNS3x$v zNkycZ6*ZfKq*<|sqwgd&-@soLK1LAR=hh_+vPP;x;k6);&UB~!y%e85zGDY;#F1l+6b z>^B7+_L-cstxLy6=a^-0{ zGdh59_}hE$dEMdBkey$BTJXIO&Hgk3qTH)wb0;bB?v7=vid5=L_e{F&Ex;6ZsJY4 zHn)1L)2f_$OnvXcLVC3Fa8<&OT{`Y(O@Dz%7o!_Ps6K_)9Xb*S?X`W9HCIZ|;OL~u zqv;S@-Dj;K>zUF;NoR*O1~O6?I(!b5X-)|vg2ETS_hXwv=6&IQA3%ihHq^8oqV-DO ze!wYU1bGT5#&m^(H|WY$AH8W@8rC<*OWZ=1xmiuMISApnwBiI5)K3ALh8~%P=Zjxt z&Yg=nYyRq)A%H!&j`#aSu$LWn&q|bKeZeRof zLA?ddyE}HZ>&Xc0c99Lpc3Q6i!T0L6S0y3+laQ2+uaLBXHGuEEM=>!Oi{Ycnk?HtJ z6#`q!t7*mReM2JZ%5fmj# zhVv$tcz`(SwUDkYe(%rwt=AsdW!uPHyLmFw$3FA*esW5`@|Clt0tfF%j5LX#XztY= zJHx`jBJ2&x5xK%q5vM%DFgVw1+ayb2;9aAPY!UF@P&3PiYJ*EA(bCOnb&mX6 z<@GXrcjFSXTT=tlXE%l}`mN!guPkf2Hq{3im}CV!y^ppKmnw1QMF~jR-xz&fnyjf3 zXopXZr%HWXZ>bBo+L(wEsTu}P-M%*WJtdQ$zNhhkJM_0?Nv8Sr?;rR5iI`3M^1x9# z4l@}oeG2I0C;sl4yOZ6iY?;LuQI8bb)`{wRf|C(VHqFviu{trLfG9V5BK{sjImCBx|jacN@x+ z_Oy^-ZIa?(k*pRcrGG`5>5M<)j?~KbTfw?9K`x-{AQYRr-pG4{V;s%%Crqra2Hhq< z{Y$Xk{pA7Zc8qx-iN)X-qi2FVrG9C?W+|7ULa;v%d zSfx<5{?y3B#3zQ1>sGFX6PZE>W$?7x3lAm&u5(DVI2yV~}$N{N-Aj+parrd4ap{ zm8;e^_U3g4kEED&SpIxls}QWsm^IG*oQb!L2VX@xPuceNU8$Yoeq~H_J=w{yl#7hV~OdC5gc;LjGV6eJP(t zTQ)R48L3m1G?MwS%(LSI3X7jO{4?A~{(bO=>_Xh_zFL_MuW&!)z#m@rP-$_A+Sf2M z{FyJ12|*!|OZ9b5rq{7M1&RFj2W4!*sB2+AHMz;5P<*+^=EIe>^&U>o;mtkd{oj^( z`*$KsFHiLf%d}h&o>x1=85G-$^Z7l1d0~9b^#}_cCTrO-+vG2Qo~l;4LbJ6*1>Q1qIW~o0x#; zDSFzZDd`I0+4R?^0)zbp>;+Bp^_rUOf=rp3-$Z7}jOQIXqca@yYUrA(sV z6j?N9xvl+k#FdT{{kMa(|9`*p*qKep_}O{OEMt|cYdtxn-ZAcYr_DzkJ!9sen8D7) zALNkPQGqwWk5_LaV}K*wm?`x%9;_LvuNivwB_Od23G?-)qisY};YVNSNa1FeQ$Wl5 zHV4^w4|vSFiXmUhU(}_F9}AwG+ncAJHzuJ<`|b_g>z8QL1lgss?So=bW;S^H3vxaL0UqP+3y_B%2_18XlwA)@2@Vj^8zd1KN-hMn+f}%qF(E5M;z29 z_hTNf3)8qxIB4O=5E`R6nV$1rphceov_e%*0cW|W9&CqkzE6IqQ4CN((WB5`N1Y`Vj;Y_jIClx`{G$&UGp}-n0H_FHs>BYSfr9GGvlE zesybv-+vJTm|BRbolxoIr^Q2hwwz$NnQ26<}1D~ zxk(IuzQc~)6xV@1O9Z_``nSPdy9)ngOk7^O!p59aEfjl)KYB2~Ez}4+4O1hYe6YRx zsKAvZ9>w=6q0ISeqA0BBqRdy&OtQp>PRC`@VDG2&-24>q=oIh~O;0A@UZ5$$|AiL% z*>RLi#4+ViRmOvklQbsS-`^TCA-^E*&_-xBzgxn8cV+nW`iksot8c9hzaJF&*(IDd zpl(v#_e=*9r+Frjt5=X6sm_lKlNYlwf>SRyh#Ep4U3~{QcP8xF zy>5?x{q28|gP;qd^ay;&Y_~6APvs%{)8$552%?JOGuwC@241NH#Sjc`FVEeq0KJl0 z_GDy}ongBu31p+P5Ou?;e5GWFE)g2(m(StGXBhgTWYX=9mb3HgnxJyUXI~ig8T0Qs z1EPMbuF(IH;C~grATj?Be2;GwPP_%|iEo`p9eG|?4ZfC~gmIF4H_xpkFg?UckLsWV zsdj^9If(ceRmBA=I<|Tx);Xm(0pXv($ma7f!R4A&!N0FA(@(Pei@g7ZX8|d>_0r{! za8Mb_!G(0Vz*|bn$Ifw#DV_rO<^s)Vmkr~~KZ5<%Lz-PuLjoFRFs@dK2Hn!@yE;d^ z#!r0d4(mVve!myq7I*eFkTUzW#fL^3S3O*Q{qBC@Mz~FpY<1lOdyd+E%3JkUg-SahL)r_C&8_aqD^LJ?%E$95U%m*v;n7u^pZH&ly6As4!7+lo zk~fdN`-o54Y)uu}-8$GH@%-m*&lHq{?*io4E?&HgLjV=qrbKMS4@I}>Md{evJt2;# zm+WMo;k>Nq9eosWX=XPqubzJJ>Obepabjh<-0u$T3`GT|Mm7#Rbl@fOg;SOMMRx4f zrz~T^F2aLCo2d_n3sW~fGIs#Vm6INh{Xs)^w5R%pS-G`&XGy4!U4 zhn38i57>+Oox_pB)kw&H=)?-#8WH%$*`Lr??K~;};&EL6owc1m!Y%Sqxo{37L_BRA zZybA>?DE*&>kwP*2Aw)Ht~v18p~IVfy8BLZWQMBdut=T_d&5J6Dzhr<+LI)-w9RZF z(+d*3Gg-VV8`>~3NoAJe=Rt<`Kba>f91S6U=xBO{mj99a4~a#6JO$X^y`eYi6R)p? 
zybqrIlvgR!`e|#rwN*3Z#Bv~L*+fUbAunB0)Nj=Gwn)WDjX{5Z?{&&O823aN`xLna zaw#xr0DIhNvgm=mnsCmUdp72ZF9AH-&2LnUdhUsU*@?Kdb!rwmgwxzM0jR(#F4tlPbEuxn#Ka%A9X_SHSh|JZ17S;4Q`gGzhg zlOej~p^Bl;1IWPqE?ncWnQQR7DVeqD|BJmh4`;hu_r`VBs;a8FL~CqKHP_t9Tuo7P zNlT4ssG5S1wx*hkqKy)zsJTkaqzGE7YD@_tVyGb`#vs!7`<#9DwV%Dud-gg1{N8iD zzjyzU%Y|fReb>6zz3%Drxoh&AY=2&poAHVC5WCca!GA+sKB*Ug{@*dM7 zR(XwfhJ8)if)>zK_AW1gj$-0MuWP&ECeJs29LTsaI5$#pCSb7ihA7{KpE2#qKIv}b zl-UBCDQn>B|GT^QKb!;=y!DliyWs_6Myu|fYMZmh^FlIcw%eY$rkGmgeCc3cHD||I zpoILtxUJSXZs99*=}OcS1X94e(xu?UcWN|B1e@DST1HYW!B{RT)wf3SEUR!7`1SlM`-fs}Cg@mDEj6S}(yWVDb&7 z`Hf0UZUi5-;xpycswp)wvXCCu#@uqypJuzVC6?NKEwF2fKKqh&_Q((P;!#P)AO z4dpj_%_k^wlmdtT@tcd5{n3xFU1)+>?WZung#?1fE=S@5o zVT*#PT8hhjeCxN$Sq}NISbzL4<{J1w>6|{lUFu+!vGG1dY?!fq#V05LbY|5SI-$nv zg^#;EX@74fjGNh~$~-~~?Tf{fxtY@f<5y)9QWx>97wk=(T3dRd-h7;`JhSFkkNkY< zF#aq_uk`O8?C*|8sRAD=ZNATPf@$)}f4S_>g1Z&7g4~_5hn>re%9fy?DvzE z2lxAhDstCW0SSK-DAe%P1M@3Cj_4iAVL5Q;zq+j1^j!A+_w@+@;CB^wn~X?}9DMY! zZR6obWf&nPvq;KHnV0Apy+77cF`HH7<+bAf{Hbm$`XKtp-{=`V>2dy6+QP!PMm=co zV5%<`=}O(WVk3hv0j@kRunS-3h1gjykyknAIIeVmMsjv|GvxNOWf`1gO*GFEYNU|N zA>e4AsXAQIre6~<8!i}PBYN=41(^dP2bZtPtz0c{{Igy2A6$zcMW|^{y^iXNMzdJ& z5hID!kyXdSEbzs4iL)Vy)8@`L`$kxlT;!f6Lzh~MTUr(L$UaI9 z$RcZ-Ie4q=Iq-}L-Cow2XfsK>CLNCPO;;b&mVP(PxR}4RoBbvPS@6?w(!B~ZlBM0z zU?dw+CWsrjiQ^J{J(J8Pcij5HkK{Z5g$81~net1phG@ao1*$@}V`C=O2er(mAqU{{ zI34kRt;#~r-Ru}xVCaL{FAtNxqHf9y+g(|gDZn|S@31;8S?^&LQjRe!3b z5uaAM!=mtE_RK8<7`w-QI7^i&2 zoM<{S*Dw2i$d5i4fQy&hb3_n!%#yPaoq*!|7^v>Cc3DX3a7>moxf zrzRsP%-=pvsStNRoBCajy{BNJOfCCx!b8W{LS;H0tNxDu3A-RjKqfiZs-3_KkE|!Ws)(jN+9Cr$LE_o_7oAS^kmw> z_Kxfe^pL60kPbWRv@KaTvma$%7&hIDJsq>(e?2r!G?5z-G!nayxAY~IX>3EPhh0B@ z%D6j7N}n8|O7^$l&q%$xZK%Le z-@A%j#nwz6{>fF7!yf$4tylL$|DDT*1)uxr zpPizoD!=|UiGOpj|Fsf-4K@60CH~Wuuu~O?5XjH%gDLyA?GVHs@!KCeb~q&bDi?sm zV)@tWzrO}nUhk$ep}d!lr*++FK1RL+ueE_*NoV~od@Wrk?p~{Uc>|xnc&}(m%lVEq z+%Jwq>;e#Zxq#gK04U)lW)^Y{NY>lc?@E^}4(*-8+7^xf(j{s9W+{X);(oJ?0QPdl zE)b^sg5CF=|CI;4=h%p6pawui`j>Hjv;5;gQo9(P)=gB__wYY`^v^?cKklDpED39#&^SV-wy4hQ)N;A1K9@b>TT=}?{Ahezz&6MEd6FlM(z&x z^&1QqZE=(it^AW=%r-qk!OvX4x_>5_ljb>KVy{@MHh$#6#hOM3n|o))>ZHYLQ?a^8>*GN(M? z-xMPgIG*DkyYDsL2F0u=76->x-V@JSPUngdz4Ble4}AcaUnZvoU-_=h5j9S{^yO6@>yaS7F#M`l+8H?|BZYuaaKmcUZOrj3Nylo1>+ zQ?4xBo5!ujp{YHSa`d7HR3N*CKDAMV;3lC@2+8Obgho@OLq1FrLQz2?og(oP&X(Yb z>XdUj0r&yk(_sQO48u@XqDdexLv*)VpcnhHyK(Nt6-z`>H)3Ao{>F^85KVB)fJ(%z zo`E;s#Ws&j*U_@llO6=utlum;8|*TKzr)wAZ|E8{t~34l!@VQw5*IBVM+^+C!-B6| z3-g2^U+1exWRCR5?iAJhW?8dG8m?;xl8<#|>2`%_Q}Z$NZMNP`{O|1H1+rGAymxHm z-_`M&2)`X^qgFo&N`I0XAmLM8AZsMakLJXBha#=Xee@K8>k&nYw4{fWGJO3Y50s06W;CanKqWC)!Wjd zx03M;p*^-mC#>KQ*F=`8v-ZLkY+ ztVtzrtY>=l;I-)qJ-O9{1oYEQH0QX7LbL zTR`(L*%rf&4y|lj4q)sL6oP$(K0|w-9%%gbQs*8y{r;0DrcMm={Y+V=*dL?=sz9w< zV8rU+Z5RNp*gFG#=-~9_i`eN~uFtJZOCJ)%8}%;jRKy(PgQrPKEVyS++7oEA&8zv_ z!|FNHWqpG*0y2%_)riS+G?*fel}raxILPdRrc`C&qtxN@O6s5i4L`O?5G*sIczL_j zK|b;2W)p+CUxcj(zwSQwKJ&8)5Oqg-_MKx6C&OV{9K4YTx%oB$NGyhs1-)HAeBMJo zzB^sD=iC+!raG9OHTv-+vH^xmGNdc)U4(~BIhGYndZ?j>aVSWHGV#bwd5OfUBLVD6 zH$-6xMz7xxece3TNY-z6Bmv~OYYY&7_{lh5h*1C0qU_k03l9cy^p()P?^0(j7LDEz ztyZo5%`zqzf1>J38`OqJ&lU}xp>SJ{fiqkuu+)odj8YQPG`rX?B*ok$H=*U%I)Y$c zTVXmZbYp9SW;y?id2Y00Ds)eDe-%|OS^Z_QSlVR z;p^v~JKPE3SSHh?yU@H%x+H+84;rk4&iRG6^B?Ek3JivAp^LMw3$~y1?K;w3(&3@3 z&{Ui%m3A}vVXZ_x7(ixJ9s-^oV0lz;0++*YbWR=|6svUbZVx;?{zbIzKsk&JLim=! zGpCpE3?($1ldx9yb{8Gd!u6FD8fY>kL9>p-ys4%rI%oigSwH+vkgJj1ihGK3Qr{8d zq69Nep=2{_mP=-@V0|sMl;deoII&uCKaL4R zi~55Td_kv?-p$^hf3sB2J@050hY+QdKLbLo}sjjoFWeD%*|c=e)dbJU)8sbCT*U zQvJk0rSd9W!Y$md?~KQ`4?S<1W6L#pdXv_P&K0^1S|-Sv^ai=OfJH;ZDnzTK8)4&! 
zDq&MSag{5+9%nZ<=1uo$bL-7D6CQSEo?e>OWi!K?$OR)r%Y54}Y>hPz8JjIbPbGc8 zK%!d?(=7tD51x&@$B=uYl(py$e)b{kl30@ z85&0{!ReZz87aSzZK_B-ab`p!5x^fjd5UP<)|Hg;Ad#h(w8x!VJi33gsQGWJ zU|yCO%kE`5FfZEAlrlCKSNamLE`3P=qXN>c?|B$#L>8cDn?R@!wa9vA3t*pHJRa+b zql7IUX^g$vbJ9uuy`_Spx{EmhuW676Z``8NGo7jJ9caFdUf$31B*}0ULYZRdrFn`0 zWT;G5-ZZOs#y9%(O~UY~h&%dHp5npr;0@5niik4i$)9jOPaRR_*FnZTEw^)>)1YdV zsiE)rv*~L!J37AHoj(XyCeWzLO)OoK<2k19%wqi5d_oR2s<8whgz^kK5pAJI-Fg@7 z!lhNg`^xpi+A~pHs?6bK2J0*J*=G6$3Oq<|~!kRCfAmp!+X7G!E;Ukj+s72_M50WN4m-4|dcFE72SaeI@dWA$(17S6oy1YpcT15+fq-M6cVv zIFgk$@p?PC-sphPwJ+~WCWQwR-xW!ME(m?|={$1AQ=yTy@S?3y+I{%!m&Bg|;xYTu zVO0o=!p!~4Y5efSDQyGfI>piQ=&M1u5(hQY_fJ&Ty%}fO+IJx_Kq0tmpDjHI$o<}K z1D|!spstQq?pch z_$?zYsYF|G3~D&yX_0uq?HSmmz#}v9;fuGmy1P;XFa|wD{t9K};RP^BR*>W!#RS

dj+R1b zA;+g4mua>mCq)w}51(a9IsD`kxM1>-1FLrzr#vf{DYD&kRUm$gz%&S#~? zg>A@OeSR!mA+-l?zoi-1p3OSMkk-Y?zd&_Gly8J2oaoVSsjIyFYipp>>EK0Yj7iKb zAE6gs`Dm7dftM#73rXNpH>QBS_GmDzGTs_bWu}$EpkIfcJ%xy*u=~QtN0jjYK%6q6=3n}i)L_7bDZ?FszrTErQaso z)>qrQ#J^7{zU@|b<^Z3V^ck+h_d_XNs9rqQIg8ighbbod1;R3Ck(1s)?yLzei?1p3 zFC=~!H#!VeK6AaFZ!S%>hl{%NeTvUTUoqkCZ0%ycAfi5K-=X4DP30>~)rP&E)%f2m zTPxs{u)sBK@2y>$3qU$gfigKK1b(wv{LoUHwx*7cX;-u-aeAuReSAJ;KQZ1_@)`S} zR*LCiW2b)2=IDa6hhgk|92N?WMH#fM#)c_bWQpu=JakE+9VIDjr+uE3*&liOEG6V)4ql=ji122G^<&b2fHo7*a_{w}6Jjsl z$U4q})STY%Ov2L==RUhV8SsRBLPq<8qp~Hbr5O!B1{u~B*cU4^wz=zhbP@;gu~GfN-c>r5 zgpSt=0&FtjCG06#hynGRqvXIQ22($%yvH|#f}gaGlf~uZo*3}2n;SAd&#;n3+&aRf z0C??oC-RxF#t*IQG{`-=8!r1~W9^VdYePju*x-?ode4I@QSVo;NNuf!ZQko74Y8b1 z300$cZ&^^cJHecbW99ilWp+eQluAwFC)-|u&}aZ~4y&z?^Ml2Z(a`6F)R{YX;^(td zxA^-paUf2l7rnj9QGBS$QZ+#*ugH^EHBKub-%+sfPI5c!9OuPHqNP5UJ|+A-qx5B% zwYw+N51`Rg#WPKyn^@~Ww_a`)@d@OD0+wmoyDMfz_u0D&&+8y!gfS!sYinv!G^~25 zF#^yUR=@KatQDd5K4t8kKa=}_3O61wnt~WU9P;2Vn(dXoO=}uz=^CqbH zpuee{MxC(D3w-ENFZvL+ z_UJfWpV%%C9nO#~S>(+?&7e9cn2y=;KMDt^DBH7*U-Cs?K8+887}4XV zkaU}DUJbXcmil+Di(~VYU_Jb3d336lZ%_yo87P@C`=xFtFVIm5CCa>(zp2!sj_~Zq zx?!UZ+R zJ{)O4T`43_wWhcHL~OhaAcp6tZz zNV13gScR`N$tmIC4s7Idn+o4}N8b;o;(jVZlZ+f~ze#@(7*b}C<9;#Uz}x3!KU+!ll&jV@9+>14 z$^ME%HeUL85tr?1az)*urUB}!v5p#6SKU>8TSV-y>x5{+8afm5uAd&wUre1%aV#?G zWLoRE(%dMLWYDP6Rx!ZKLOi!H3Bosl3*n+|C|In!f`fruv59{%&=P*Rf#ppv+Mr5Zu=ZS5gH>9HI~p^m&&O&)f7>4VF=sGTUA zSZQwq7yp-!BRI_rfQV5qRkbjX9EawF0$7#MRh;&vE|cLEOsK8hqfz5E!gyQ10Vr9q z@u;xl&(;LPt0rC>fG4ASc!hb&Zc=BX8$n@+QrMhZOz!!bPr`{l>p;^2=^I;7`-M8na7_bx$5hWw6Z?C5R6D7x zFAPotSk!Sa=3BJc$*_of{tWZdl(nJoXB-M?*lv*M7Qhr zNcJSUW^?ad`Ph{@;}uhrnv@yF*do}sXzZso$|WOR@*C!$Vr_D&jZLryUm8;Yl}#u2sWS7bp43bE;EJ zJmQr~8*%2#kHyU)Gw1h@5}C(`b3B-=RQ)UuGBze4c(wh*Earz6_*9;!N{FU9CuiyF zOlNP|mY=zTE|Ezqt>2d`DG1#?nT23oT;DNx%PLM-8V^`tM-~L461;tj9P}=>ByQQb zpp3R~^^bpuK;)e+rKpnmD6od{eYuS_fB>%=+=#bGvr~R$jZ4ALRctTfT?(?o5VS&o zy(zF=hmZb-Z0P45y$)9*Stp56CpJqu{2J2BoT}{g2W~RGGfq~0R zq~*@0=`Fu%kXjEOJJ-`chE&V1@Gl(~_&}bt+SslS3qdr?Fzzt>g+=!>5eBq%s~^fs zZqaw>%|eMKWsV-T!C4uRlv6PeDWN_v&zl3C{Mz2qBS-hzv(*L) z=-%WK=ffGn9V3|~s+kW$JlSB`-5$4+OYWcS(t7s9hCjn=da3W{$N=7q=CB>z6i7SN zx|>zQQH2mP;gzny!0H2OJE9gyWnwNjeK1 zGoPT0lPxoFAE!jnfi4S#`6YDlX>k0TPmBNEag>(S)B(C$TS}sW0JJ0|#N|o=L+< z3d5ANFL%=fDrhTO*Mu@`y?FLEUwrNP6X?8Z?8h*u4xj__baSGiczt3k@BwZtyjxtPEq>LER%u z{$9!WSSpc{MdRpd;iCsS=Mm_R9Gvuu&fVJZRd0;LA@f1XfToEw;q@vn7L+G(S2La@FHCaoCm=LQLMH8jdA79Mc_Ab;7ZF|~9k=vlE@fqFZ6w6DX>c}a zf>M{9#^p6|_T+_#GC$Is>-6*}Opvp$9bXg~BWD(BGAz zPq~jpl}5`q3lOB%6@V};SmJCCrRHrXm(8J$-oXn)i3>QI#7!j%T9s%fUr>0D%f z*>30xZ+ZZql%=5)Rf?3^9oyT311hy60(LDXul{n5A=N&c8CTU8ShO|%@O{ceVYY&j z7wM%d5C>$K>a7T?(wM^TV|wx~GS|VRIS8FYsh{!*^P5HeEP`wADnp;TRYmRG>vC>A zc8#8Y{Ds|uRTTkDic2o&`4G0XXgeV{ebHNy$L7A2G3d2xT?H#ZZhlh*!~qY-B>)&B zrA`6%=x~1!pY0?Z**aE};C*4#=wLMu-a5tTXwkXcIbFHAp<+a4EG60J7vSUTIk{* zZmoh@ORK;->Ss4V9AxDlDMeSs6}4n+HV~i=4l4c?W#kx|B>9gPccVQd?ygvbw_3PL zNP;7jUDITMp-Y-DHd4+cG)BA-%a_Q8UB7~Rv)|iv#%T7msD%jY>Ql5(m?Q)6Nvemd z2IoTA%m(H|eJ!bSjU+)+3vgCG?_>KhNO}h4ajE(#F?bT1XQ~N*lUUi?QVF;=)V>#5 zNE%y*fCJ8IAwNrhy2DFLUA!qywz;G9hh2v#_-qW9Os%Y@tN2$#Z|FZP#X3lkK58+y`;I&0!f3|B)o?01Vc@ zP>J_v)i8lpv!qw3H@fuDVLw;bU7Kyog$`AV)zTBrlR%&S>UHG3AQryEHfK2d;JJ=E zDo8X2KE9Ov)`5+jU73S8OOwRu3jL%b+nJz2zM1+^vu&Rb#^6tuI=kR7ViK=jYqNmiiNn6)GO90|Hp84w|a zE-7g&`t~!eKvLfsfM=x|61AdbTmJJW^bXPJJ^Z;d;$-BG7LexGw{VR$ z$!m{B6ZeWq-2D38-CAFRm3P8cyzVwgy)(A<0teY*?UkAP;)|pF`=u*PL4@Z3;}CiO zLMPFRCfg$s8AMv@qX1G6z+Emwm7{!AXaURFKkT&g|H4ff{>IRvtC1iPYo6wC->ZYl zaS5)Z@=d|l=MR$ayor%8%2yAv&t^+~_Vfyiwdrl%JH|jp0y4aTeRykmW;}q2ErBi~ 
z@M;v0+Ok5~qJU&4k`vj{;y%+ztxZzlO1)5Ao%DY3WJyxZg`zdFPOm|>s5<@i5Xfko zw=lp-bu=Gnb8`WRJ`3C~;lxn8!rvCRWZk#D=dj3fGIJ;Px%0Tyq;_S}B>$Uc3%egO z2qX9O-GsD!24^pKcZctZ?l_s-pY%jdvW_PfcL$e9i5@;ttM{Db01iJ_lQK#Vxr2(Ig$7768NrqVN7@NdQsfAir7q zI2fVG&c+kc-erAXx)xuq10i<0Ud3;g`DmsN762Sy04mtlf!YBOgjU`bt5d5hvin}^ zyDpOx`kQ43xR#PC{y++|0{WT(R19?C8PJcpNx)b-MI1^%VkpAyHwzPvBrp8|m6b*Q zX3^56Z339xWvRR+-!+cn8{fzPfRqR5u=>B9melSzBmDpJS=kM?)^e$>8snOGzV1us z4sQI~=Hc~#V z?uA;N4^su-@4$)seyL0AYlc5~lc*bQj)@u$6U|MP~Z z{4_HdnV$jOjehgb2Q%BO-#IpXe$EGg=KoD*lKvzn4v}YpFTA#C81xqTXNk%CvzXu? z=z;9fFGK$|tN*#RBDdj*|9+ZF#sZ)Jtb;}$FwnFvVnB0qUsYPTL)i|iN~N>5iJU0D z%tE=H8#||AxKd`mb?97Ny{T{QBpXUq;^-Oq`(5lz?m;st& zkGl>7GB87S+=~2O>D&QyNU26y6guDh0h^u29>GekE`njEytqn-NrK#IHI9Zg{$`QEf);Ey-5=c+wFZe#1Q1>G7z_g~qHEEg{pp!g?8s?w zq%*ZECO^ZVf5RlfY09wdzLoP~kPdDp{a5{5z5Hm%)+apnq3$Se+Xz!Qnlb_r)Cwx; zr4Dp2$T!_-IZYiim>2WCRv5Ic)O20-H;aDETGS)WE}+Qr|&iuwrxqFqp&ApT8=5DZRiZFtG`mp| z{58DGSVJ|RyNH@z+W&e*&W}8k%2#)?(JUv}7NKOq;_ z8fXPY2^Jg|bk5A+TrqX_bral{^}fDaCz39O1fnCO-RtDUxakCWlK)mEz>ct)6zA92 zt2jWnTdcb0XmUUiw=FmwFB5{uS#EtGIFS#bIb8>kQ2{}7 z_!7zSP2M8Fvi2~UU{eR}8Ahqp-!!zk`vQlkvpmTDUgW!Rh7zJAu$OKguu#~N8SL+9 ztcbEo9lbJv9JfSTei?8~8s4q#L=tY8(>3=D8Jg7DiGtS%>1oj6C8qR9;b}ws%bF~q z(%6rjB6r@KO5Bj5+dOA0xB}5al10pP1T+`A;3n$Tzbf6yT@DHG4BmfBHr+MX(bz{QRhnu9|Inq6U zz%#9)){Z}tyXpC(33^bzyYeaj^(rr&2dQke)>9nITm@mh_>mf>5Iu%w+ay{b*1r(O z)ijcuX*7_R%zxMiBy#Unyu#Xm(Zrod5kwx(cEEZq^N5;T3+=Iq0aQB#XK|;B2;P^t zw8We=_l8ac3yV^fwr1%Sdq!~kY4*G&5F3J>>>z)&sR?{!QA4ie*b9cVqG7ITy=}n@ zN1PLJZ^xH_$2uMvWAOhh?>g}ZGEwVKB?I_G9t?9B+DA|N^$Z`nQBzYfp zGB?ypN;0TiJjJZ)yZ&?Wake8uVejb9do1q4$LQHw4=U(A(;!?{FhReL%0&B2ml+=i zlIqKDmt14dA3nVzUwOz#{>}LlI$oH#DS5iYY;9Xo1j_n31L!b7Mn-QW9JRGJRUPY% zPjYdeI`A;pUQItzVU5=+?a9^Db9a>o0wzA}dR&C`-6c=_8lFhoYKnYoN9fT< z->eC+Bm8V(p-;|FgIm$3mTFM|!uWNs-nS3vN{*$nc&^jlg+cy+~#Fs`9$Szj<_*bnA3W}->Ze2sfKaUForzuG)}MmjsU z!9(!cXh9G*3J5F9M`Rx!QC{UVn1UVj=5{Zyg-&OdnE6Z}pVMna!LC*LDt-I{TNC=6 zrtT>HUAb>8aJm$GYy%XZI!5PXXyWHSHbPDsI6rBaNKXq=HZ$-Wi>beCq#;(MX&?{$}ii5>kn_0YY}SQ zejZK`kr4!4-;eMUI%T4*#5{u)bf6!)HRL=jqs8H)o#~!#VQ@K{Tc4+m_Ad6mn4$3O zJM-dqs+V#wEt!r?lV*HCX<7}YK5dmBd8))g*RerUvb>S#mGqPzvv(KaP#sR5YqA-Ve5RD9>bgM1|^qheg~ELTcB*Y~6?=&v4l)1K%+cYJNj z(b9R4z%jjzxirA-_V4k|P+VO%sz4)d0N2*p5k!dFR9zFu!hYPB1RJAoI5UmCxR%rysSsgS1N?JFY5MuWj$Z z7T^$AcFF`<7%GlW3uUgV?8BtJ8-^b>;ah8we9@HEQyJxOV(Z#3DxBy0Zx*wG(k5L* z&7kgBexiPOSdP_2J3TSu{uWVbekf?;z%N{$mt8+e18kTA{WtUhd(`s?(xKNZgjkbNsY*B*WHwn#kK;N=F(o zaV;;udR(T-meWM+iHZ}6BoQ}uuLmMuJ`25oM8WVkQ)xh>(J}Y;JI9Sg?h8nb1~f>YboS@#v>WZ-S_(4i*ywjub2PPg zfChD&nB;p)>tAH7WH+p2*HpIAtF~(BsZ-6T9F96jx7IQ>2U4xNj-;ft)?zTt;#N5<9OipVzvvcZXp~2f_<%YHedtXduSGPw#5|;cl0fo<}0G z6JJIaAbyNNpBS-iYl)1@Xr_7_Y`okAKo22QBRg1jUwX0bZEM_G_?_R^~^AM-+>6Z`l< z>R!ViHnhV3>FCt|NMY)iF{z`%^>y|V&C@(6uejdw4t z1YYktk#jPelk3dz>%E|Npu%< zlYmExt2TGZpARGCf63D1I~gJd+YKyh|(3}n9B$a?3e(Ai^)7j^nT+g!|@QlLF{ zq@LG)w(g4L{<^^^nK_LlDwk@$zH7Dm@4tlp<$rq5=eW5Wx8OiwKYF_tunAv0h`i{STgoo|5rFbDD)PPB6`d9t+uLlnwTalmD3x6GY*6gm~|3eP& zzp&~5EquPJ5f5LF!ii!j5fycJ!jxuclMK+I`)KLfdP&RRNl_L1Z$jt zf9a!wZM~tq%8&(@#G7C023L4Q*Fh+Lm-wT8X;!)%KvN3jw0K7A9ynq5ZMOQLEBLUd zqb%MHk9croBy+k#vB$rxJSlh7@gAYo`{bH^7^AFdiglQQuS-k!mUkt}H{}I1E$Bda1LO#dRgVx^|{S}Z}3 z`_uvHdck9S51t=C%vB4?)+mo8e^|wELnFmWhZzR?F-W~d+KaOZ7dJFH@^3fU0eUI5UjtlKae5I+^Sw4&I~k> z^Z&{pSUZVVLBr(N;~?F$8#T$(+Pb(ij^dGNAl^$UbsQg@QPpPeR zv3Tu~`Rt2c_%@D%(}d=$M|F#-V1$z6K!S*y^js}X(!S<(+U<7$ji-hyy5viS6~zOl zcugrZ@%vP-Y`5Vgto4rNO|zT~EUUKQESO>Xfv_41%MBd)I$mwzU8uApHbMLfZEWcY z2_@^nD=0}HpmrVjcA#Bn$h~2P<_CCJ#}5Oa4<0G3D9$sbzrs*oGtBQ+`E 
z%&+7buB~}T%8F4i8;b+8!-0({u?`MfmBzBY*ZgZI3H@6GvNc;)lk?+4)&O2iC`)zi=hTUH2yYte7G$BX0J~)@cciH#gJxIHvltq+&lS_}Z^&s}s z76wT(2A@F~)(uri^lLdki6Lbw_^rB~N`AyI(vYa#sl9z`U_2=HfO~!oz@=j)ZE&`H zHFs&S(4n?rGro~)>u%#1F{^B*ln4&iSPY!xqG#m7mmC5A;XP4yb~T~`Z!(K!ce_<4;J7*)?ix-a`m2*0ufBUpvBS}i&;qBA;Q&VoDS+9c^+ioZXqwZ3^i22%MsPnA}tV?0S7^*i;o_|T0gIe28jB2os+qCT0G5~r5t@sNW ztrj#l*EQ6ntK%;7*<3n~$Q?8skWNc-@s7swg^4a{fWwBK-r%8c?=5vIi$|hq-NvuEV9%h^Tt_~2JwRfSn7pLZ7?d=x~ zyvnahUzqe=_n8O!!RnGAaQs#@y*bA-`~0Amk;k-y=;z(AypUoCn*Q4c@HhUB@( z^tykA$rbs^c!=pMyZ?Opj-YnMuCOYxL9MVj9s)I{1-8UgXo^>X4g8uawla#VGfE+* z;Q9|0Z(EN2Qe-IXV^oO8=z+LN1sW)dq9uA{`$ZnU=~`ep(rxjR1E;5R%AAqFq)`s< zDXPBJ<)TBZgO6o1$!`Suw#WmnG@Z)ny$TL^K5DRy*AYa6&DLZUO#CF6%5Fw7vku3c z&BytEkrOj|ammT~xMARYsHj}xZx(4)bdO2-;)LC`5u=UT?+Yq)?Z7H0gVSe zecpc`9qJakB^_N|x2M%3KRO4-a#19q<@$Hml)jXtQN5QtsB1#fr0BL#K_i+$qOv_G zabPi92ESUaWm|l2(!r)MGmt&M6wiBV#Me0E&Kob0QOz#cdHFe~yKz&I`v8)pQJ4MK z2jS8zFomhTLq-$ zksD{@`~&*O`A@xjRQ&1K{`U90-T>##1Qn_zktt-)obX_5473)*RQCNDUVqDv{54PV z|NoypAi%PB^#-DVM-VrVB|x>v_H!-q63=4ut!@1uWsl_K&(^GorHEPiG|shi^(psp zFnfsXT9UjmsmAocDF+2D_wwxX_NGTiI4h*R$K~2OwWx@%ieeL4ltpMmZescZ6ww%b zWMXw5iAGvPHtxUH5r8b|2n@N=gQh$%97kHYYop8G|A5)SPDs_qY8oi4JhM3D*?Cv) zN{5F7P=D>P+98cR*GuAH>ml2lAV0NE9~;Plne#pFnJxPYQI_D3`th?JWZ|H9b_IaM zu7~XJUnk-Vh(6(ffdIht!nhVPtN`xhxkyrpCZS$_1-4P9BYmY@POR&VQmN;=_m56i z&P+WVaZKLRMCktE?|EyPJeYzUv!TbCH9HuuTGsZ%@|qhOj$b%F>2j4KWmIryCLLF< z6s5lW;e9os04+y+>IbdAz1`jrmRB1u5m*(w91J#F+0$pP7!VOi1kgyiv8in-H9N4cK@t)cPOdV-oc z3(f*$fJ=I1Riz}xPgcN|VdRyLYN~MI54AdLr5F3eS~17k|HKZ9=E?i0!NCo>awaPb zc0X!tWK1QN&*^G1F7|S9@f8*pmO}-XJf#lB0ugmph6Y`-Y*8a49C2t0d)!mwbI1Nj z+&h8|I9QEmf9Zd*_ufHGe(#$wRzws;X@Y=KM1_!#(v*^@NEZ=7st}bLVu&CmKp-m8 z1q1{X1e78*gdS=lU79ojsUbmnOQ?Z__&(p=cXz(Ooqe~=y!+QWOoo|%Q*mYL4jHI34)ZSGu2p-9|BdDbO>FAe;WWBUAN?*7T=_;?-C{t5eIXurR9T)(gqc z@SqZzp!hfsM0rL+0&WJCgt|4sR^2Tk<{a{6UcrEWKBd(>!l=^t7p5@S#_Of%z`oE> zTF3PA%Qkk*hNLRM;mr4YW$Q4>+wZ1fb+rCsQVygVfslEt7HM;#He^y)H%=f)wE0(arZqP@C;CE(;xV(&}Tg|mUXJtHnPO* zKELC~4^tkNXCLtZUQt&e(is(h^VBTcq&R5sX;fM{^B67 z5_dIXF*mPN1o`X*K=Xv~qiv>0!SPx&pOA81uK8HK{;%no@ZMk6nI)b7M94fmoTlHV zoKA@u+4u(GV#MSz+H`2pH%c)=2-k(&r9^AxdSZ59QOD}VsynCk&c^DGx5?bnJmSr7 zx)fN;on0gD@5PkCTp`k)1&XCmlA0Fkpgb9)&3-lv@9x6>-O`i2Z`z9Qdc3%(>fu|I zt`As{PaNFtpKZI@_pC^X?g?^y-Fj*Am#v;m^{<9oFg0z9c!_}bg6(H}Q2&R`GxmLrGLQjy=;P zaI-7r;q8LjYRUsuH}@dVydPOV&limdxaxd5H@m4@Y+}a=#Bm5;IXyc43u6G#$1vud zK%_nHFY+8i1l;Ri)a%Pai%s@(Mx#KsjmwUXQMBDh5;p;q%bMZ%pAf)w#(99XFzw|< z+qzV7c57HCIWWqWPmAwfsYj@h*np8=j`OU~dkX;0J5fRFWQX^>g3@mN3&r~v@b=&S zlkk#BlSdM)Yr{?8Jz2h+)9sstHUQS=_=U7@N>;!5469Z_H zJ1htQYXEkL4}k&LAyB(~&27w9y(u;$0HFR&Ak4Kt92qaBj9I6w0kjf8460|DC1#cWL|$EPuoDe|J~=yEOhTjlWCde`jeF5&mMO4de+Rcp-ZZeD~uqDkx-*b8!dCZyKZgtfvR#p2I#Aadqp>7w}wGCJF|!NDPWET z@go5yo3+Z7N!G;p2c-A4`Z5#EAI4Xg4bO4pXI{T^NBekwzWP6wVfW(;WPFf9x&dN) zqZ4X-=g`XiZ}Gyv-ep;w!SXpfnN}Rc?33S?N0LsVwH_81lj04D^<~MEliOF5%42);J2>;> zj_sKzJfMq50bR}#p=;$EFMJ)qxZeb(_9x=Q7UGs1QI#!p_Pb`-(c)I~ZTgIRsX@Ek zO2I_s4$6liO1X4}ht*xVI##&{iZgVmgoUpsy~LPK;eP>{^JwZ$M4-?Z+K}oKTnF;a zHOd>g9WNV2eR;@T$vUEl85%nst!r3ZmiO+4&e<13EV=!BmQ^nL^|l0G18P5R^yClR zKK{iN4%JxW^h!gWH`eP#?!kDS#oyuao#$`!H3n% zt|A&+uOlB<-%mR5;K%5=qFzM4-Oc6Cr?P!iW~~A{-t|mrnl`EJL}PrZ-Ml9KDY_A3 zrq$(I_FNn&eb5Ruc1G0n^u7LKxpy92HaGO!2?{*}D}FZ1^=f3ZavuSN3>n5sTHi^m z867KY1-gISdd|9t^U2E8Q2S-K=fFnkIGen`6;C{qRqJ`ucdPSdw)<61?1of1V6Xyg%UnO^HuIB;gU@VpY5vG(kgw;q1Y35u>UM= zUs!uN&WB=uRm(Df@6x zdnLPz?i#P16`DB75%+~)TD+3b)$DfP{<`$O>D1b3-D&W#1?xJKx~s00&nKnb0v)VQ zUFQ^QMZB%t|G4SmNj^8$o`)b9(K(Gn?+wbPp(pQ`jO@SDivkEAg$}zPiXT6k<8Zgd z2W#Bm1>{}6E~s?NjlsQ3teX4Z*~Tdy%MRmk27UrAV$_@3N^kIR3 
zVTG|pMgjO(vJr5+JoxwZnMc-=p{(2%p{!}DNB0S5UEAx*bN4@*mUykn{o0*(;a6WE zubQt@qwe(|ICi~9nU{41d|6-!c3<5C5)|ztQGzJp8+-{N0!TW)A;1WM9KJlvK&-TYVRR zUL5dN9(AH-d+UP!Z0Bj!WA@RI1IJZ9Q*;3F{k=;Z84>^OYlA#GKyA&WNo?COLZ^|E zqH<9djdo(h!DVWlj=!(WYtC4~n}vn<6V4uZ)pq;J6*IG`IZ#}+XI@9v;PurB;zn;S z{vG5%;3%0-w+(xkVL|1|rlkO)_|cflUQ}-q?Y*kXX@(VOIz&gcujj>cqTz0>pe>y> z--HraPCo&4!v{l!z2um-ye-9L3wxf)o%&dNudhun9*6S!qd;Bm+1;gaiW>6tsLl-! z#lq?K%#B^Ea`v3iO!oG*H}8k9jTyIGBr-FH=JDMBs-zu}Wq|0RZGI9V0BxNMrA#`b z`@%hI%rL`MkTm+wSlg_jhLQX+JWsDd2T%Jyw-rxmd-=((_|P_*?T*uU!wOkib1p#L z5Nvvn*3$M%!Gd|uC)!FtQr#{McP{2{F~o=cJ6Z^4n0#H*3|3a8}Uf@Q8L40XH}p$1A~ z8y>I)(56$vQJ5zgWCY|gf##i{k}ueK>t`a_;*Ptp3PFe5o(5LRFYi-q6Gr~%_?#Si zJ$GZXf(^?vIN6z0)Yi9YkrvnkD^Cn&-VW?ZR&6#kd65NIOH2b7S(sL)buM4O*&o8Z z<9;=YXW6h^DtA09?`UzI7}Jj>OK%Ip+@h)*3j`!JO$wLAm=Fh*H5E4QmZ_8qTCQG5 zp1sshm34TG=kl_D$tCTyD9d%-(2KOY=ew+Z!(T72$(gY0x9jGAM1X1soM2u@u*B%l zR~oidIjmsk(ex=pzI;Rp%!RP6Iwbh!G~sB!%V`SrnOIS5X(bO=(UQ&nXVwKukC-LH zb;DxatKL|vnl4j87;!>fYaGe2`Z|6pcho6nM*7A?J)rn)7!l+`6HpdHYg@mD_=H@pzR1dk*h&rO1=OYA>QwT%MT zR8Jji2YWAC+ZcZUJwW6wdzu3RbN-na6PnXUDwt={+wVtc;aK)W!b;(5J-4S%5*(#i z(IFKJv<(@k5kymdVCLL4b z+9PEenUg1cT}&?j-(Uc_?+_Ubb_gTJbYy`UK1k$ONaQq9)aEBDCs9?^MBf3V@#PHX zasJ)4T91wA09G z+RGKW8vW1hBPvgd4!nOq`SQx=(c_vItJ*|Q=Gv$Sy2_wg*4U=8je%jK1_WU% z`ws^wKv~lxf}V2X{GU5r`(7e_P#rn^Y(6DsWawsAX?%i` z?_QunC*6Fj7fK; z6Mv1^U4^G{+;0*`1QK1=4?O!U%{K?AhZaE~6YO6irt?Ie@)$}ROL8==CXKtkVjW|G zx3;nSHCD0seYpVXq1~w==y9O>RAD!N*@0(r`|p9WX2+1hnW`k|(U^E%@~1iw$!+E|7w=&DJ+gw7FRpvGaMa zORdS_W!)-W7xp{t3UIIOmaAdO>otA&r2wSA;^;g+=?{ksjBy1(>SJDcpmmq&OT>$z z&BSZz4TNh8dn#Yf4_vJDj$R2fbiP*SR%O>tC`xn+V5;~Ql8Jtyw?&w&d>UmeBafTJ_aaGuyzw_ zmyjRs(URl|fC@fI!6I-J<{4nG?aSLr8f&9Km$z_Z@X5<`R~z!9Unecviw9P>bSg__ z53XWMTfnhy&)t=3rLIF&kJhB0VE5S_r{^@QV~;P0>=)(WY#DwM0%L8iG4iS+kNQ7k z)aWx)JkV+X>2Z*8&oHeP>y^4M@z||nt&?5~^M`{Uj<2}jPgVolc`Qa4;N}M_ z8#4@N5{KK=yk-kN%Zn&{QEtVd;F2xRp9Y`5R#u?KUPFCok}(UGh7Dm+K(C>i1i%zJ z!B}899_b0yaJ*5`JK5T}xV>Ta(U3DjPVcFIV(jsMJl6?2W^3FL;>pDA-N~V?8pQC; zCDalEnAqZN+GykD8_Y-gKu#E8(Wjp=KA*-%uV@P`x53?zhEWEkjZKgOh`s{F zXGCvaI(CB!0TPgg2N=;TbS?F03>|FOuc7KrA&F0-ZH)bG)#xjf3S$}M3^Daitj$?* zT@&eUemAof2{{wd4wHyUlLvtl0kIL+I3iyr0*+1> zz=&#C6GA~mK}jR8!o^XqmDKmNFidB-Lma|0dcbxx7mVonD3ii_0@YA8bTKowDaWmQJHx^?|-Pu@%FhrC|B zl+BS9)=)_aI>?n@#;aJ8QCIMdT-nnE2)4@)dPa#hlUxG?MGGQRb_R+l{dc_!PtRoj zlnzH;T>n5}RokF$w4StgA=6CKgZ$P)ScU~n)CH+xT3VEB7=Cg|hg1Pq`o2Nj8=3G45G|d>uVu5JL$crJZG~~w ziRJ-Xy$_ug1&217Rh!u3A3p16S5>F!QJ4=TzrU6t^o%;FKO8&_e_Eh2y5h7iir6sn zi(o`0v9g>T;hW@VJ zwgIWgk4Ne*E6l&a&n7xNJ#&b^x?*OX)NTb^L#&CFlRD>$(u0^W1()!g`=6jF)JcpB zEi_1{_Arr;o)f7YW!wa%#eA2p{bzCX@%zcK@IDpf!{7w2P)scN7a)A z;{u~P)K>C`@9URORFWwEto;%AZ>Gdu6WAqS-1;#xQ>Yk;)1Fw-#pf=Q{ItQ4@Z9+_ ztuKq$79ZU5ciHGs9={YLZK8uIwxxzYoje>WZIOqu+^=Etvjoa}XNEPb_|s`|mRQ$? 
z61a+xb=Qu$CFja(x$D{PsMkEYL z+GX00s0?&sr#X?H7_J(&2`ed#zm*H8Yv@ZaUCRsO0BqYA_+~WMU1P;q-y)SWm(fUuVc zN2mxRsk_Mock8PhDpKy1SE&m1oJx3Xtmbn|>Gh@J&(7zQw@sKfEOiy=DK{)dIvB|o zyh*T=jm0@X9C(5-@s4|Ky7RqPY_WIkw&|}(sI@oHQ@Dz}WsD;ViTVsj71?&@TTZ}Dh@Wy!25{SI3s+Te#z0Qf z*GZ~U%B8g%SQ&p(!+ztE;|ya2-yN_$zI{b5m?6Q`Wf*Qp4xozaF8ky^($3LoTjghW@Y_^1ws<2hhV|e+<)a-mhNZcOd6}Erg$SvsXhm zZxmgs&TY(PS(H~G=G3^P6W8A!#9zCKsH_}$Cp>oUYJRdTi~kHorJmN$Z0SSC zZIiW%t3!Ia>U+IJUyPzjTCr#E=ZGu2-lU6q%9xEoqA2JJr;NuG4HP_ zFl8sdU&YcaLSw)AkzUv7Q-%jvTGL?~GI_csHyNjTZ^^yZpg5dc6vU z5+4i0!OtdjXAP6R+@>Z{>+tJFIXfP;+a(K|7V+pa|AIXL&6(Q_Q*F9~g`g48I75LHqB>Y5)QeP2E+{%b=CtJ8}#hR?@un+zz9$~cqer)$5 zn+b|3n8!!d=`tT-0&4e*m2&zpVsMEjKxwi<(@R6hAMu-!X;)rZUqV0(#*Ube{*FvU z%l|&)altD2veUt_Mb!U#{>q{D-!f(Xf9KY3@Q&G2W=rTVwlx`FC@5ZFIk_MvC25tJ zUl)dmuG-s?;m9n#0sjQ;&VSB6<>-;N$mz2h{F;AjAhFwKO)2x?K%PkKgc5st3JS;w z(2U4u7(xx(d%TTM9ymm#H&C9zGu;3^RgSxXD@1x0%$7bu;N)F9iWcPbPq5ABVx`TG z&&VkPd-3Z4bkH>P8~HQ?Wy3&B#~eYgV2Fy5+*yP>}R7?UiY%Z z_LJGy;SjF@d(BP-X12YW)HB|9yMo?xt?#(^&k zY%`fBPzW7E+20P&U>LU$WlD!yQwzHaV*%UHZj^MtR!+X)gLwToNAN)3#$(_jJ<_OwC5m3 zeL{|gdQl3x9}GRo6-M3>Jwa1%&QloxM31Y+4}6!9vkYv3ovkyXy=bvJ+qct8}?!=0=FuqSj4*v6ff!ZGq z_y}vD#(QyK&~Cv~hkv1!-~JXx&NyD*dS4+SwQzJI{TR!8KLH{aIPOC}qhXwnR*(qZ zNltI}+-MDhp%5uakE-uMzfLSe1TK|OES;gGS09-ClvxNeIO}sQ3{}p|E07gfB!)Zzm z)rs#VRMaKu1SpN(MXqv-qRMa2c_co6%N;d+{sYMc_7cX&z7p6c&c|qSrwvn&v{VcS zJJVWm6fh6Q05})(t#Ta&p|>?wLLL#5_{;2vZ$;MTGu~(YUTwV6@x7ZK9VHvCaUp-9@0v7W`-P1ZbmGCh z*_oAY-_SQK(dn2G!nPvKIFzNF0~c4I8^2^hv*AfusvFuu=BZQ6LgW>I zet(xF)TA$C9rq`HaY{#UOJW`y|UoEoM1& z+KKVRFFC1E-S7_w4z*D@$9H;ZP#*Abd(Aq+j02WvM z?SU5YbHv26vJs%|%4A&D&gnC9S5k|cmo08Qdwfuja})!fN)>J3BP&E^!IN(<`jjqO zL^`@AQdbvEKRU-h7M*#~d86~{qLHf^PRhtDMwWa;6jN0HZ09(UwWwO2Y8;n4hE-Rd z@yZxyv~Abzzpt~(3mk>gO3T-dGuGOOBEuT8C`xBNv{B+z4lOY|y9f0`qf-i5R~|h( zsaD9x1JX!OE`4e72sVPlfSp)4uM%-DQP>;}sh;1Xt&l>4HMKN1uwSvQb>II{3y zsSQy!7tmT2=BL}xGeV&D08Xf!QO};Sl&P?juA{cm1<0@MUsLKI8;dmtd`YkqKzO{( z_j&m(pQtjp#uNArD_D19YArZTcBt+`(IQ~txJ42^>C+_6PoHLBO%MSvQ`LVatjJm~ z4py0nI9+y%@z7J{=zO&Ynup{zt&0 zt9i;pUvtSCNUtqo49D8_D~Erq1w7nD7@nMDBcat+0iC~B@&N_YgDj){Xk`))22AVj zX#&52o~gS)=b2DOtSH(HK%_<=!RA$mlene6=7CnaBK=X%+xh$RBMSu{XF_i*{bq2| z)tkr_&`9OAQ;g$nRJ%yJEX6i7WNW0b!Un+&U6H$vzq`Gzt0+%28o0+NuJkyi{qbif zVrNE1dx2)frp4P#hgxfxy4++#*BW+-`W7FshGrG5RIdX<>}NVD#?3Kr_Szx}l?%Ya zKxXD?K6Ia9+(ZNqKijIe*GoA3nsRz}RG-wly~};tP$RllboB)2Ibc+fwETk3Nky#) z&{sR}_H(i%$znutYjhyFITCwN`?H;O3z}%#b)|QX*-WSHMH0ZPlg3;N$|8k^YL9%?p}#% zteSTFR$0V*qZ2IVzQAwRrgsUHKswec@!sOc?{(&CfGr?tX8`+@-qF@f2C%}FLsW8z zgc$oY+^?-^?G!piRyt94>*d3B|K4d0ogU4i?uJIA{m zN>9>b2vLlzv4_TBXi_6q988pY#PDvjDL?v$!#J5bA5%7NupqcnR&d~QRG8^I-1^XP zB=Ho+t+#?a8rY=6xCO_Jvb>2QVp0#%o4xt89>=*<4n64h6@`94Qr#~ee)8KRCakfv zHeg2ExGbj3Gre&bREImZx0u&PuPB1}tUN@Or>@O)-w5Q%B5{v1k{V!T=v@SLhJID zpy4&RsYqE{_W;E80Xz!4;`mC);VldUzdRv6#nE)?9uKs!XGEPkx4beUXxL@giiF10DL$dt{NXqUn6lEv zg0mC92rPr0LAX!w^PUauE}z5}-&&q2Kd4AayS|@7RJOx__A?>E3qZSkVNNMyZd%ja zpZAO*enh4+$=*k-PG@~lZ1(9xzGUSn+xGis?Z1KcxU`M0^l82qQ1^-WO~(JS{#wH- ze~~bb$E#}o;izB35C>SiJw1^UoKNWzEFgJhz$p`kMniR==}wSrGaE_>p*SSAvu<40 zvvz>hZdVgq_)d6ee+uH{6oR_oe+5x}n%B;;e-h92Z{9HyntoUrSuh_c#r_pBIm(Sw z6BZ~`kg{MtIBR##H}=c@=zCK-ax3Y_G4iywms{P=roF5ipXW!D!d4-}1-8Ey=r%q1 zM!<2y+4FusiFr>Sx9GQSRS#2UjVEJ6^fCxb!5eE7D9&xY z$kue8Iz%oyXB*8(@hMsq5vvSWNvDD%@kMs7#Q~+4e@C=Zk+&UQ-lm67$4*W}X1u+q zFHUA2M5|M=uLI99UJp=}hr?vb)jA2mIE;So=%V$0$j2ESLAj-ZM@>pZm-ivOQimN* zO7XSTnLO-96h+w#*euKuiKwb-Pdfq&v}?0!8@oGCF5zu47IPt;(cl95M}UaI1rb5_ zH2Mt&{|Kf+&sZ&~StEo%HC*Li+%_KB*eEKkT>IXacRH&0P%^H7-E4Q65evvcXq6{T z%{7aOQf)BT2Q|Ric8o}Bd75;2ZB{VFjIQBjCFTv7UfD+h5>qH8Gu 
z_h`n)Y(xuUbGXqFM<5-5FWEmog$a{`HcZUONJq0{Sd#QlX;g7Y$f$5JUm3*di`X zqCCsOfvBJj9;=8UOc|fFNx-|MAW;*u?=>|Cp_&u76v? z4yotiu1YOpvaK<5Tb_|Z6V+v?P8&hBOBqqqs1uAOTYCCiPuvx@C~rk~;n;YK>aUp^ z2OXOiO0F8MT9SYeZ_5kf`TZE8B07F5rn&z1QiF;+qiARPYdT%-ZZC4H)`IKh7H2!HYq)(&cT z)DYnbxeR!D({sM1m#em}opm7}!BV6MjJ(hD=H`6*R3nv18@b$PPSIx7z|SL_RHsnOWpqTZBl^Tru{qQ6V+p*A<70hd`c7xGOvN6bT=`(x-LH z-jAPKL0!H3co&%lna_BG3$!C8WW@oonbq#M0A8}J1;j98*Vh3(4ppWE%W^-OrAsqz zwsw9~NW3sUZ_OFz3FCv0+LfT5Duq|r8i2ILWvefhjx~1lrd9=fzLps@=lo{bdj z*j~T)s5@W{_v_wzZQ{zrF401Czb8AEb^#FcyTVdnl#}>2Q3so-aEvlNwAt=rP*Y_7 z$e3{f+&cTMcjNk`zVxZA@04_hZqZYZPG`MkLYG0b+BUWzV}iccl6Hd@mGkBdl{0Kv z8FX=y{NOdi5G9y$_(*D|hThp@&pVDzjTsh+5#MKgO|N=yp_j{wd(GT$*&)fD*~#wO zBY;ypuuOjz1k1%>P7SxFp8#}$vjOumT3TmwvQHbxKplC;GNoBig=XEnr(E$qNhnr* zm*gDP{6_xC%+n%`;%wWhKOE8IGW<4pWB3 zFC>A8yBb%ZVeIZ=2mme;e6K!huk>#e1?3?|ZLpH_Kn;{o~EV;tV&=ztD z+7A%fs*CmxNKtg<9>it=f>~Ix26jSeXT||LO<;DkKmb7J*T%v zgU@jQxv~>We*c{`oioTQuMWk8vToN8<21y)C=2K5ye%UYW5_288VOZp16{)#zI+{$ zC!}MMBX`}SA?y1S+LszGRqYrBtQ!B}Kq1O(a&Qan)t9~D`yFzBIG)+TSM2A2h&J5x zha;TCvxyh7M)xv`1{$HrmuPhZx*Hx|hz>HW20T<=*&41zchC z?KVHvRITqU2?d~h(4f^c!kwlGeTBH$-E0}8ntu;UYGb-ZqS%f^AEpRJxP4~8rlCiG zuWP_iFb=&zea4n247H?XAN{=NQD$em>s_i@47)m{vOjwlJz<-(Vhdm|q?O<);{%Kc zSg@Tmt*C%D)?xw}+dk3oIhvY>7+Q07N8Tv^sllv?Gt=Yazmp#%Va zsY@Lo{peV7tE@#v>IUMJSOeyS&P?);b}7o-{@bm{R8dM%9wj{_?FtRY!U~u72&2<@ zPcwd^gUtL5{mp=0>2#x^Ydw7F3Po6vL3!sn5Y^&~}>EL|Gd2L1Nid{goY~0TvwG!i_U*y5BXV zII8RKm|ef5bZ_CRxJgBnJrCm<2TxB>;4#*7`q3yRh+%5MxRrzUxyO*8sskQsedv%@ zs@w;V=UAk6ZCN<1Yh7ACJE5^F`{%J=N4=Qq5!os^KX&RISmUDbg~c_5oZkh5%Pnmk zCUyn}-H5#j@?aV&A0td6?iX!B!w_CBO5Nawy_iskO7*&(g(TQUNuynHrVbU`uWMgrh7?O%e^-H*w;Go-kB0qNEcx0p>B zIvQLzu_&4Pl<5=^`w5U@Yp*kYF?CV}cD5;F44v%KMHt%CYL`jPR1~x{`Q4;na{SA? zeBZa(jXJ`ToBxMh*3JPAh>l*_uImjVHy>zL$>(U~LWo<&Y(cR#);e(XA=aWu#H+{So)AQI)>T1D0=3gx3A7Zqp#^HEeIbHx> zw&3G2qL$t4ZV)Wo+Ew`FrR?Fnv&#t8z>v+X%zxHnCmTP)c*M7e)Bv z?3&}PY)89uxx=zL@AyEIX0>N|FL4RpT1*nxWZ9F;vPN} zrGTqQy-k%+@2ISd-czXvKVvWGn`1wBgqvB~^F?;q`RfwoEZwoPewtSV9sh_PP@jrx z3j<4+yk_^S->^ey2ZL}=D_|Gl((eUdMJmQ#@I0RJgT%Z{%)wtQ11h?7Kwm?738!{a z)#hh;&vFAe{IuAlS<=-`XX(pV*4}Y}GG7%leh3__Ca2N*;Jd!$J3C}jJ?=Z4NTTiS z`ViP-*;S+va)m@RQ4M28D@w?)LfbUe-1|Zi#hyX%2AtS?yImGJSDDr!7WYk9HR-(m zhm+Zdgvs75vk)%!(pncchHEt^mS^P@>DFHlG!fl6gVo|dbCaCI3=oihYf$eP%uTWiZ1_(625B&}Xg85HQBle7ZGBk>X5(@rYowhpMhyum-KO23})e#K}o_jAgk|^b2|0wu4 z-@{wR_v}z>usP!VYp~RT@yjK#(BaE>AJH_lu;;8EE!J+GGNi zcX^-Fasc42HH-5PdOF)ee47=GP}^0HC!e02O}xJ-sB-vV2jcb+8=%L(uVMsc8s;uq z3k7@HJ{1!mTDwyd*Jkol{MdBjW)p<@7 zEIuS1&;H^U-;RG=gaQ;onz(sir*msSDPmV_I-^vZyiY9*_1o5zuj`5gKbU!FR>^t& zzGgvI(c3#e7dsG6ztmwjGa}?J{O=hc-S_1`u#WvE5n>M1+R1I2JZ&wo zuMeJU;tb#E>Yr=Qv8&3w7nuQ{PB#%J92)eUZF`YVtMAs{rB%M7*=#4j-e^ka1^9f5 zaFKEO+5QPE<;l2?Wb$PZ8{LmGo|bbTM28JCiMLCN6%)2doPnM0E=0xtt)5fpvDQhA z8X7H$$)o9Kb3r_!zqLf{6~s3i^CV5Rx@wt&#H)`eCtjVOsA%tlrv6ghMp;s8mM#53rN10(n5o|BqjMA z+z;z+{78+~=DF8hYj#A&7)~?Yc48oN*uVT%>nXgUL{1ux(1cL#rKD^3BA-cD{VbyD zuNo?u9B19i9@oM%GA;V+uEn)ca6%7~Ck-V|WJTP^ zmFPY2X?AG-O0|0H9+zu&n)eBfMDYr3jFNp&QIf`9UL37hEqkH)J44^jKX20MEC)w; zmE?)%VL{Sw)4~lDGCv6<)jN*vT^d73|7N9_E6xOV-@dov@s(NOJ+rt!I3JxDaj7u! ze?~pj;AJ0IrLAZ6+`f7jTYgx>d8N9-EvoEqOn$C=$#kp$1EtlOT; z7uexF1&>?uJGt^nb=dFG@NN#OoU#wRn5C0+Lv_RQIA?0D9=(VVYJbv zEoiMdpm9NwpGPNsL00At4{miNnsd;{MUu%4P-jMr4;8u+fMgGUZOXwq)(?69!c;rP z_vpDdug$JF_sE^ImjFxY7wJ}#k~T?{H$`9Pe&cq4PLAsJ?24_PV>`go1gs^w zKSHE!<^?xWAFghNA6Z)PE?V$8@@hQY`5RjTm;zgL3rZ@03B?>? 
zsr=jukh@o33lXSg+E$VrL5pKaiA&3sWy&4@JZC>W6CRtWE6)r}{RwsI;X#P=(!YkH zz2qW!#axZ^$xr8-K;_Ht^2<-_2uxpk9MxB9^~l-h?h~89?;>71p>MF@j|1 zsmEpysOaH)jlE8-+38L}54jLQG*Uxkfbmou##IGiUMuyK{HhbV6PcsG&M~ufmb%{b zyf$NU`s~;@d^w4dR;LVrKtv{0Xar#Wzlj5kd__dtX9E|n`>Z;fwI0TY4W{Q=+S^4w zdIT#mgGiLo3Il}zch*Ag0(Tt) zsu|y|w5`^?uPuU0PsHM=c~x4DfW4SWJWB|ier44&y<~F*yj(k_6c0;p5fZFXuFr1G zQ@a~C(Q|zFLDHEk9q07k=5(7}G+RATW*2J-I|(_rfaTKQ`pWRpj&|9cPyj1{)tuAd zTrz3;(jHd7eVm+}q|2_#;@pM?9lKQ(mK%@v8Sb%^fQ3;e|6eLw|D)e>(3WJc))I=X zL4V(L^x|T_@+agb2Lml_ZE89d-{IVH9BX6o6hO>*r9zM{KY1{&)232I2u0Vk z9=khvIz%+LGdNak>sVuRkwFXRNZ7}&eo3Vt#K`Qx_THTBnbneMyf`Did5;+BPGO?> zCdREsU9H7F{ZgpJ+E>0l=CE;4e|FdCg3I)HS4l=;n$mt^Pct_&Oc@p?4ymCPeK}|1 z3K8~wW^?antd!n}^t;iNZ*k9#-if9NbV78*noyTFeq2l2sa>hvEs#TQSN3*%x^QeA z$gni|1DUmJeTbq-sJj#k4Ak;N(fGC{kEKl4Yc9l7!}bc81jF0|CDl^uw+!pF(pM)5 zpNpU^01f3d6zF*nWNhotA?3^&25l(E%kA3@4A23mba{G!X36f9) zg`2!wlMI!P(a}46ZT4QyMJ=@^kd&-Onm>GBtb}@yVpVDU~ zX6YB9EOo;t3Sig_k*54S6y|!!C4@KSF^4fu3}5$b%r)%m%49;2Y(?S$c+Sl<_1ZD` zrPz>E`93#7^o=a@YKM=(KVO~7?~W;x>wxgHJKQlh08PQXMdEcjA=-)7l43dUz7%R4 zEq3ATVYj~Q1YEJNh%y%ubv41~h+O@SU%k(8ZjKzTg5^M78qf-PBO@#W+GL%!BL#lZ z<~P6C@L#)!{*6WXzkAJgE(rSDx)Lzw!_Mnesr`o&pt4>fU>z@TC-~oha&hdmZ>ol| z$C0$0OS=%4!ix#K-|Nqs1u;b0nRn7Rd55qx3wOY5#iGE(ps9KW>Jra>wb#Qwz6xbT zK>B3&4{fEh1jey+fyJLDkN^Jt@A&+kAAi@!-}vx1KK|Vw{_ciyJxE_<4l!TU zv>&fji@g0q!#BzM#f0`bi^TS7ZsTX`I)`K9e#LT2oP<6DhXQGDot1cG=3;UN%=Z6b z@4cg%{@!&#tbhn8NEcA5h=58{O0dyHM3mm5(g_fy1__CPfOG)?1p$>NQbG?s(gg&h zOAtarFM)&-0x8btJ8Ra={LY=T&Y5-Y%$j@0KjK=VN%m*6-*>;|dEUp?F!-^*HtU1o z(%noLWiz`g_={u=MVc$lVRGKSQqmP72T~?uX7z!9)GJuNncIwHk820OPi-3G;hfCE zFV^;*UmaHYS3EJ!{Jw@4s&wPuXFH=)!#j@N*ku1SmlNfn%_tXXJ5WQHohlQ%nespVIX!SjBkPIqX23F<(g4c!^LiMZ8O$-6 zOH&;Il-$lJ_eroh|JLz8?*K^tF!JaE1d}M{X|irY`Gl}|!a{=R3A|^SGGa?t>dR3V z?d7|==l91D8yau8L+Kn;bMlc65G(cAY~pFUS_8=()5hkhv}^1z_hY~_}gaGBlz{^k!s8uo1Nu%MjXs4Ag^5!<)8|+d5m)G-%9hCP)|sP~y@MMo2Qx@q;*2q z=ju+M8?ew!Gkt0BMT?sz3ow1!7*D(Q@Ze5+$*u#-uo`WNk%we~&g;7j;YCWYvS&|W zyJFGvS8qQ=)uC&#p+0;qru`3n%<>;{tZ(c3kKOGXbZb_BSUI{9=SY-frH?}k$YU=G z_H3>-E{8aig6E)7AZ-|!25p;ApE%IGx-U@Wo^J6BEU8y{sdiJ#P4T?5e@6v>+=rKw zN4YBNQQ=}DIjuRYO>()@wx#(4skzpZ{fATKIl2!^o|8}gb}oVI8lk~v6(!h}m&@vx zgIn|&dYga$YDN{r8_4}E^nfhz!J+>^ls|&<$DLBloP{th&Y6tN-iTRI$!Kn4m&%$mw}=ZX zQg)qhEhWuSQmHAyL8K8ovXsN=#5Rnu_3Y1e&+5Kcqi`jaT4nGgqtdm(sfqJgV@cSB zkCW#gaFkgEg1JDaJsMCQ+_4Rt(d)wlU_pD`&nv;s*5Wi_?5Nar?ET>hUYt?b&BLZ| z*ih=!+oYWCnG3PA6R+s@v$Xib6SHwc)Ao@55a;KP1b5=jTD->ZUwIF!mlM#O%8UZR zyPjqb_Ai~VfZ*18nGY9zcAh6nR83@1YKwS>8of4=vxbUbvxWY^s(AD?aRsDF9m0dL zMmI?XHH5^-nM=b{hK&>czp9Z(aPENt0Ts;E-};v_ntR{7WR-t?dM=OmL)dY5Wg|_g+YVD2Y)G$6zem0rRiAmX2pu^QqjQChlEB)we)w_0W^*yPf~14FBKjHi)Uy zJmD}hYMn+(Te2rXUjejW?GIP&a5JkJiIbkybq_xF#2+!%8UWA7-#X8>u$aguK1Mez z`t8;t*3wixQdSdOY#SsK*)~ZqMA@6AMhjaSv|l-NGtm5KT)B|yH|8%4_bxIfr%E7> zuKf`kg0UK_-ab}}$w&%9)!$dYZZ69GP`;#=%vG<{LF>P0!X4xw5@i&&inFJ|y(pRJ zH0QLDb?Quujg-kEZ%q?1_pOSme=j<4VqxzExGZD&Rpycp*S+>k0q4!>PDw9{8y>f8O3iqb0|E#qCc`E5~JXmVD$GFq!{F<3tutvD+p z{XA}-u20pT%fX&*IS;Dm29d=g_BW=Rf0ROeRV%xNdsp~f zu8QNF&CX0KH+@;j`yh$dUms~fH~Kc&j1{`<)XiUVgD-e$icOFLwPQupg>c+LygNkfoqiZ(}Vm#Dl?r?0Mt$!OjN*)9XaVP5uV5&#+ zt@TU`>Ju+5=9Q7sCOn{`(01F`({nyf1{T3PSl{O)gz{mzG7n>B(PSN9freC&RxnKi z*|v}f5P~=W0xXw_>jE|44-1X=(W_6XNP=S!&(?9R?;PO?&2nn<`%ICV#q^EUoi&db zO}bGsr5<(3(Kv5otl55vn^6@ESLpErH84If<)-1!sjRa&L7Y2}n|k(zp;CXt>Lujm z$)+Lsx12AnPLd3c#__P;CCY~}-ZhB=ck@5Cr=ACEAfuXYyL;mx0WHb&_6B{wSHBI8 zro2l^)=}+R(Q58au_r=pW`^y)_O34#B6Obt&AY4r8I1)tR9Qso+mTZS(L&!vNy^~s z$W|?khbWX%MlvpZy~`bqVTIdxt3ZsVZ@15!9?lDYKXt|1sVd>t4E#=a>NuCPqln+d2N`%5ihIXQ&bWYc7a!@x3eCU{_5Xcis&dK8C 
+ * Example: `kubectl apply -f container-azm-ms-agentconfig.yaml`
+4. The configuration change can take up to 15 minutes to take effect, and all omsagent pods in the cluster will restart. The restart is a rolling restart for all omsagent pods; they do not all restart at the same time.
+
+## Validate the metrics flow
+1. Query the cluster's Log Analytics workspace InsightsMetrics table to verify that metrics are flowing:
+```
+InsightsMetrics
+| where Name contains "envoy"
+| summarize count() by Name
+```
+
+## How to consume the OSM monitoring dashboard?
+1. Access your AKS cluster & Container Insights through this [link](https://aka.ms/azmon/osmux).
+2. Go to the Reports tab and open the Open Service Mesh (OSM) workbook.
+3. Select the time range & namespace to scope your services. By default, we only show services deployed by customers and exclude internal service communication. If you want to view internal communication, select Show All in the filter. Please note that OSM is a managed service mesh, so we show all internal connections for transparency.
+
+![alt text](https://github.com/microsoft/Docker-Provider/blob/saarorOSMdoc/Documentation/OSMPrivatePreview/Image1.jpg)
+
+### Requests Tab
+1. This tab provides a summary of all the HTTP requests sent service to service in OSM.
+2. You can view each service and all the services it communicates with by selecting the service in the grid.
+3. You can view total requests, request error rate & P90 latency.
+4. You can drill down to the destination and view trends for HTTP error/success codes, success rate, pod resource utilization, and latencies at different percentiles.
+
+### Connections Tab
+1. This tab provides a summary of all the connections between your services in Open Service Mesh.
+2. Outbound connections: total number of connections between source and destination services.
+3. Outbound active connections: latest count of active connections between source and destination in the selected time range.
+4. Outbound failed connections: total number of failed connections between source and destination services.
+
+### Troubleshooting guidance when Outbound active connections is 0 or failed connection count is >10k
+1. Check your connection policy in the OSM configuration (see the example after this list).
+2. If the connection policy is fine, please refer to the OSM documentation: https://aka.ms/osm/tsg
+3. From this view as well, you can drill down to the destination and view trends for HTTP error/success codes, success rate, pod resource utilization, and latencies at different percentiles.
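A minimal sketch of the check in step 1, assuming an OSM v0.8-style installation where the mesh configuration lives in an `osm-config` ConfigMap in the `osm-system` namespace and traffic access policies are SMI resources; the names and namespaces may differ in your cluster, so adjust as needed:

```bash
# Assumed ConfigMap name and namespace for OSM v0.8.x; adjust for your installation.
# "true" means permissive mode (all traffic allowed); "false" means traffic is only
# allowed by explicit SMI policies.
kubectl get configmap osm-config -n osm-system \
  -o jsonpath='{.data.permissive_traffic_policy_mode}'

# If permissive mode is off, list the SMI access policies and confirm that the
# source and destination services shown in the workbook are covered by one of them.
kubectl get traffictargets.access.smi-spec.io --all-namespaces
kubectl get httproutegroups.specs.smi-spec.io --all-namespaces
```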
+
+### Known Issues
+1. The workbook has scale limits of 50 pods per namespace. If you have more than 50 pods in the mesh, you can have workbook loading issues.
+2. When the source or destination is osmcontroller, we show no latency, and for internal services we show no resource utilization.
+
+This is a private preview, and our goal is to get feedback. Please feel free to reach out to us at [askcoin@microsoft.com](mailto:askcoin@microsoft.com) with any feedback and questions!

From fea4ffa0a602ddc3428be8796dbdf0321f7c2ae7 Mon Sep 17 00:00:00 2001
From: rashmichandrashekar
Date: Mon, 5 Apr 2021 18:53:42 -0700
Subject: [PATCH 084/301] telemetry bug fix (#527)

---
 source/plugins/go/src/telemetry.go   | 2 --
 source/plugins/ruby/in_kube_nodes.rb | 4 ++++
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go
index 48f82a9ab..461fdea96 100644
--- a/source/plugins/go/src/telemetry.go
+++ b/source/plugins/go/src/telemetry.go
@@ -153,8 +153,6 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) {
 			SendEvent(eventNameCustomPrometheusSidecarHeartbeat, telemetryDimensions)
-		} else if strings.Compare(strings.ToLower(os.Getenv("OS_TYPE")), "windows") == 0 {
-			SendEvent(eventNameWindowsFluentBitHeartbeat, make(map[string]string))
 		} else {
 			SendEvent(eventNameDaemonSetHeartbeat, make(map[string]string))
 			flushRateMetric := appinsights.NewMetricTelemetry(metricNameAvgFlushRate, flushRate)

diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb
index c057f7c2c..8a94a7245 100644
--- a/source/plugins/ruby/in_kube_nodes.rb
+++ b/source/plugins/ruby/in_kube_nodes.rb
@@ -9,6 +9,7 @@ class Kube_nodeInventory_Input < Input
   @@MDMKubeNodeInventoryTag = "mdm.kubenodeinventory"
   @@configMapMountPath = "/etc/config/settings/log-data-collection-settings"
   @@promConfigMountPath = "/etc/config/settings/prometheus-data-collection-settings"
+  @@osmConfigMountPath = "/etc/config/osm-settings"
   @@AzStackCloudFileName = "/etc/kubernetes/host/azurestackcloud.json"
   @@kubeperfTag = "oms.api.KubePerf"
@@ -301,6 +302,9 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601)
           properties["rsPromMonPodsNs"] = @@rsPromMonitorPodsNamespaceLength
           properties["rsPromMonPodsLabelSelectorLength"] = @@rsPromMonitorPodsLabelSelectorLength
           properties["rsPromMonPodsFieldSelectorLength"] = @@rsPromMonitorPodsFieldSelectorLength
+        end
+        # telemetry about osm metric settings for replicaset
+        if (File.file?(@@osmConfigMountPath))
           properties["osmNamespaceCount"] = @@osmNamespaceCount
         end
         ApplicationInsightsUtility.sendMetricTelemetry("NodeCoreCapacity", capacityInfo["cpu"], properties)

From e31cc8715c0e6fe49abccf0238a973d7d3a24ed8 Mon Sep 17 00:00:00 2001
From: Grace Wehner
Date: Tue, 6 Apr 2021 09:37:20 -0700
Subject: [PATCH 085/301] Fix conflicting logrotate settings (#526)

The node and the omsagent container both have a cron.daily file to rotate certain logs daily. These settings are the same for some files in /var/log (mounted from the node with read/write access), causing the rotation to fail when both try to rotate at the same time.
So then the /var/log/*.1 file is written to forever. Since these files are always written to and never rotated, it causes high memory usage on the node after a while. This fix removes the container logrotate settings for /var/log, which the container does not write to. --- kubernetes/linux/setup.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 218e3c717..ee3756964 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -85,3 +85,7 @@ rm -f $TMPDIR/docker-cimprov*.sh rm -f $TMPDIR/azure-mdsd*.deb rm -f $TMPDIR/mdsd.xml rm -f $TMPDIR/envmdsd + +# Remove settings for cron.daily that conflict with the node's cron.daily. Since both are trying to rotate the same files +# in /var/log at the same time, the rotation doesn't happen correctly and then the *.1 file is forever logged to. +rm /etc/logrotate.d/alternatives /etc/logrotate.d/apt /etc/logrotate.d/azure-mdsd /etc/logrotate.d/rsyslog From ca8fa1274b7bcc02a45cf8a8b195ca8e68f52bff Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Tue, 6 Apr 2021 11:11:17 -0700 Subject: [PATCH 086/301] bug fix (#528) --- source/plugins/ruby/in_kube_nodes.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index 8a94a7245..d4b54c340 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -9,7 +9,7 @@ class Kube_nodeInventory_Input < Input @@MDMKubeNodeInventoryTag = "mdm.kubenodeinventory" @@configMapMountPath = "/etc/config/settings/log-data-collection-settings" @@promConfigMountPath = "/etc/config/settings/prometheus-data-collection-settings" - @@osmConfigMountPath = "/etc/config/osm-settings" + @@osmConfigMountPath = "/etc/config/osm-settings/osm-metric-collection-configuration" @@AzStackCloudFileName = "/etc/kubernetes/host/azurestackcloud.json" @@kubeperfTag = "oms.api.KubePerf" From 1f6f6d2578ebd534d5f0c98345ad147526b73821 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 7 Apr 2021 11:47:14 -0700 Subject: [PATCH 087/301] Gangams/arc ev2 deployment (#522) * ev2 deployment for arc k8s extension * fix charts path issue * rename scripts tar * add notifications * fix line endings * fix line endings * update with prod repo * fix file endings --- .pipelines/build-linux.sh | 5 + .pipelines/pipeline.user.linux.yml | 7 +- ...rom-cdpx-and-push-to-ci-acr-linux-image.sh | 38 +++- ...m-cdpx-and-push-to-ci-acr-windows-image.sh | 39 +++- ReleaseProcess.md | 19 +- ...ContainerInsightsExtension.Parameters.json | 66 +++++++ .../Public.Canary.RolloutSpec.json | 29 +++ .../RolloutSpecs/Public.FF.RolloutSpec.json | 29 +++ .../Public.HighLoad.RolloutSpec.json | 29 +++ .../Public.LightLoad.RolloutSpec.json | 29 +++ .../RolloutSpecs/Public.MC.RolloutSpec.json | 29 +++ .../Public.MediumLoad.RolloutSpec.json | 29 +++ .../Public.Pilot.RolloutSpec.json | 29 +++ .../ScopeBindings/Public.ScopeBindings.json | 125 ++++++++++++ .../Scripts/pushChartToAcr.sh | 181 ++++++++++++++++++ .../ServiceModels/Public.ServiceModel.json | 159 +++++++++++++++ .../ServiceGroupRoot/buildver.txt | 1 + 17 files changed, 821 insertions(+), 22 deletions(-) mode change 100755 => 100644 .pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh mode change 100755 => 100644 .pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/Parameters/ContainerInsightsExtension.Parameters.json create mode 100644 
deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Canary.RolloutSpec.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.FF.RolloutSpec.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.HighLoad.RolloutSpec.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.LightLoad.RolloutSpec.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.MC.RolloutSpec.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.MediumLoad.RolloutSpec.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Pilot.RolloutSpec.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/Scripts/pushChartToAcr.sh create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/buildver.txt diff --git a/.pipelines/build-linux.sh b/.pipelines/build-linux.sh index f4c92fda2..53f6a3a07 100644 --- a/.pipelines/build-linux.sh +++ b/.pipelines/build-linux.sh @@ -14,3 +14,8 @@ cd $DIR/../build/linux echo "----------- Build Docker Provider -------------------------------" make cd $DIR + +echo "------------ Bundle Shell Extension Scripts & HELM chart -------------------------" +cd $DIR/../deployment/arc-k8s-extension/ServiceGroupRoot/Scripts +tar -czvf ../artifacts.tar.gz ../../../../charts/azuremonitor-containers/ pushChartToAcr.sh + diff --git a/.pipelines/pipeline.user.linux.yml b/.pipelines/pipeline.user.linux.yml index 57273111e..565661d64 100644 --- a/.pipelines/pipeline.user.linux.yml +++ b/.pipelines/pipeline.user.linux.yml @@ -24,10 +24,15 @@ restore: build: commands: - - !!defaultcommand + - !!buildcommand name: 'Build Docker Provider Shell Bundle' command: '.pipelines/build-linux.sh' fail_on_stderr: false + artifacts: + - from: 'deployment' + to: 'build' + include: + - '**' package: commands: diff --git a/.pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh b/.pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh old mode 100755 new mode 100644 index 3844ea185..e7d26245f --- a/.pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh +++ b/.pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh @@ -35,12 +35,22 @@ echo "end: read appid and appsecret which has read access on cdpx acr" # suffix 00 primary and 01 secondary, and we only use primary # This configured via pipeline variable echo "login to cdpxlinux acr:${CDPX_ACR}" -docker login $CDPX_ACR --username $CDPX_ACR_APP_ID --password $CDPX_ACR_APP_SECRET -echo "login to cdpxlinux acr completed: ${CDPX_ACR}" +echo $CDPX_ACR_APP_SECRET | docker login $CDPX_ACR --username $CDPX_ACR_APP_ID --password-stdin +if [ $? -eq 0 ]; then + echo "login to cdpxlinux acr: ${CDPX_ACR} completed successfully." +else + echo "-e error login to cdpxlinux acr: ${CDPX_ACR} failed.Please see release task logs." + exit 1 +fi echo "pull agent image from cdpxlinux acr: ${CDPX_ACR}" docker pull ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} -echo "pull image from cdpxlinux acr completed: ${CDPX_ACR}" +if [ $? -eq 0 ]; then + echo "pulling of agent image from cdpxlinux acr: ${CDPX_ACR} completed successfully." 
+else + echo "-e error pulling of agent image from cdpxlinux acr: ${CDPX_ACR} failed.Please see release task logs." + exit 1 +fi echo "CI Release name is:"$CI_RELEASE imagetag=$CI_RELEASE$CI_IMAGE_TAG_SUFFIX @@ -51,13 +61,29 @@ echo "CI AGENT REPOSITORY NAME : ${CI_AGENT_REPO}" echo "tag linux agent image" docker tag ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} ${CI_ACR}/public/azuremonitor/containerinsights/${CI_AGENT_REPO}:${imagetag} +if [ $? -eq 0 ]; then + echo "tagging of linux agent image completed successfully." +else + echo "-e error tagging of linux agent image failed. Please see release task logs." + exit 1 +fi echo "login ciprod acr":$CI_ACR -docker login $CI_ACR --username $ACR_APP_ID --password $ACR_APP_SECRET -echo "login to ${CI_ACR} acr completed" +echo $ACR_APP_SECRET | docker login $CI_ACR --username $ACR_APP_ID --password-stdin +if [ $? -eq 0 ]; then + echo "login to ciprod acr: ${CI_ACR} completed successfully" +else + echo "-e error login to ciprod acr: ${CI_ACR} failed. Please see release task logs." + exit 1 +fi echo "pushing the image to ciprod acr:${CI_ACR}" docker push ${CI_ACR}/public/azuremonitor/containerinsights/${CI_AGENT_REPO}:${imagetag} -echo "pushing the image to ciprod acr completed" +if [ $? -eq 0 ]; then + echo "pushing of the image to ciprod acr completed successfully" +else + echo "-e error pushing of image to ciprod acr failed. Please see release task logs." + exit 1 +fi echo "end: pull linux agent image from cdpx and push to ciprod acr" diff --git a/.pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh b/.pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh old mode 100755 new mode 100644 index 095a00039..19fe55722 --- a/.pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh +++ b/.pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh @@ -34,12 +34,22 @@ echo "end: read appid and appsecret which has read access on cdpx acr" # suffix 00 primary and 01 secondary, and we only use primary # This configured via pipeline variable echo "login to cdpxwindows acr:${CDPX_ACR}" -docker login $CDPX_ACR --username $CDPX_ACR_APP_ID --password $CDPX_ACR_APP_SECRET -echo "login to cdpxwindows acr:${CDPX_ACR} completed" +echo $CDPX_ACR_APP_SECRET | docker login $CDPX_ACR --username $CDPX_ACR_APP_ID --password-stdin +if [ $? -eq 0 ]; then + echo "login to cdpxwindows acr: ${CDPX_ACR} completed successfully." +else + echo "-e error login to cdpxwindows acr: ${CDPX_ACR} failed.Please see release task logs." + exit 1 +fi echo "pull image from cdpxwin acr: ${CDPX_ACR}" docker pull ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} -echo "pull image from cdpxwin acr completed: ${CDPX_ACR}" +if [ $? -eq 0 ]; then + echo "pulling of image from cdpxwin acr: ${CDPX_ACR} completed successfully." +else + echo "pulling of image from cdpxwin acr: ${CDPX_ACR} failed. Please see release task logs." + exit 1 +fi echo "CI Release name:"$CI_RELEASE echo "CI Image Tax suffix:"$CI_IMAGE_TAG_SUFFIX @@ -49,13 +59,30 @@ echo "agentimagetag="$imagetag echo "tag windows agent image" docker tag ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} ${CI_ACR}/public/azuremonitor/containerinsights/${CI_AGENT_REPO}:${imagetag} +if [ $? -eq 0 ]; then + echo "tagging of windows agent image completed successfully." +else + echo "-e error tagging of windows agent image failed. Please see release task logs." 
+ exit 1 +fi echo "login to ${CI_ACR} acr" -docker login $CI_ACR --username $ACR_APP_ID --password $ACR_APP_SECRET -echo "login to ${CI_ACR} acr completed" +echo $ACR_APP_SECRET | docker login $CI_ACR --username $ACR_APP_ID --password-stdin +if [ $? -eq 0 ]; then + echo "login to acr: ${CI_ACR} completed successfully." +else + echo "login to acr: ${CI_ACR} failed. Please see release task logs." + exit 1 +fi + echo "pushing the image to ciprod acr" docker push ${CI_ACR}/public/azuremonitor/containerinsights/${CI_AGENT_REPO}:${imagetag} -echo "pushing the image to ciprod acr completed" +if [ $? -eq 0 ]; then + echo "pushing the image to ciprod acr completed successfully." +else + echo "pushing the image to ciprod acr failed. Please see release task logs" + exit 1 +fi echo "end: pull windows agent image from cdpx and push to ciprod acr" diff --git a/ReleaseProcess.md b/ReleaseProcess.md index c6f51bb65..8ec91546c 100644 --- a/ReleaseProcess.md +++ b/ReleaseProcess.md @@ -35,20 +35,21 @@ Image automatically synched to MCR CN from Public cloud MCR. - Refer to internal docs for the release process and instructions. -## ARO v3 - -This needs to be co-ordinated with Red hat and ARO-RP team for the release and Red hat team will pick up the changes for the release. - ## AKS-Engine Make PR against [AKS-Engine](https://github.com/Azure/aks-engine). Refer PR https://github.com/Azure/aks-engine/pull/2318 -## ARO v4, Azure Arc K8s and OpenShift v4 clusters - -Make sure azuremonitor-containers chart yamls updates with all changes going with the release and also make sure to bump the chart version, imagetag and docker provider version etc. Similar to agent container image, build pipeline automatically push the chart to container insights prod acr for canary and prod repos accordingly. -Both the agent and helm chart will be replicated to `mcr.microsoft.com`. +## Arc for Kubernetes -The way, customers will be onboard the monitoring to these clusters using onboarding scripts under `onboarding\managed` directory so please bump chart version for prod release. Once we move to Arc K8s Monitoring extension Public preview, these will be taken care so at that point of time no manual changes like this required. +Ev2 pipeline used to deploy the chart of the Arc K8s Container Insights Extension as per Safe Deployment Process. +Here is the high level process +``` + 1. Specify chart version of the release candidate and trigger [container-insights-arc-k8s-extension-ci_prod-release](https://github-private.visualstudio.com/microsoft/_release?_a=releases&view=all) + 2. Get the approval from one of team member for the release + 3. Once the approved, release should be triggered automatically + 4. use `cimon-arck8s-eastus2euap` for validating latest release in canary region + 5. 
TBD - Notify vendor team for the validation on all Arc K8s supported platforms +``` ## Microsoft Charts Repo release for On-prem K8s diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/Parameters/ContainerInsightsExtension.Parameters.json b/deployment/arc-k8s-extension/ServiceGroupRoot/Parameters/ContainerInsightsExtension.Parameters.json new file mode 100644 index 000000000..a8a99e9f6 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/Parameters/ContainerInsightsExtension.Parameters.json @@ -0,0 +1,66 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutParameters.json", + "contentVersion": "1.0.0.0", + "wait": [ + { + "name": "waitSdpBakeTime", + "properties": { + "duration": "PT24H" + } + } + ], + "shellExtensions": [ + { + "name": "PushChartToACR", + "type": "ShellExtensionType", + "properties": { + "maxexecutiontime": "PT1H" + }, + "package": { + "reference": { + "path": "artifacts.tar.gz" + } + }, + "launch": { + "command": [ + "/bin/bash", + "pushChartToAcr.sh" + ], + "environmentVariables": [ + { + "name": "RELEASE_STAGE", + "value": "__RELEASE_STAGE__" + }, + { + "name": "ACR_APP_ID", + "reference": { + "provider": "AzureKeyVault", + "parameters": { + "secretId": "https://cibuildandreleasekv.vault.azure.net/secrets/ciprodacrappid/e8f47bf7505741ebaf65a4db16ff9fa7" + } + }, + "asSecureValue": "true" + }, + { + "name": "ACR_APP_SECRET", + "reference": { + "provider": "AzureKeyVault", + "parameters": { + "secretId": "https://cibuildandreleasekv.vault.azure.net/secrets/ciprodacrappsecret/8718afcdac114accb8b26f613cef1e1e" + } + }, + "asSecureValue": "true" + }, + { + "name": "ACR_NAME", + "value": "__ACR_NAME__" + }, + { + "name": "CHART_VERSION", + "value": "__CHART_VERSION__" + } + ] + } + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Canary.RolloutSpec.json b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Canary.RolloutSpec.json new file mode 100644 index 000000000..cde103633 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Canary.RolloutSpec.json @@ -0,0 +1,29 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutSpec.json", + "ContentVersion": "1.0.0.0", + "RolloutMetadata": { + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "Name": "ContainerInsightsExtension-Canary", + "RolloutType": "Major", + "BuildSource": { + "Parameters": { + "VersionFile": "buildver.txt" + } + }, + "notification": { + "email": { + "to": "omscontainers@microsoft.com" + } + } + }, + "orchestratedSteps": [ + { + "name": "PushChartToACR", + "targetType": "ServiceResource", + "targetName": "PushChartToACR-Canary", + "actions": [ "Shell/PushChartToACR" ], + "dependsOn": [ ] + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.FF.RolloutSpec.json b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.FF.RolloutSpec.json new file mode 100644 index 000000000..1749296c8 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.FF.RolloutSpec.json @@ -0,0 +1,29 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutSpec.json", + "ContentVersion": "1.0.0.0", + "RolloutMetadata": { + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "Name": "ContainerInsightsExtension-FF", 
+ "RolloutType": "Major", + "BuildSource": { + "Parameters": { + "VersionFile": "buildver.txt" + } + }, + "notification": { + "email": { + "to": "omscontainers@microsoft.com" + } + } + }, + "orchestratedSteps": [ + { + "name": "PushChartToACR", + "targetType": "ServiceResource", + "targetName": "PushChartToACR-FF", + "actions": [ "wait/waitSdpBakeTime", "Shell/PushChartToACR" ], + "dependsOn": [ ] + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.HighLoad.RolloutSpec.json b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.HighLoad.RolloutSpec.json new file mode 100644 index 000000000..50729b1ae --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.HighLoad.RolloutSpec.json @@ -0,0 +1,29 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutSpec.json", + "ContentVersion": "1.0.0.0", + "RolloutMetadata": { + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "Name": "ContainerInsightsExtension-Prod3", + "RolloutType": "Major", + "BuildSource": { + "Parameters": { + "VersionFile": "buildver.txt" + } + }, + "notification": { + "email": { + "to": "omscontainers@microsoft.com" + } + } + }, + "orchestratedSteps": [ + { + "name": "PushChartToACR", + "targetType": "ServiceResource", + "targetName": "PushChartToACR-HighLoad", + "actions": [ "wait/waitSdpBakeTime", "Shell/PushChartToACR" ], + "dependsOn": [ ] + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.LightLoad.RolloutSpec.json b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.LightLoad.RolloutSpec.json new file mode 100644 index 000000000..edd61f852 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.LightLoad.RolloutSpec.json @@ -0,0 +1,29 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutSpec.json", + "ContentVersion": "1.0.0.0", + "RolloutMetadata": { + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "Name": "ContainerInsightsExtension-Prod2", + "RolloutType": "Major", + "BuildSource": { + "Parameters": { + "VersionFile": "buildver.txt" + } + }, + "notification": { + "email": { + "to": "omscontainers@microsoft.com" + } + } + }, + "orchestratedSteps": [ + { + "name": "PushChartToACR", + "targetType": "ServiceResource", + "targetName": "PushChartToACR-LightLoad", + "actions": [ "wait/waitSdpBakeTime", "Shell/PushChartToACR" ], + "dependsOn": [ ] + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.MC.RolloutSpec.json b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.MC.RolloutSpec.json new file mode 100644 index 000000000..014f4b092 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.MC.RolloutSpec.json @@ -0,0 +1,29 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutSpec.json", + "ContentVersion": "1.0.0.0", + "RolloutMetadata": { + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "Name": "ContainerInsightsExtension-MC", + "RolloutType": "Major", + "BuildSource": { + "Parameters": { + "VersionFile": "buildver.txt" + } + }, + "notification": { + "email": { + "to": "omscontainers@microsoft.com" + } + } + }, + "orchestratedSteps": [ + { + 
"name": "PushChartToACR", + "targetType": "ServiceResource", + "targetName": "PushChartToACR-MC", + "actions": [ "wait/waitSdpBakeTime", "Shell/PushChartToACR" ], + "dependsOn": [ ] + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.MediumLoad.RolloutSpec.json b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.MediumLoad.RolloutSpec.json new file mode 100644 index 000000000..cd1befbc3 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.MediumLoad.RolloutSpec.json @@ -0,0 +1,29 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutSpec.json", + "ContentVersion": "1.0.0.0", + "RolloutMetadata": { + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "Name": "ContainerInsightsExtension-Prod2", + "RolloutType": "Major", + "BuildSource": { + "Parameters": { + "VersionFile": "buildver.txt" + } + }, + "notification": { + "email": { + "to": "omscontainers@microsoft.com" + } + } + }, + "orchestratedSteps": [ + { + "name": "PushChartToACR", + "targetType": "ServiceResource", + "targetName": "PushChartToACR-MediumLoad", + "actions": ["wait/waitSdpBakeTime", "Shell/PushChartToACR" ], + "dependsOn": [ ] + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Pilot.RolloutSpec.json b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Pilot.RolloutSpec.json new file mode 100644 index 000000000..48c99fce1 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Pilot.RolloutSpec.json @@ -0,0 +1,29 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutSpec.json", + "ContentVersion": "1.0.0.0", + "RolloutMetadata": { + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "Name": "ContainerInsightsExtension-Pilot", + "RolloutType": "Major", + "BuildSource": { + "Parameters": { + "VersionFile": "buildver.txt" + } + }, + "notification": { + "email": { + "to": "omscontainers@microsoft.com" + } + } + }, + "orchestratedSteps": [ + { + "name": "PushChartToACR", + "targetType": "ServiceResource", + "targetName": "PushChartToACR-Pilot", + "actions": [ "wait/waitSdpBakeTime", "Shell/PushChartToACR"], + "dependsOn": [ ] + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json b/deployment/arc-k8s-extension/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json new file mode 100644 index 000000000..516eba3e2 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json @@ -0,0 +1,125 @@ +{ + "$schema": "https://ev2schema.azure.net/schemas/2020-01-01/scopeBindings.json", + "contentVersion": "0.0.0.1", + "scopeBindings": [ + { + "scopeTagName": "Canary", + "bindings": [ + { + "find": "__RELEASE_STAGE__", + "replaceWith": "Canary" + }, + { + "find": "__ACR_NAME__", + "replaceWith": "$(ACRName)" + }, + { + "find": "__CHART_VERSION__", + "replaceWith": "$(ChartVersion)" + } + ] + }, + { + "scopeTagName": "Pilot", + "bindings": [ + { + "find": "__RELEASE_STAGE__", + "replaceWith": "Pilot" + }, + { + "find": "__ACR_NAME__", + "replaceWith": "$(ACRName)" + }, + { + "find": "__CHART_VERSION__", + "replaceWith": "$(ChartVersion)" + } + ] + }, + { + "scopeTagName": "LightLoad", + "bindings": [ + { + "find": "__RELEASE_STAGE__", + "replaceWith": 
"MediumLow" + }, + { + "find": "__ACR_NAME__", + "replaceWith": "$(ACRName)" + }, + { + "find": "__CHART_VERSION__", + "replaceWith": "$(ChartVersion)" + } + ] + }, + { + "scopeTagName": "MediumLoad", + "bindings": [ + { + "find": "__RELEASE_STAGE__", + "replaceWith": "MediumHigh" + }, + { + "find": "__ACR_NAME__", + "replaceWith": "$(ACRName)" + }, + { + "find": "__CHART_VERSION__", + "replaceWith": "$(ChartVersion)" + } + ] + }, + { + "scopeTagName": "HighLoad", + "bindings": [ + { + "find": "__RELEASE_STAGE__", + "replaceWith": "HighLoad" + }, + { + "find": "__ACR_NAME__", + "replaceWith": "$(ACRName)" + }, + { + "find": "__CHART_VERSION__", + "replaceWith": "$(ChartVersion)" + } + ] + }, + { + "scopeTagName": "FF", + "bindings": [ + { + "find": "__RELEASE_STAGE__", + "replaceWith": "FF" + }, + { + "find": "__ACR_NAME__", + "replaceWith": "$(ACRName)" + }, + { + "find": "__CHART_VERSION__", + "replaceWith": "$(ChartVersion)" + } + ] + }, + { + "scopeTagName": "MC", + "bindings": [ + { + "find": "__RELEASE_STAGE__", + "replaceWith": "MC" + }, + { + "find": "__ACR_NAME__", + "replaceWith": "$(ACRName)" + }, + { + "find": "__CHART_VERSION__", + "replaceWith": "$(ChartVersion)" + } + ] + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/Scripts/pushChartToAcr.sh b/deployment/arc-k8s-extension/ServiceGroupRoot/Scripts/pushChartToAcr.sh new file mode 100644 index 000000000..520557592 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/Scripts/pushChartToAcr.sh @@ -0,0 +1,181 @@ +#!/bin/bash + +export HELM_EXPERIMENTAL_OCI=1 +export MCR_NAME="mcr.microsoft.com" +# for prod-> stable and for test -> preview +export REPO_TYPE="stable" + +# repo paths for arc k8s extension roll-out +# canary region +export CANARY_REGION_REPO_PATH="azuremonitor/containerinsights/canary/${REPO_TYPE}/azuremonitor-containers" +# pilot region +export PILOT_REGION_REPO_PATH="azuremonitor/containerinsights/prod1/${REPO_TYPE}/azuremonitor-containers" +# light load regions +export LIGHT_LOAD_REGION_REPO_PATH="azuremonitor/containerinsights/prod2/${REPO_TYPE}/azuremonitor-containers" +# medium load regions +export MEDIUM_LOAD_REGION_REPO_PATH="azuremonitor/containerinsights/prod3/${REPO_TYPE}/azuremonitor-containers" +# high load regions +export HIGH_LOAD_REGION_REPO_PATH="azuremonitor/containerinsights/prod4/${REPO_TYPE}/azuremonitor-containers" +# FairFax regions +export FF_REGION_REPO_PATH="azuremonitor/containerinsights/prod5/${REPO_TYPE}/azuremonitor-containers" +# Mooncake regions +export MC_REGION_REPO_PATH="azuremonitor/containerinsights/prod6/${REPO_TYPE}/azuremonitor-containers" + +# pull chart from previous stage mcr and push chart to next stage acr +pull_chart_from_source_mcr_to_push_to_dest_acr() { + srcMcrFullPath=${1} + destAcrFullPath=${2} + + if [ -z $srcMcrFullPath ]; then + echo "-e error source mcr path must be provided " + exit 1 + fi + + if [ -z $destAcrFullPath ]; then + echo "-e error dest acr path must be provided " + exit 1 + fi + + echo "Pulling chart from MCR:${srcMcrFullPath} ..." + helm chart pull ${srcMcrFullPath} + if [ $? -eq 0 ]; then + echo "Pulling chart from MCR:${srcMcrFullPath} completed successfully." + else + echo "-e error Pulling chart from MCR:${srcMcrFullPath} failed. Please review Ev2 pipeline logs for more details on the error." + exit 1 + fi + + echo "Exporting chart to current directory ..." + helm chart export ${srcMcrFullPath} + if [ $? -eq 0 ]; then + echo "Exporting chart to current directory completed successfully." 
+ else + echo "-e error Exporting chart to current directory failed. Please review Ev2 pipeline logs for more details on the error." + exit 1 + fi + + echo "save the chart locally with dest acr full path : ${destAcrFullPath} ..." + helm chart save azuremonitor-containers/ ${destAcrFullPath} + if [ $? -eq 0 ]; then + echo "save the chart locally with dest acr full path : ${destAcrFullPath} completed successfully." + else + echo "-e error save the chart locally with dest acr full path : ${destAcrFullPath} failed. Please review Ev2 pipeline logs for more details on the error." + exit 1 + fi + + echo "pushing the chart to acr path: ${destAcrFullPath} ..." + helm chart push ${destAcrFullPath} + if [ $? -eq 0 ]; then + echo "pushing the chart to acr path: ${destAcrFullPath} completed successfully." + else + echo "-e error pushing the chart to acr path: ${destAcrFullPath} failed. Please review Ev2 pipeline logs for more details on the error." + exit 1 + fi +} + +# push to local release candidate chart to canary region +push_local_chart_to_canary_region() { + destAcrFullPath=${1} + if [ -z $destAcrFullPath ]; then + echo "-e error dest acr path must be provided " + exit 1 + fi + + echo "save the chart locally with dest acr full path : ${destAcrFullPath} ..." + helm chart save charts/azuremonitor-containers/ $destAcrFullPath + if [ $? -eq 0 ]; then + echo "save the chart locally with dest acr full path : ${destAcrFullPath} completed." + else + echo "-e error save the chart locally with dest acr full path : ${destAcrFullPath} failed. Please review Ev2 pipeline logs for more details on the error." + exit 1 + fi + + echo "pushing the chart to acr path: ${destAcrFullPath} ..." + helm chart push $destAcrFullPath + if [ $? -eq 0 ]; then + echo "pushing the chart to acr path: ${destAcrFullPath} completed successfully." + else + echo "-e error pushing the chart to acr path: ${destAcrFullPath} failed.Please review Ev2 pipeline logs for more details on the error." + exit 1 + fi +} + +echo "START - Release stage : ${RELEASE_STAGE}" + +# login to acr +echo "Using acr : ${ACR_NAME}" +echo "Using acr repo type: ${REPO_TYPE}" + +echo "login to acr:${ACR_NAME} using helm ..." +echo $ACR_APP_SECRET | helm registry login $ACR_NAME --username $ACR_APP_ID --password-stdin +if [ $? -eq 0 ]; then + echo "login to acr:${ACR_NAME} using helm completed successfully." +else + echo "-e error login to acr:${ACR_NAME} using helm failed. Please review Ev2 pipeline logs for more details on the error." 
+ exit 1 +fi + +case $RELEASE_STAGE in + + Canary) + echo "START: Release stage - Canary" + destAcrFullPath=${ACR_NAME}/public/${CANARY_REGION_REPO_PATH}:${CHART_VERSION} + push_local_chart_to_canary_region $destAcrFullPath + echo "END: Release stage - Canary" + ;; + + Pilot | Prod1) + echo "START: Release stage - Pilot" + srcMcrFullPath=${MCR_NAME}/${CANARY_REGION_REPO_PATH}:${CHART_VERSION} + destAcrFullPath=${ACR_NAME}/public/${PILOT_REGION_REPO_PATH}:${CHART_VERSION} + pull_chart_from_source_mcr_to_push_to_dest_acr $srcMcrFullPath $destAcrFullPath + echo "END: Release stage - Pilot" + ;; + + LightLoad | Pord2) + echo "START: Release stage - Light Load Regions" + srcMcrFullPath=${MCR_NAME}/${PILOT_REGION_REPO_PATH}:${CHART_VERSION} + destAcrFullPath=${ACR_NAME}/public/${LIGHT_LOAD_REGION_REPO_PATH}:${CHART_VERSION} + pull_chart_from_source_mcr_to_push_to_dest_acr $srcMcrFullPath $destAcrFullPath + echo "END: Release stage - Light Load Regions" + ;; + + MediumLoad | Prod3) + echo "START: Release stage - Medium Load Regions" + srcMcrFullPath=${MCR_NAME}/${LIGHT_LOAD_REGION_REPO_PATH}:${CHART_VERSION} + destAcrFullPath=${ACR_NAME}/public/${MEDIUM_LOAD_REGION_REPO_PATH}:${CHART_VERSION} + pull_chart_from_source_mcr_to_push_to_dest_acr $srcMcrFullPath $destAcrFullPath + echo "END: Release stage - Medium Load Regions" + ;; + + HighLoad | Prod4) + echo "START: Release stage - High Load Regions" + srcMcrFullPath=${MCR_NAME}/${MEDIUM_LOAD_REGION_REPO_PATH}:${CHART_VERSION} + destAcrFullPath=${ACR_NAME}/public/${HIGH_LOAD_REGION_REPO_PATH}:${CHART_VERSION} + pull_chart_from_source_mcr_to_push_to_dest_acr $srcMcrFullPath $destAcrFullPath + echo "END: Release stage - High Load Regions" + ;; + + FF | Prod5) + echo "START: Release stage - FF" + srcMcrFullPath=${MCR_NAME}/${HIGH_LOAD_REGION_REPO_PATH}:${CHART_VERSION} + destAcrFullPath=${ACR_NAME}/public/${FF_REGION_REPO_PATH}:${CHART_VERSION} + pull_chart_from_source_mcr_to_push_to_dest_acr $srcMcrFullPath $destAcrFullPath + echo "END: Release stage - FF" + ;; + + MC | Prod6) + echo "START: Release stage - MC" + srcMcrFullPath=${MCR_NAME}/${FF_REGION_REPO_PATH}:${CHART_VERSION} + destAcrFullPath=${ACR_NAME}/public/${MC_REGION_REPO_PATH}:${CHART_VERSION} + pull_chart_from_source_mcr_to_push_to_dest_acr $srcMcrFullPath $destAcrFullPath + echo "END: Release stage - MC" + ;; + + *) + echo -n "unknown release stage" + exit 1 + ;; +esac + +echo "END - Release stage : ${RELEASE_STAGE}" diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json b/deployment/arc-k8s-extension/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json new file mode 100644 index 000000000..71081661a --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json @@ -0,0 +1,159 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/ServiceModel.json", + "ContentVersion": "0.0.0.1", + "ServiceMetadata": { + "ServiceGroup": "ContainerInsightsExtension", + "Environment": "Prod" + }, + "ServiceResourceGroupDefinitions": [ + { + "Name": "ARC-Extension-ServiceResourceGroupDefinition", + "ServiceResourceDefinitions": [ + { + "Name": "ShellExtension", + "ComposedOf": { + "Extension": { + "Shell": [ + { + "type": "ShellExtensionType", + "properties": { + "imageName": "adm-ubuntu-1804-l", + "imageVersion": "v18" + } + } + ] + } + } + } + ] + } + ], + "ServiceResourceGroups": [ + { + "AzureResourceGroupName": "ContainerInsightsExtension-Canary-Release", + "Location": "eastus2", + 
"InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", + "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "ScopeTags": [ + { + "Name": "Canary" + } + ], + "ServiceResources": [ + { + "Name": "PushChartToACR-Canary", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsightsExtension.Parameters.json" + } + ] + }, + { + "AzureResourceGroupName": "ContainerInsightsExtension-Pilot-Release", + "Location": "eastus2", + "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", + "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "ScopeTags": [ + { + "Name": "Pilot" + } + ], + "ServiceResources": [ + { + "Name": "PushChartToACR-Pilot", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsightsExtension.Parameters.json" + } + ] + }, + { + "AzureResourceGroupName": "ContainerInsightsExtension-LightLoad-Release", + "Location": "eastus2", + "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", + "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "ScopeTags": [ + { + "Name": "LightLoad" + } + ], + "ServiceResources": [ + { + "Name": "PushChartToACR-LightLoad", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsightsExtension.Parameters.json" + } + ] + }, + { + "AzureResourceGroupName": "ContainerInsightsExtension-MediumLoad-Release", + "Location": "eastus2", + "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", + "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "ScopeTags": [ + { + "Name": "MediumLoad" + } + ], + "ServiceResources": [ + { + "Name": "PushChartToACR-MediumLoad", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsightsExtension.Parameters.json" + } + ] + }, + { + "AzureResourceGroupName": "ContainerInsightsExtension-HighLoad-Release", + "Location": "eastus2", + "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", + "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "ScopeTags": [ + { + "Name": "HighLoad" + } + ], + "ServiceResources": [ + { + "Name": "PushChartToACR-HighLoad", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsightsExtension.Parameters.json" + } + ] + }, + { + "AzureResourceGroupName": "ContainerInsightsExtension-FF-Release", + "Location": "eastus2", + "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", + "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "ScopeTags": [ + { + "Name": "FF" + } + ], + "ServiceResources": [ + { + "Name": "PushChartToACR-FF", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsightsExtension.Parameters.json" + } + ] + }, + { + "AzureResourceGroupName": "ContainerInsightsExtension-MC-Release", + "Location": "eastus2", + "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", + "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "ScopeTags": [ + { + "Name": "MC" + } + ], + "ServiceResources": [ + { + "Name": "PushChartToACR-MC", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsightsExtension.Parameters.json" + } + ] + } + ] + } diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/buildver.txt b/deployment/arc-k8s-extension/ServiceGroupRoot/buildver.txt new file mode 100644 index 000000000..1921233b3 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/buildver.txt @@ -0,0 +1 @@ +1.0.0.0 From 97678b679a9467350b64060b97bf3d355fc64874 Mon 
Sep 17 00:00:00 2001 From: David Michelman Date: Fri, 9 Apr 2021 14:29:00 -0700 Subject: [PATCH 088/301] added liveness and telemetry for telegraf (#517) * added liveness and telemetry for telegraf * code transfer * removed windows liveness probe * done --- .../installer/conf/td-agent-bit-prom-side-car.conf | 12 ++++++++++++ build/linux/installer/conf/td-agent-bit-rs.conf | 12 ++++++++++++ build/linux/installer/conf/td-agent-bit.conf | 12 ++++++++++++ build/linux/installer/scripts/livenessprobe.sh | 9 +++++++++ kubernetes/linux/main.sh | 4 ++++ 5 files changed, 49 insertions(+) diff --git a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf index 720f54820..339e509b0 100644 --- a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf +++ b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf @@ -11,6 +11,18 @@ Parsers_File /etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf Log_File /var/opt/microsoft/docker-cimprov/log/fluent-bit.log +[INPUT] + Name tail + Tag oms.container.log.flbplugin.terminationlog.* + Path /dev/write-to-traces + DB /var/opt/microsoft/docker-cimprov/state/terminationlog-ai.db + DB.Sync Off + Parser docker + Mem_Buf_Limit 1m + Path_Key filepath + Skip_Long_Lines On + Ignore_Older 2m + [INPUT] Name tcp Tag oms.container.perf.telegraf.* diff --git a/build/linux/installer/conf/td-agent-bit-rs.conf b/build/linux/installer/conf/td-agent-bit-rs.conf index 696ac80e6..c94b4c40e 100644 --- a/build/linux/installer/conf/td-agent-bit-rs.conf +++ b/build/linux/installer/conf/td-agent-bit-rs.conf @@ -10,6 +10,18 @@ Parsers_File /etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf Log_File /var/opt/microsoft/docker-cimprov/log/fluent-bit.log +[INPUT] + Name tail + Tag oms.container.log.flbplugin.terminationlog.* + Path /dev/write-to-traces + DB /var/opt/microsoft/docker-cimprov/state/terminationlog-ai.db + DB.Sync Off + Parser docker + Mem_Buf_Limit 1m + Path_Key filepath + Skip_Long_Lines On + Ignore_Older 2m + [INPUT] Name tcp Tag oms.container.perf.telegraf.* diff --git a/build/linux/installer/conf/td-agent-bit.conf b/build/linux/installer/conf/td-agent-bit.conf index 484a4bbbf..287c076dc 100644 --- a/build/linux/installer/conf/td-agent-bit.conf +++ b/build/linux/installer/conf/td-agent-bit.conf @@ -52,6 +52,18 @@ Skip_Long_Lines On Ignore_Older 2m +[INPUT] + Name tail + Tag oms.container.log.flbplugin.terminationlog.* + Path /dev/write-to-traces + DB /var/opt/microsoft/docker-cimprov/state/terminationlog-ai.db + DB.Sync Off + Parser docker + Mem_Buf_Limit 1m + Path_Key filepath + Skip_Long_Lines On + Ignore_Older 2m + [INPUT] Name tcp Tag oms.container.perf.telegraf.* diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index a82fa28eb..e3b0fc28e 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -26,6 +26,15 @@ then exit 1 fi +#test to exit non zero value if telegraf is not running +(ps -ef | grep telegraf | grep -v "grep") +if [ $? 
-ne 0 ] +then + echo "Telegraf is not running" > /dev/termination-log + echo "Telegraf is not running (controller: ${CONTROLLER_TYPE}, container type: ${CONTAINER_TYPE})" > /dev/write-to-traces # this file is tailed and sent to traces + exit 1 +fi + if [ -s "inotifyoutput.txt" ] then # inotifyoutput file has data(config map was applied) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 71e46875b..81db6f3a4 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -699,6 +699,10 @@ dpkg -l | grep td-agent-bit | awk '{print $2 " " $3}' +# Write messages from the liveness probe to stdout (so telemetry picks it up) +touch /dev/write-to-traces + + echo "stopping rsyslog..." service rsyslog stop From 63ea896b7b7c270320678289eb0468690c1e24bd Mon Sep 17 00:00:00 2001 From: David Michelman Date: Tue, 13 Apr 2021 12:48:00 -0700 Subject: [PATCH 089/301] Windows metric fix (#530) * changes * about to remove container fix * moved caching code to existing loop * removed un-necessary changes * removed a few more un-necessary changes * added windows node check * fixed a bug * everything works confirmed --- source/plugins/ruby/filter_cadvisor2mdm.rb | 42 +++++++--- source/plugins/ruby/in_kube_nodes.rb | 92 ++++++++++++++++++++++ 2 files changed, 125 insertions(+), 9 deletions(-) diff --git a/source/plugins/ruby/filter_cadvisor2mdm.rb b/source/plugins/ruby/filter_cadvisor2mdm.rb index 8d7e729c8..659e3000c 100644 --- a/source/plugins/ruby/filter_cadvisor2mdm.rb +++ b/source/plugins/ruby/filter_cadvisor2mdm.rb @@ -9,6 +9,7 @@ module Fluent require_relative "CustomMetricsUtils" require_relative "kubelet_utils" require_relative "MdmMetricsGenerator" + require_relative "in_kube_nodes" class CAdvisor2MdmFilter < Filter Fluent::Plugin.register_filter("filter_cadvisor2mdm", self) @@ -23,6 +24,7 @@ class CAdvisor2MdmFilter < Filter @metrics_to_collect_hash = {} @@metric_threshold_hash = {} + @@controller_type = "" def initialize super @@ -63,6 +65,7 @@ def start @containerResourceDimensionHash = {} @pvUsageHash = {} @@metric_threshold_hash = MdmMetricsGenerator.getContainerResourceUtilizationThresholds + @NodeCache = Fluent::NodeStatsCache.new() end rescue => e @log.info "Error initializing plugin #{e}" @@ -161,19 +164,40 @@ def filter(tag, time, record) if counter_name == Constants::CPU_USAGE_NANO_CORES metric_name = Constants::CPU_USAGE_MILLI_CORES metric_value /= 1000000 #cadvisor record is in nanocores. 
Convert to mc - @log.info "Metric_value: #{metric_value} CPU Capacity #{@cpu_capacity}" - if @cpu_capacity != 0.0 - percentage_metric_value = (metric_value) * 100 / @cpu_capacity + if @@controller_type.downcase == "replicaset" + target_node_cpu_capacity_mc = @NodeCache.cpu.get_capacity(record["DataItems"][0]["Host"]) / 1000000 + else + target_node_cpu_capacity_mc = @cpu_capacity + end + @log.info "Metric_value: #{metric_value} CPU Capacity #{target_node_cpu_capacity_mc}" + if target_node_cpu_capacity_mc != 0.0 + percentage_metric_value = (metric_value) * 100 / target_node_cpu_capacity_mc end end if counter_name.start_with?("memory") metric_name = counter_name - if @memory_capacity != 0.0 - percentage_metric_value = metric_value * 100 / @memory_capacity + if @@controller_type.downcase == "replicaset" + target_node_mem_capacity = @NodeCache.mem.get_capacity(record["DataItems"][0]["Host"]) + else + target_node_mem_capacity = @memory_capacity + end + @log.info "Metric_value: #{metric_value} Memory Capacity #{target_node_mem_capacity}" + if target_node_mem_capacity != 0.0 + percentage_metric_value = metric_value * 100 / target_node_mem_capacity end + end + @log.info "percentage_metric_value for metric: #{metric_name} for instance: #{record["DataItems"][0]["Host"]} percentage: #{percentage_metric_value}" + + # do some sanity checking. Do we want this? + if percentage_metric_value > 100.0 or percentage_metric_value < 0.0 + telemetryProperties = {} + telemetryProperties["Computer"] = record["DataItems"][0]["Host"] + telemetryProperties["MetricName"] = metric_name + telemetryProperties["MetricPercentageValue"] = percentage_metric_value + ApplicationInsightsUtility.sendCustomEvent("ErrorPercentageOutOfBounds", telemetryProperties) end - # return get_metric_records(record, metric_name, metric_value, percentage_metric_value) + return MdmMetricsGenerator.getNodeResourceMetricRecords(record, metric_name, metric_value, percentage_metric_value) elsif object_name == Constants::OBJECT_NAME_K8S_CONTAINER && @metrics_to_collect_hash.key?(counter_name.downcase) instanceName = record["DataItems"][0]["InstanceName"] @@ -279,8 +303,8 @@ def ensure_cpu_memory_capacity_set return end - controller_type = ENV["CONTROLLER_TYPE"] - if controller_type.downcase == "replicaset" + @@controller_type = ENV["CONTROLLER_TYPE"] + if @@controller_type.downcase == "replicaset" @log.info "ensure_cpu_memory_capacity_set @cpu_capacity #{@cpu_capacity} @memory_capacity #{@memory_capacity}" begin @@ -306,7 +330,7 @@ def ensure_cpu_memory_capacity_set @log.info "Error getting memory_capacity" end end - elsif controller_type.downcase == "daemonset" + elsif @@controller_type.downcase == "daemonset" capacity_from_kubelet = KubeletUtils.get_node_capacity # Error handling in case /metrics/cadvsior endpoint fails diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index d4b54c340..99e804302 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -43,6 +43,8 @@ def initialize @nodeInventoryE2EProcessingLatencyMs = 0 @nodesAPIE2ELatencyMs = 0 require_relative "constants" + + @NodeCache = NodeStatsCache.new() end config_param :run_interval, :time, :default => 60 @@ -197,6 +199,15 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) end end + # Only CPU and Memory capacity for windows nodes get added to the cache (at end of file) + is_windows_node = false + if !item["status"].nil? && !item["status"]["nodeInfo"].nil? 
&& !item["status"]["nodeInfo"]["operatingSystem"].nil? + operatingSystem = item["status"]["nodeInfo"]["operatingSystem"] + if (operatingSystem.is_a?(String) && operatingSystem.casecmp("windows") == 0) + is_windows_node = true + end + end + # node metrics records nodeMetricRecords = [] nodeMetricRecord = KubernetesApiClient.parseNodeLimitsFromNodeItem(item, "allocatable", "cpu", "cpuAllocatableNanoCores", batchTime) @@ -210,10 +221,18 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) nodeMetricRecord = KubernetesApiClient.parseNodeLimitsFromNodeItem(item, "capacity", "cpu", "cpuCapacityNanoCores", batchTime) if !nodeMetricRecord.nil? && !nodeMetricRecord.empty? nodeMetricRecords.push(nodeMetricRecord) + # add data to the cache so filter_cadvisor2mdm.rb can use it + if is_windows_node + @NodeCache.cpu.set_capacity(nodeMetricRecord["DataItems"][0]["Host"], nodeMetricRecord["DataItems"][0]["Collections"][0]["Value"]) + end end nodeMetricRecord = KubernetesApiClient.parseNodeLimitsFromNodeItem(item, "capacity", "memory", "memoryCapacityBytes", batchTime) if !nodeMetricRecord.nil? && !nodeMetricRecord.empty? nodeMetricRecords.push(nodeMetricRecord) + # add data to the cache so filter_cadvisor2mdm.rb can use it + if is_windows_node + @NodeCache.mem.set_capacity(nodeMetricRecord["DataItems"][0]["Host"], nodeMetricRecord["DataItems"][0]["Collections"][0]["Value"]) + end end nodeMetricRecords.each do |metricRecord| metricRecord["DataType"] = "LINUX_PERF_BLOB" @@ -496,4 +515,77 @@ def getNodeTelemetryProps(item) return properties end end # Kube_Node_Input + + + class NodeStatsCache + # inner class for caching implementation (CPU and memory caching is handled the exact same way, so logic to do so is moved to a private inner class) + # (to reduce code duplication) + class NodeCache + + @@RECORD_TIME_TO_LIVE = 60*20 # units are seconds, so clear the cache every 20 minutes. + + def initialize + @cacheHash = {} + @timeAdded = {} # records when an entry was last added + @lock = Mutex.new + @lastCacheClearTime = 0 + + @cacheHash.default = 0.0 + @lastCacheClearTime = DateTime.now.to_time.to_i + end + + def get_capacity(node_name) + @lock.synchronize do + retval = @cacheHash[node_name] + return retval + end + end + + def set_capacity(host, val) + # check here if the cache has not been cleaned in a while. This way calling code doesn't have to remember to clean the cache + current_time = DateTime.now.to_time.to_i + if current_time - @lastCacheClearTime > @@RECORD_TIME_TO_LIVE + clean_cache + @lastCacheClearTime = current_time + end + + @lock.synchronize do + @cacheHash[host] = val + @timeAdded[host] = current_time + end + end + + def clean_cache() + $log.info "in_kube_nodes::clean_cache: cleaning node cpu/mem cache" + cacheClearTime = DateTime.now.to_time.to_i + @lock.synchronize do + nodes_to_remove = [] # first make a list of nodes to remove, then remove them. This intermediate + # list is used so that we aren't modifying a hash while iterating through it. 
+ @cacheHash.each do |key, val| + if cacheClearTime - @timeAdded[key] > @@RECORD_TIME_TO_LIVE + nodes_to_remove.append(key) + end + + nodes_to_remove.each do |node_name| + @cacheHash.delete(node_name) + @timeAdded.delete(node_name) + end + end + end + end # NodeCache + + + @@cpuCache = NodeCache.new + @@memCache = NodeCache.new + + def cpu() + return @@cpuCache + end + + def mem() + return @@memCache + end + end + end # module From 42730a47a852d2299a0c995a602980385e52598b Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Tue, 13 Apr 2021 13:50:25 -0700 Subject: [PATCH 090/301] OSM doc update (#533) --- Documentation/OSMPrivatePreview/ReadMe.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Documentation/OSMPrivatePreview/ReadMe.md b/Documentation/OSMPrivatePreview/ReadMe.md index 1becd80b5..aa90c7413 100644 --- a/Documentation/OSMPrivatePreview/ReadMe.md +++ b/Documentation/OSMPrivatePreview/ReadMe.md @@ -1,3 +1,5 @@ +Note - This is private preview. For any support issues, please reach out to us at [askcoin@microsoft.com](mailto:askcoin@microsoft.com). Please don't open a support ticket. + # Azure Monitor Container Insights Open Service Mesh Monitoring Azure Monitor container insights now supporting preview of [Open Service Mesh(OSM)](https://docs.microsoft.com/azure/aks/servicemesh-osm-about) Monitoring. As part of this support, customer can: @@ -64,5 +66,6 @@ InsightsMetrics ### Known Issues 1. The workbook has scale limits of 50 pods per namespace. If you have more than 50 pods in mesh you can have workbook loading issues. 2. When source or destination is osmcontroller we show no latency & for internal services we show no resource utilization. +3. When both prometheus scraping using pod annotations and OSM monitoring are enabled on the same set of namespaces, the default set of metrics (envoy_cluster_upstream_cx_total, envoy_cluster_upstream_cx_connect_fail, envoy_cluster_upstream_rq, envoy_cluster_upstream_rq_xx, envoy_cluster_upstream_rq_total, envoy_cluster_upstream_rq_time_bucket, envoy_cluster_upstream_cx_rx_bytes_total, envoy_cluster_upstream_cx_tx_bytes_total, envoy_cluster_upstream_cx_active) will be collected twice. You can follow [this](https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-prometheus-integration#prometheus-scraping-settings) documentation to exclude these namespaces from pod annotation scraping using the setting monitor_kubernetes_pods_namespaces to work around this issue. This is private preview, the goal for us is to get feedback. Please feel free to reach out to us at [askcoin@microsoft.com](mailto:askcoin@microsoft.com) for any feedback and questions!
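For known issue 3 above, a minimal sketch of the workaround follows, assuming the prometheus-data-collection-settings section of the container-azm-ms-agentconfig.yaml configmap described in the linked prometheus integration documentation; the namespace names are placeholders, and only namespaces that OSM monitoring does not cover should be listed, so that pod-annotation scraping and OSM monitoring do not collect the envoy metrics twice.

```yaml
prometheus-data-collection-settings: |-
    [prometheus_data_collection_settings.cluster]
        interval = "1m"
        monitor_kubernetes_pods = true
        # Placeholder namespaces: list only namespaces that are not part of the OSM mesh
        monitor_kubernetes_pods_namespaces = ["app-team-1", "app-team-2"]
```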
From 7ad52cdb7f15a94abf78927ce0a6969965361af4 Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Wed, 14 Apr 2021 10:28:27 -0700 Subject: [PATCH 091/301] Adding MDM metrics for threshold violation (#531) --- source/plugins/ruby/MdmAlertTemplates.rb | 67 +++++++++++- source/plugins/ruby/MdmMetricsGenerator.rb | 119 +++++++++++++++------ source/plugins/ruby/constants.rb | 10 +- 3 files changed, 161 insertions(+), 35 deletions(-) diff --git a/source/plugins/ruby/MdmAlertTemplates.rb b/source/plugins/ruby/MdmAlertTemplates.rb index ef63cf219..f2b713ff6 100644 --- a/source/plugins/ruby/MdmAlertTemplates.rb +++ b/source/plugins/ruby/MdmAlertTemplates.rb @@ -28,7 +28,7 @@ class MdmAlertTemplates } }' - Stable_job_metrics_template = ' + Stable_job_metrics_template = ' { "time": "%{timestamp}", "data": { @@ -90,6 +90,39 @@ class MdmAlertTemplates } }' + Container_resource_threshold_violation_template = ' + { + "time": "%{timestamp}", + "data": { + "baseData": { + "metric": "%{metricName}", + "namespace": "insights.container/containers", + "dimNames": [ + "containerName", + "podName", + "controllerName", + "Kubernetes namespace", + "thresholdPercentage" + ], + "series": [ + { + "dimValues": [ + "%{containerNameDimValue}", + "%{podNameDimValue}", + "%{controllerNameDimValue}", + "%{namespaceDimValue}", + "%{thresholdPercentageDimValue}" + ], + "min": %{containerResourceThresholdViolated}, + "max": %{containerResourceThresholdViolated}, + "sum": %{containerResourceThresholdViolated}, + "count": 1 + } + ] + } + } + }' + PV_resource_utilization_template = ' { "time": "%{timestamp}", @@ -123,6 +156,38 @@ class MdmAlertTemplates } }' + PV_resource_threshold_violation_template = ' + { + "time": "%{timestamp}", + "data": { + "baseData": { + "metric": "%{metricName}", + "namespace": "insights.container/persistentvolumes", + "dimNames": [ + "podName", + "node", + "kubernetesNamespace", + "volumeName", + "thresholdPercentage" + ], + "series": [ + { + "dimValues": [ + "%{podNameDimValue}", + "%{computerNameDimValue}", + "%{namespaceDimValue}", + "%{volumeNameDimValue}", + "%{thresholdPercentageDimValue}" + ], + "min": %{pvResourceThresholdViolated}, + "max": %{pvResourceThresholdViolated}, + "sum": %{pvResourceThresholdViolated}, + "count": 1 + } + ] + } + } + }' Node_resource_metrics_template = ' { diff --git a/source/plugins/ruby/MdmMetricsGenerator.rb b/source/plugins/ruby/MdmMetricsGenerator.rb index 12d462e44..8703f43a7 100644 --- a/source/plugins/ruby/MdmMetricsGenerator.rb +++ b/source/plugins/ruby/MdmMetricsGenerator.rb @@ -39,10 +39,21 @@ class MdmMetricsGenerator Constants::MEMORY_WORKING_SET_BYTES => Constants::MDM_CONTAINER_MEMORY_WORKING_SET_UTILIZATION_METRIC, } + @@container_metric_name_metric_threshold_violated_hash = { + Constants::CPU_USAGE_MILLI_CORES => Constants::MDM_CONTAINER_CPU_THRESHOLD_VIOLATED_METRIC, + Constants::CPU_USAGE_NANO_CORES => Constants::MDM_CONTAINER_CPU_THRESHOLD_VIOLATED_METRIC, + Constants::MEMORY_RSS_BYTES => Constants::MDM_CONTAINER_MEMORY_RSS_THRESHOLD_VIOLATED_METRIC, + Constants::MEMORY_WORKING_SET_BYTES => Constants::MDM_CONTAINER_MEMORY_WORKING_SET_THRESHOLD_VIOLATED_METRIC, + } + @@pod_metric_name_metric_percentage_name_hash = { Constants::PV_USED_BYTES => Constants::MDM_PV_UTILIZATION_METRIC, } + @@pod_metric_name_metric_threshold_violated_hash = { + Constants::PV_USED_BYTES => Constants::MDM_PV_THRESHOLD_VIOLATED_METRIC, + } + # Setting this to true since we need to send zero filled metrics at startup. 
If metrics are absent alert creation fails @sendZeroFilledMetrics = true @zeroFilledMetricsTimeTracker = DateTime.now.to_time.to_i @@ -158,43 +169,63 @@ def zeroFillMetricRecords(records, batch_time) metric_threshold_hash = getContainerResourceUtilizationThresholds container_zero_fill_dims = [Constants::OMSAGENT_ZERO_FILL, Constants::OMSAGENT_ZERO_FILL, Constants::OMSAGENT_ZERO_FILL, Constants::KUBESYSTEM_NAMESPACE_ZERO_FILL].join("~~") - containerCpuRecord = getContainerResourceUtilMetricRecords(batch_time, - Constants::CPU_USAGE_NANO_CORES, - 0, - container_zero_fill_dims, - metric_threshold_hash[Constants::CPU_USAGE_NANO_CORES]) - if !containerCpuRecord.nil? && !containerCpuRecord.empty? && !containerCpuRecord[0].nil? && !containerCpuRecord[0].empty? - records.push(containerCpuRecord[0]) + containerCpuRecords = getContainerResourceUtilMetricRecords(batch_time, + Constants::CPU_USAGE_NANO_CORES, + 0, + container_zero_fill_dims, + metric_threshold_hash[Constants::CPU_USAGE_NANO_CORES], + true) + if !containerCpuRecords.nil? && !containerCpuRecords.empty? + containerCpuRecords.each { |cpuRecord| + if !cpuRecord.nil? && !cpuRecord.empty? + records.push(cpuRecord) + end + } end - containerMemoryRssRecord = getContainerResourceUtilMetricRecords(batch_time, - Constants::MEMORY_RSS_BYTES, - 0, - container_zero_fill_dims, - metric_threshold_hash[Constants::MEMORY_RSS_BYTES]) - if !containerMemoryRssRecord.nil? && !containerMemoryRssRecord.empty? && !containerMemoryRssRecord[0].nil? && !containerMemoryRssRecord[0].empty? - records.push(containerMemoryRssRecord[0]) + containerMemoryRssRecords = getContainerResourceUtilMetricRecords(batch_time, + Constants::MEMORY_RSS_BYTES, + 0, + container_zero_fill_dims, + metric_threshold_hash[Constants::MEMORY_RSS_BYTES], + true) + if !containerMemoryRssRecords.nil? && !containerMemoryRssRecords.empty? + containerMemoryRssRecords.each { |memoryRssRecord| + if !memoryRssRecord.nil? && !memoryRssRecord.empty? + records.push(memoryRssRecord) + end + } end - containerMemoryWorkingSetRecord = getContainerResourceUtilMetricRecords(batch_time, - Constants::MEMORY_WORKING_SET_BYTES, - 0, - container_zero_fill_dims, - metric_threshold_hash[Constants::MEMORY_WORKING_SET_BYTES]) - if !containerMemoryWorkingSetRecord.nil? && !containerMemoryWorkingSetRecord.empty? && !containerMemoryWorkingSetRecord[0].nil? && !containerMemoryWorkingSetRecord[0].empty? - records.push(containerMemoryWorkingSetRecord[0]) + containerMemoryWorkingSetRecords = getContainerResourceUtilMetricRecords(batch_time, + Constants::MEMORY_WORKING_SET_BYTES, + 0, + container_zero_fill_dims, + metric_threshold_hash[Constants::MEMORY_WORKING_SET_BYTES], + true) + if !containerMemoryWorkingSetRecords.nil? && !containerMemoryWorkingSetRecords.empty? + containerMemoryWorkingSetRecords.each { |workingSetRecord| + if !workingSetRecord.nil? && !workingSetRecord.empty? + records.push(workingSetRecord) + end + } end pvZeroFillDims = {} pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_PVC_NAMESPACE] = Constants::KUBESYSTEM_NAMESPACE_ZERO_FILL pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_POD_NAME] = Constants::OMSAGENT_ZERO_FILL pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_VOLUME_NAME] = Constants::VOLUME_NAME_ZERO_FILL - pvResourceUtilMetricRecord = getPVResourceUtilMetricRecords(batch_time, - Constants::PV_USED_BYTES, - @@hostName, - 0, - pvZeroFillDims, - metric_threshold_hash[Constants::PV_USED_BYTES]) - if !pvResourceUtilMetricRecord.nil? && !pvResourceUtilMetricRecord.empty? 
&& !pvResourceUtilMetricRecord[0].nil? && !pvResourceUtilMetricRecord[0].empty? - records.push(pvResourceUtilMetricRecord[0]) + pvResourceUtilMetricRecords = getPVResourceUtilMetricRecords(batch_time, + Constants::PV_USED_BYTES, + @@hostName, + 0, + pvZeroFillDims, + metric_threshold_hash[Constants::PV_USED_BYTES], + true) + if !pvResourceUtilMetricRecords.nil? && !pvResourceUtilMetricRecords.empty? + pvResourceUtilMetricRecords.each { |pvRecord| + if !pvRecord.nil? && !pvRecord.empty? + records.push(pvRecord) + end + } end rescue => errorStr @log.info "Error in zeroFillMetricRecords: #{errorStr}" @@ -247,7 +278,7 @@ def appendAllPodMetrics(records, batch_time) return records end - def getContainerResourceUtilMetricRecords(recordTimeStamp, metricName, percentageMetricValue, dims, thresholdPercentage) + def getContainerResourceUtilMetricRecords(recordTimeStamp, metricName, percentageMetricValue, dims, thresholdPercentage, isZeroFill = false) records = [] begin if dims.nil? @@ -276,6 +307,19 @@ def getContainerResourceUtilMetricRecords(recordTimeStamp, metricName, percentag thresholdPercentageDimValue: thresholdPercentage, } records.push(Yajl::Parser.parse(StringIO.new(resourceUtilRecord))) + + # Adding another metric for threshold violation + resourceThresholdViolatedRecord = MdmAlertTemplates::Container_resource_threshold_violation_template % { + timestamp: recordTimeStamp, + metricName: @@container_metric_name_metric_threshold_violated_hash[metricName], + containerNameDimValue: containerName, + podNameDimValue: podName, + controllerNameDimValue: controllerName, + namespaceDimValue: podNamespace, + containerResourceThresholdViolated: isZeroFill ? 0 : 1, + thresholdPercentageDimValue: thresholdPercentage, + } + records.push(Yajl::Parser.parse(StringIO.new(resourceThresholdViolatedRecord))) rescue => errorStr @log.info "Error in getContainerResourceUtilMetricRecords: #{errorStr}" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) @@ -283,7 +327,7 @@ def getContainerResourceUtilMetricRecords(recordTimeStamp, metricName, percentag return records end - def getPVResourceUtilMetricRecords(recordTimeStamp, metricName, computer, percentageMetricValue, dims, thresholdPercentage) + def getPVResourceUtilMetricRecords(recordTimeStamp, metricName, computer, percentageMetricValue, dims, thresholdPercentage, isZeroFill = false) records = [] begin containerName = dims[Constants::INSIGHTSMETRICS_TAGS_CONTAINER_NAME] @@ -303,6 +347,19 @@ def getPVResourceUtilMetricRecords(recordTimeStamp, metricName, computer, percen thresholdPercentageDimValue: thresholdPercentage, } records.push(Yajl::Parser.parse(StringIO.new(resourceUtilRecord))) + + # Adding another metric for threshold violation + resourceThresholdViolatedRecord = MdmAlertTemplates::PV_resource_threshold_violation_template % { + timestamp: recordTimeStamp, + metricName: @@pod_metric_name_metric_threshold_violated_hash[metricName], + podNameDimValue: podName, + computerNameDimValue: computer, + namespaceDimValue: pvcNamespace, + volumeNameDimValue: volumeName, + pvResourceThresholdViolated: isZeroFill ? 
0 : 1, + thresholdPercentageDimValue: thresholdPercentage, + } + records.push(Yajl::Parser.parse(StringIO.new(resourceThresholdViolatedRecord))) rescue => errorStr @log.info "Error in getPVResourceUtilMetricRecords: #{errorStr}" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index cf41900dc..e0b0d1e0c 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -53,6 +53,10 @@ class Constants MDM_CONTAINER_MEMORY_RSS_UTILIZATION_METRIC = "memoryRssExceededPercentage" MDM_CONTAINER_MEMORY_WORKING_SET_UTILIZATION_METRIC = "memoryWorkingSetExceededPercentage" MDM_PV_UTILIZATION_METRIC = "pvUsageExceededPercentage" + MDM_CONTAINER_CPU_THRESHOLD_VIOLATED_METRIC = "cpuThresholdViolated" + MDM_CONTAINER_MEMORY_RSS_THRESHOLD_VIOLATED_METRIC = "memoryRssThresholdViolated" + MDM_CONTAINER_MEMORY_WORKING_SET_THRESHOLD_VIOLATED_METRIC = "memoryWorkingSetThresholdViolated" + MDM_PV_THRESHOLD_VIOLATED_METRIC = "pvUsageThresholdViolated" MDM_NODE_CPU_USAGE_PERCENTAGE = "cpuUsagePercentage" MDM_NODE_MEMORY_RSS_PERCENTAGE = "memoryRssPercentage" MDM_NODE_MEMORY_WORKING_SET_PERCENTAGE = "memoryWorkingSetPercentage" @@ -77,9 +81,9 @@ class Constants OMSAGENT_ZERO_FILL = "omsagent" KUBESYSTEM_NAMESPACE_ZERO_FILL = "kube-system" VOLUME_NAME_ZERO_FILL = "-" - PV_TYPES =["awsElasticBlockStore", "azureDisk", "azureFile", "cephfs", "cinder", "csi", "fc", "flexVolume", - "flocker", "gcePersistentDisk", "glusterfs", "hostPath", "iscsi", "local", "nfs", - "photonPersistentDisk", "portworxVolume", "quobyte", "rbd", "scaleIO", "storageos", "vsphereVolume"] + PV_TYPES = ["awsElasticBlockStore", "azureDisk", "azureFile", "cephfs", "cinder", "csi", "fc", "flexVolume", + "flocker", "gcePersistentDisk", "glusterfs", "hostPath", "iscsi", "local", "nfs", + "photonPersistentDisk", "portworxVolume", "quobyte", "rbd", "scaleIO", "storageos", "vsphereVolume"] #Telemetry constants CONTAINER_METRICS_HEART_BEAT_EVENT = "ContainerMetricsMdmHeartBeatEvent" From 34d1f64f89dd07168680aed955cbf5dfbe467885 Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Tue, 20 Apr 2021 17:51:23 -0700 Subject: [PATCH 092/301] Rashmi/april agent 2021 (#538) --- .../scripts/tomlparser-mdm-metrics-config.rb | 21 ++++++++++ kubernetes/container-azm-ms-agentconfig.yaml | 5 +++ source/plugins/ruby/KubernetesApiClient.rb | 34 ++++++++++++++++- source/plugins/ruby/MdmAlertTemplates.rb | 2 +- source/plugins/ruby/MdmMetricsGenerator.rb | 38 +++++++++++++++---- source/plugins/ruby/constants.rb | 3 +- source/plugins/ruby/podinventory_to_mdm.rb | 3 +- 7 files changed, 95 insertions(+), 11 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb b/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb index 345c51633..5ce5d79d2 100644 --- a/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb +++ b/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb @@ -13,6 +13,7 @@ @percentageMemoryRssThreshold = Constants::DEFAULT_MDM_MEMORY_RSS_THRESHOLD @percentageMemoryWorkingSetThreshold = Constants::DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD @percentagePVUsageThreshold = Constants::DEFAULT_MDM_PV_UTILIZATION_THRESHOLD +@jobCompletionThresholdMinutes = Constants::DEFAULT_MDM_JOB_COMPLETED_TIME_THRESHOLD_MINUTES # Use parser to parse the configmap toml file to a ruby structure def parseConfigMap @@ -101,6 +102,25 @@ def populateSettingValuesFromConfigMap(parsedConfig) 
ConfigParseErrorLogger.logError("Exception while reading config map settings for MDM metric configuration settings for PV utilization - #{errorStr}, using defaults, please check config map for errors") @percentagePVUsageThreshold = Constants::DEFAULT_MDM_PV_UTILIZATION_THRESHOLD end + + # Get mdm metrics config settings for job completion + begin + jobCompletion = parsedConfig[:alertable_metrics_configuration_settings][:job_completion_threshold] + if !jobCompletion.nil? + jobCompletionThreshold = jobCompletion[:job_completion_threshold_time_minutes] + jobCompletionThresholdInt = jobCompletionThreshold.to_i + if jobCompletionThresholdInt.kind_of? Integer + @jobCompletionThresholdMinutes = jobCompletionThresholdInt + else + puts "config::Non-integer value or value not convertible to integer specified for job completion threshold, using default " + @jobCompletionThresholdMinutes = Constants::DEFAULT_MDM_JOB_COMPLETED_TIME_THRESHOLD_MINUTES + end + puts "config::Using config map settings for MDM metric configuration settings for job completion" + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while reading config map settings for MDM metric configuration settings for job completion - #{errorStr}, using defaults, please check config map for errors") + @jobCompletionThresholdMinutes = Constants::DEFAULT_MDM_JOB_COMPLETED_TIME_THRESHOLD_MINUTES + end end end @@ -125,6 +145,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) file.write("export AZMON_ALERT_CONTAINER_MEMORY_RSS_THRESHOLD=#{@percentageMemoryRssThreshold}\n") file.write("export AZMON_ALERT_CONTAINER_MEMORY_WORKING_SET_THRESHOLD=\"#{@percentageMemoryWorkingSetThreshold}\"\n") file.write("export AZMON_ALERT_PV_USAGE_THRESHOLD=#{@percentagePVUsageThreshold}\n") + file.write("export AZMON_ALERT_JOB_COMPLETION_TIME_THRESHOLD=#{@jobCompletionThresholdMinutes}\n") # Close file after writing all MDM setting environment variables file.close puts "****************End MDM Metrics Config Processing********************" diff --git a/kubernetes/container-azm-ms-agentconfig.yaml b/kubernetes/container-azm-ms-agentconfig.yaml index e38d9b4ab..543f270c1 100644 --- a/kubernetes/container-azm-ms-agentconfig.yaml +++ b/kubernetes/container-azm-ms-agentconfig.yaml @@ -126,6 +126,11 @@ data: [alertable_metrics_configuration_settings.pv_utilization_thresholds] # Threshold for persistent volume usage bytes, metric will be sent only when persistent volume utilization exceeds or becomes equal to the following percentage pv_usage_threshold_percentage = 60.0 + + # Alertable metrics configuration settings for completed jobs count + [alertable_metrics_configuration_settings.job_completion_threshold] + # Threshold for completed job count, metric will be sent only for those jobs which were completed earlier than the following threshold + job_completion_threshold_time_minutes = 360 integrations: |- [integrations.azure_network_policy_manager] collect_basic_metrics = false diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index c5a363741..98347d272 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -31,6 +31,8 @@ class KubernetesApiClient @@TokenStr = nil @@NodeMetrics = Hash.new @@WinNodeArray = [] + @@telemetryTimeTracker = DateTime.now.to_time.to_i + @@resourceLimitsTelemetryHash = {} def initialize end @@ -403,9 +405,12 @@ def getPodUid(podNameSpace, podMetadata) def getContainerResourceRequestsAndLimits(pod, metricCategory,
metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) metricItems = [] + timeDifference = (DateTime.now.to_time.to_i - @@telemetryTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 begin clusterId = getClusterId podNameSpace = pod["metadata"]["namespace"] + podName = pod["metadata"]["name"] podUid = getPodUid(podNameSpace, pod["metadata"]) if podUid.nil? return metricItems @@ -456,6 +461,33 @@ def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToColle metricProps["Collections"].push(metricCollections) metricItem["DataItems"].push(metricProps) metricItems.push(metricItem) + #Telemetry about omsagent requests and limits + begin + if (podName.downcase.start_with?("omsagent-") && podNameSpace.eql?("kube-system") && containerName.downcase.start_with?("omsagent")) + nodePodContainerKey = [nodeName, podName, containerName, metricNametoReturn].join("~~") + @@resourceLimitsTelemetryHash[nodePodContainerKey] = metricValue + end + if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + @@resourceLimitsTelemetryHash.each { |key, value| + keyElements = key.split("~~") + if keyElements.length != 4 + next + end + + # get dimension values by key + telemetryProps = {} + telemetryProps["Computer"] = keyElements[0] + telemetryProps["PodName"] = keyElements[1] + telemetryProps["ContainerName"] = keyElements[2] + metricNameFromKey = keyElements[3] + ApplicationInsightsUtility.sendMetricTelemetry(metricNameFromKey, value, telemetryProps) + } + @@telemetryTimeTracker = DateTime.now.to_time.to_i + @@resourceLimitsTelemetryHash = {} + end + rescue => errorStr + $log.warn("Exception while generating Telemetry from getContainerResourceRequestsAndLimits failed: #{errorStr} for metric #{metricNameToCollect}") + end #No container level limit for the given metric, so default to node level limit else nodeMetricsHashKey = clusterId + "/" + nodeName + "_" + "allocatable" + "_" + metricNameToCollect @@ -791,7 +823,7 @@ def getKubeAPIServerUrl def getKubeServicesInventoryRecords(serviceList, batchTime = Time.utc.iso8601) kubeServiceRecords = [] begin - if (!serviceList.nil? && !serviceList.empty? && serviceList.key?("items") && !serviceList["items"].nil? && !serviceList["items"].empty? ) + if (!serviceList.nil? && !serviceList.empty? && serviceList.key?("items") && !serviceList["items"].nil? && !serviceList["items"].empty?) 
servicesCount = serviceList["items"].length @Log.info("KubernetesApiClient::getKubeServicesInventoryRecords : number of services in serviceList #{servicesCount} @ #{Time.now.utc.iso8601}") serviceList["items"].each do |item| diff --git a/source/plugins/ruby/MdmAlertTemplates.rb b/source/plugins/ruby/MdmAlertTemplates.rb index f2b713ff6..e889c3f09 100644 --- a/source/plugins/ruby/MdmAlertTemplates.rb +++ b/source/plugins/ruby/MdmAlertTemplates.rb @@ -45,7 +45,7 @@ class MdmAlertTemplates "dimValues": [ "%{controllerNameDimValue}", "%{namespaceDimValue}", - "6" + "%{jobCompletionThreshold}" ], "min": %{containerCountMetricValue}, "max": %{containerCountMetricValue}, diff --git a/source/plugins/ruby/MdmMetricsGenerator.rb b/source/plugins/ruby/MdmMetricsGenerator.rb index 8703f43a7..f2aa92c14 100644 --- a/source/plugins/ruby/MdmMetricsGenerator.rb +++ b/source/plugins/ruby/MdmMetricsGenerator.rb @@ -107,13 +107,28 @@ def appendPodMetrics(records, metricName, metricHash, batch_time, metricsTemplat podControllerNameDimValue = key_elements[0] podNamespaceDimValue = key_elements[1] - record = metricsTemplate % { - timestamp: batch_time, - metricName: metricName, - controllerNameDimValue: podControllerNameDimValue, - namespaceDimValue: podNamespaceDimValue, - containerCountMetricValue: value, - } + # Special handling for jobs since we need to send the threshold as a dimension as it is configurable + if metricName == Constants::MDM_STALE_COMPLETED_JOB_COUNT + metric_threshold_hash = getContainerResourceUtilizationThresholds + #Converting this to hours since we already have olderThanHours dimension. + jobCompletionThresholdHours = metric_threshold_hash[Constants::JOB_COMPLETION_TIME] / 60.0 + record = metricsTemplate % { + timestamp: batch_time, + metricName: metricName, + controllerNameDimValue: podControllerNameDimValue, + namespaceDimValue: podNamespaceDimValue, + containerCountMetricValue: value, + jobCompletionThreshold: jobCompletionThresholdHours, + } + else + record = metricsTemplate % { + timestamp: batch_time, + metricName: metricName, + controllerNameDimValue: podControllerNameDimValue, + namespaceDimValue: podNamespaceDimValue, + containerCountMetricValue: value, + } + end records.push(Yajl::Parser.parse(StringIO.new(record))) } else @@ -140,9 +155,11 @@ def flushPodMdmMetricTelemetry staleJobHashValues = @stale_job_count_hash.values staleJobMetricCount = staleJobHashValues.inject(0) { |sum, x| sum + x } + metric_threshold_hash = getContainerResourceUtilizationThresholds properties["ContainerRestarts"] = containerRestartMetricCount properties["OomKilledContainers"] = oomKilledContainerMetricCount properties["OldCompletedJobs"] = staleJobMetricCount + properties["JobCompletionThesholdTimeInMinutes"] = metric_threshold_hash[Constants::JOB_COMPLETION_TIME] ApplicationInsightsUtility.sendCustomEvent(Constants::CONTAINER_METRICS_HEART_BEAT_EVENT, properties) ApplicationInsightsUtility.sendCustomEvent(Constants::POD_READY_PERCENTAGE_HEART_BEAT_EVENT, {}) rescue => errorStr @@ -465,6 +482,7 @@ def getContainerResourceUtilizationThresholds metric_threshold_hash[Constants::MEMORY_RSS_BYTES] = Constants::DEFAULT_MDM_MEMORY_RSS_THRESHOLD metric_threshold_hash[Constants::MEMORY_WORKING_SET_BYTES] = Constants::DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD metric_threshold_hash[Constants::PV_USED_BYTES] = Constants::DEFAULT_MDM_PV_UTILIZATION_THRESHOLD + metric_threshold_hash[Constants::JOB_COMPLETION_TIME] = Constants::DEFAULT_MDM_JOB_COMPLETED_TIME_THRESHOLD_MINUTES cpuThreshold = 
ENV["AZMON_ALERT_CONTAINER_CPU_THRESHOLD"] if !cpuThreshold.nil? && !cpuThreshold.empty? @@ -490,6 +508,12 @@ def getContainerResourceUtilizationThresholds pvUsagePercentageThresholdFloat = (pvUsagePercentageThreshold.to_f).round(2) metric_threshold_hash[Constants::PV_USED_BYTES] = pvUsagePercentageThresholdFloat end + + jobCompletionTimeThreshold = ENV["AZMON_ALERT_JOB_COMPLETION_TIME_THRESHOLD"] + if !jobCompletionTimeThreshold.nil? && !jobCompletionTimeThreshold.empty? + jobCompletionTimeThresholdInt = jobCompletionTimeThreshold.to_i + metric_threshold_hash[Constants::JOB_COMPLETION_TIME] = jobCompletionTimeThresholdInt + end rescue => errorStr @log.info "Error in getContainerResourceUtilizationThresholds: #{errorStr}" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index e0b0d1e0c..906019b95 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -69,14 +69,15 @@ class Constants MEMORY_WORKING_SET_BYTES = "memoryWorkingSetBytes" MEMORY_RSS_BYTES = "memoryRssBytes" PV_USED_BYTES = "pvUsedBytes" + JOB_COMPLETION_TIME = "completedJobTimeMinutes" DEFAULT_MDM_CPU_UTILIZATION_THRESHOLD = 95.0 DEFAULT_MDM_MEMORY_RSS_THRESHOLD = 95.0 DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD = 95.0 DEFAULT_MDM_PV_UTILIZATION_THRESHOLD = 60.0 + DEFAULT_MDM_JOB_COMPLETED_TIME_THRESHOLD_MINUTES = 360 CONTROLLER_KIND_JOB = "job" CONTAINER_TERMINATION_REASON_COMPLETED = "completed" CONTAINER_STATE_TERMINATED = "terminated" - STALE_JOB_TIME_IN_MINUTES = 360 TELEGRAF_DISK_METRICS = "container.azm.ms/disk" OMSAGENT_ZERO_FILL = "omsagent" KUBESYSTEM_NAMESPACE_ZERO_FILL = "kube-system" diff --git a/source/plugins/ruby/podinventory_to_mdm.rb b/source/plugins/ruby/podinventory_to_mdm.rb index 77370e284..d9cb71bd4 100644 --- a/source/plugins/ruby/podinventory_to_mdm.rb +++ b/source/plugins/ruby/podinventory_to_mdm.rb @@ -88,6 +88,7 @@ def initialize() @pod_count_by_phase = {} @pod_uids = {} @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability + @metric_threshold_hash = MdmMetricsGenerator.getContainerResourceUtilizationThresholds @log.debug "After check_custom_metrics_availability process_incoming_stream #{@process_incoming_stream}" @log.debug { "Starting podinventory_to_mdm plugin" } end @@ -259,7 +260,7 @@ def process_record_for_terminated_job_metric(podControllerNameDimValue, podNames if !containerFinishedTime.nil? && !containerFinishedTime.empty? finishedTimeParsed = Time.parse(containerFinishedTime) # Check to see if job was completed 6 hours ago/STALE_JOB_TIME_IN_MINUTES - if ((Time.now - finishedTimeParsed) / 60) > Constants::STALE_JOB_TIME_IN_MINUTES + if ((Time.now - finishedTimeParsed) / 60) > @metric_threshold_hash[Constants::JOB_COMPLETION_TIME] MdmMetricsGenerator.generateStaleJobCountMetrics(podControllerNameDimValue, podNamespaceDimValue) end From fcc50480ce1c56a14657bf75c54609340e1c23e2 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Wed, 21 Apr 2021 09:57:02 -0700 Subject: [PATCH 093/301] add Read_from_Head config for all fluentbit tail plugins (#539) See the commit message of: fluent/fluent-bit@70e33fa for details explaining the fluentbit change and what Read_from_Head does when set to true. 
--- build/linux/installer/conf/td-agent-bit-prom-side-car.conf | 1 + build/linux/installer/conf/td-agent-bit-rs.conf | 1 + build/linux/installer/conf/td-agent-bit.conf | 4 ++++ 3 files changed, 6 insertions(+) diff --git a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf index 339e509b0..05fa3afd2 100644 --- a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf +++ b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf @@ -15,6 +15,7 @@ Name tail Tag oms.container.log.flbplugin.terminationlog.* Path /dev/write-to-traces + Read_from_Head true DB /var/opt/microsoft/docker-cimprov/state/terminationlog-ai.db DB.Sync Off Parser docker diff --git a/build/linux/installer/conf/td-agent-bit-rs.conf b/build/linux/installer/conf/td-agent-bit-rs.conf index c94b4c40e..9613c270d 100644 --- a/build/linux/installer/conf/td-agent-bit-rs.conf +++ b/build/linux/installer/conf/td-agent-bit-rs.conf @@ -14,6 +14,7 @@ Name tail Tag oms.container.log.flbplugin.terminationlog.* Path /dev/write-to-traces + Read_from_Head true DB /var/opt/microsoft/docker-cimprov/state/terminationlog-ai.db DB.Sync Off Parser docker diff --git a/build/linux/installer/conf/td-agent-bit.conf b/build/linux/installer/conf/td-agent-bit.conf index 287c076dc..045aefcaf 100644 --- a/build/linux/installer/conf/td-agent-bit.conf +++ b/build/linux/installer/conf/td-agent-bit.conf @@ -15,6 +15,7 @@ Name tail Tag oms.container.log.la.* Path ${AZMON_LOG_TAIL_PATH} + Read_from_Head true DB /var/log/omsagent-fblogs.db DB.Sync Off Parser docker @@ -32,6 +33,7 @@ Name tail Tag oms.container.log.flbplugin.* Path /var/log/containers/omsagent*.log + Read_from_Head true DB /var/opt/microsoft/docker-cimprov/state/omsagent-ai.db DB.Sync Off Parser docker @@ -44,6 +46,7 @@ Name tail Tag oms.container.log.flbplugin.mdsd.* Path /var/opt/microsoft/linuxmonagent/log/mdsd.err + Read_from_Head true DB /var/opt/microsoft/docker-cimprov/state/mdsd-ai.db DB.Sync Off Parser docker @@ -56,6 +59,7 @@ Name tail Tag oms.container.log.flbplugin.terminationlog.* Path /dev/write-to-traces + Read_from_Head true DB /var/opt/microsoft/docker-cimprov/state/terminationlog-ai.db DB.Sync Off Parser docker From 01e5529bc8bead27d04607db7b087a0645d1e7db Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 21 Apr 2021 20:06:15 -0700 Subject: [PATCH 094/301] fix programdata mount issue on containerd win nodes (#542) --- .../templates/omsagent-daemonset-windows.yaml | 1 + kubernetes/omsagent.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index 8868b86bb..580ef9d15 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -118,6 +118,7 @@ spec: - name: docker-windows-containers hostPath: path: C:\ProgramData\docker\containers + type: DirectoryOrCreate - name: settings-vol-config configMap: name: container-azm-ms-agentconfig diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 206d9a8f0..e98b8ace3 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -839,6 +839,7 @@ spec: - name: docker-windows-containers hostPath: path: C:\ProgramData\docker\containers + type: DirectoryOrCreate - name: settings-vol-config configMap: name: container-azm-ms-agentconfig From b5d074afecff2998c8171f8b3b10e25fc9b21ccf 
Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Thu, 22 Apr 2021 13:52:43 -0700 Subject: [PATCH 095/301] Update sidecar mem limits (#541) --- build/linux/installer/conf/td-agent-bit-prom-side-car.conf | 6 +++--- kubernetes/omsagent.yaml | 2 +- source/plugins/ruby/MdmMetricsGenerator.rb | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf index 05fa3afd2..8a69f7995 100644 --- a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf +++ b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf @@ -29,9 +29,9 @@ Tag oms.container.perf.telegraf.* Listen 0.0.0.0 Port 25229 - Chunk_Size 1m - Buffer_Size 1m - Mem_Buf_Limit 20m + Chunk_Size 10m + Buffer_Size 10m + Mem_Buf_Limit 200m [OUTPUT] Name oms diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index e98b8ace3..fc3428a26 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -451,7 +451,7 @@ spec: resources: limits: cpu: 500m - memory: 400Mi + memory: 1Gi requests: cpu: 75m memory: 225Mi diff --git a/source/plugins/ruby/MdmMetricsGenerator.rb b/source/plugins/ruby/MdmMetricsGenerator.rb index f2aa92c14..6641456af 100644 --- a/source/plugins/ruby/MdmMetricsGenerator.rb +++ b/source/plugins/ruby/MdmMetricsGenerator.rb @@ -111,7 +111,7 @@ def appendPodMetrics(records, metricName, metricHash, batch_time, metricsTemplat if metricName == Constants::MDM_STALE_COMPLETED_JOB_COUNT metric_threshold_hash = getContainerResourceUtilizationThresholds #Converting this to hours since we already have olderThanHours dimension. - jobCompletionThresholdHours = metric_threshold_hash[Constants::JOB_COMPLETION_TIME] / 60.0 + jobCompletionThresholdHours = (metric_threshold_hash[Constants::JOB_COMPLETION_TIME] / 60.0).round(2) record = metricsTemplate % { timestamp: batch_time, metricName: metricName, From 5feeb3e3e617f2ac286e7c222e5e8573c2543361 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Thu, 22 Apr 2021 14:51:51 -0700 Subject: [PATCH 096/301] David/release 4 22 2021 (#544) * updating image tag and agent version * updated liveness probe * updated release notes again * fixed date in version file --- ReleaseNotes.md | 17 +++++++++++++++++ build/version | 4 ++-- charts/azuremonitor-containers/Chart.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 6 +++--- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 14 +++++++------- kubernetes/windows/Dockerfile | 2 +- .../onboarding/managed/enable-monitoring.ps1 | 2 +- scripts/onboarding/managed/enable-monitoring.sh | 2 +- .../onboarding/managed/upgrade-monitoring.sh | 2 +- 10 files changed, 35 insertions(+), 18 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 04bd7c6e5..acbd579a0 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,23 @@ additional questions or comments. 
Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 04/22/2021 - +##### Version microsoft/oms:ciprod04222021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod04222021 (linux) +##### Version microsoft/oms:win-ciprod04222021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod04222021 (windows) +##### Code change log +- Bug fixes for metrics cpuUsagePercentage and memoryWorkingSetPercentage for windows nodes +- Added metrics for threshold violation +- Made Job completion metric configurable +- Updated default buffer sizes in fluent-bit +- Updated recommended alerts +- Fixed bug where logs written before agent starts up were not collected +- Fixed bug which kept agent logs from being rotated +- Bug fix for Windows Containerd container log collection +- Bug fixes +- Doc updates +- Minor telemetry changes + + ### 03/26/2021 - ##### Version microsoft/oms:ciprod03262021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021 (linux) ##### Version microsoft/oms:win-ciprod03262021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03262021 (windows) diff --git a/build/version b/build/version index 83a0a174b..16a43604a 100644 --- a/build/version +++ b/build/version @@ -2,11 +2,11 @@ # Build Version Information -CONTAINER_BUILDVERSION_MAJOR=14 +CONTAINER_BUILDVERSION_MAJOR=15 CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 CONTAINER_BUILDVERSION_BUILDNR=0 -CONTAINER_BUILDVERSION_DATE=20210326 +CONTAINER_BUILDVERSION_DATE=20210422 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 9c8014ed0..00f3f49ed 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.8.2 +version: 2.8.3 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 4b539546b..9dd5317a4 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -21,10 +21,10 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod03262021" - tagWindows: "win-ciprod03262021" + tag: "ciprod04222021" + tagWindows: "win-ciprod04222021" pullPolicy: IfNotPresent - dockerProviderVersion: "14.0.0-0" + dockerProviderVersion: "15.0.0-0" agentVersion: "1.10.0.1" # The priority used by the omsagent priority class for the daemonset pods diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 76b8622b4..d5ece4509 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod03262021 +ARG IMAGE_TAG=ciprod04222021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index fc3428a26..feea3f29a 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@
-358,7 +358,7 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "14.0.0-0" + dockerProviderVersion: "15.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod04222021" imagePullPolicy: IfNotPresent resources: limits: @@ -446,7 +446,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod04222021" imagePullPolicy: IfNotPresent resources: limits: @@ -583,13 +583,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "14.0.0-0" + dockerProviderVersion: "15.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod04222021" imagePullPolicy: IfNotPresent resources: limits: @@ -750,7 +750,7 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "14.0.0-0" + dockerProviderVersion: "15.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -760,7 +760,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03262021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod04222021" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index e4ace417a..fefd089a8 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod03262021 +ARG IMAGE_TAG=win-ciprod04222021 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index baf547497..828d061ac 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -64,7 +64,7 @@ $isUsingServicePrincipal = $false # released chart version in mcr $mcr = "mcr.microsoft.com" -$mcrChartVersion = "2.8.2" +$mcrChartVersion = "2.8.3" $mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" $helmLocalRepoName = "." $omsAgentDomainName="opinsights.azure.com" diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index 9747d932d..f27f944fd 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -44,7 +44,7 @@ defaultAzureCloud="AzureCloud" omsAgentDomainName="opinsights.azure.com" # released chart version in mcr -mcrChartVersion="2.8.2" +mcrChartVersion="2.8.3" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" helmLocalRepoName="." 
diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 1cf7b5c97..5456a7072 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -20,7 +20,7 @@ set -e set -o pipefail # released chart version for Azure Arc enabled Kubernetes public preview -mcrChartVersion="2.8.2" +mcrChartVersion="2.8.3" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" From 1b2da4adb4a8beac41af1cf2fd093872a52c95c6 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Fri, 23 Apr 2021 09:29:10 -0700 Subject: [PATCH 097/301] 1m, 1m, 1s by default (#543) * 1m, 1m, 1s by default * setting default through a different method --- .../installer/scripts/td-agent-bit-conf-customizer.rb | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb index 35b71e550..ea1536866 100644 --- a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb +++ b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb @@ -3,7 +3,9 @@ @td_agent_bit_conf_path = "/etc/opt/microsoft/docker-cimprov/td-agent-bit.conf" -@default_service_interval = "15" +@default_service_interval = "1" +@default_buffer_chunk_size = "1" +@default_buffer_max_size = "1" def is_number?(value) true if Integer(value) rescue false @@ -21,9 +23,9 @@ def substituteFluentBitPlaceHolders serviceInterval = (!interval.nil? && is_number?(interval) && interval.to_i > 0 ) ? interval : @default_service_interval serviceIntervalSetting = "Flush " + serviceInterval - tailBufferChunkSize = (!bufferChunkSize.nil? && is_number?(bufferChunkSize) && bufferChunkSize.to_i > 0) ? bufferChunkSize : nil + tailBufferChunkSize = (!bufferChunkSize.nil? && is_number?(bufferChunkSize) && bufferChunkSize.to_i > 0) ? bufferChunkSize : @default_buffer_chunk_size - tailBufferMaxSize = (!bufferMaxSize.nil? && is_number?(bufferMaxSize) && bufferMaxSize.to_i > 0) ? bufferMaxSize : nil + tailBufferMaxSize = (!bufferMaxSize.nil? && is_number?(bufferMaxSize) && bufferMaxSize.to_i > 0) ? bufferMaxSize : @default_buffer_max_size = "1" if ((!tailBufferChunkSize.nil? && tailBufferMaxSize.nil?) || (!tailBufferChunkSize.nil? && !tailBufferMaxSize.nil? 
&& tailBufferChunkSize.to_i > tailBufferMaxSize.to_i)) puts "config:warn buffer max size must be greater or equal to chunk size" From 83e5816d6f3d92a6ff4dad32ac68694274714d23 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Wed, 12 May 2021 16:15:04 -0700 Subject: [PATCH 098/301] David/aad stage 1 release (#556) * update to latest omsagent, add eastus2 to mdsd regions * copied oneagent bits to a CI repository release * mdsd inmem mode * yaml for cl scale test * yaml for cl scale test * reverting dockerProviderVersion version to 15.0.0 * prepping for release (updated image version, dockerProviderVersion, and release notes * container log scaletest yamls * forgot to update image version in chart * fixing windows tag in dockerfile, changing release notes wording * missed windows tag in one more place * forgot to change the windows dockerProviderVersion back Co-authored-by: Ganga Mahesh Siddem --- ReleaseNotes.md | 9 ++- .../linux/installer/scripts/livenessprobe.sh | 2 +- build/version | 4 +- kubernetes/linux/Dockerfile | 2 +- kubernetes/linux/envmdsd | 4 ++ kubernetes/linux/main.sh | 2 +- kubernetes/linux/setup.sh | 4 +- kubernetes/omsagent.yaml | 12 ++-- .../400logspersec-2klogentrysize.yaml | 60 +++++++++++++++++++ .../400logspersec-5klogentrysize.yaml | 60 +++++++++++++++++++ .../ci-log-scale-4kpersec-5klogline.yaml | 60 +++++++++++++++++++ 11 files changed, 205 insertions(+), 14 deletions(-) create mode 100644 test/containerlog-scale-tests/400logspersec-2klogentrysize.yaml create mode 100644 test/containerlog-scale-tests/400logspersec-5klogentrysize.yaml create mode 100644 test/containerlog-scale-tests/ci-log-scale-4kpersec-5klogline.yaml diff --git a/ReleaseNotes.md b/ReleaseNotes.md index acbd579a0..979eb968b 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,14 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) + +### 05/12/2021 - +##### Version microsoft/oms:ciprod00512021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod00512021 (linux) +##### No Windows changes with this release, win-ciprod04222021 still current. +##### Code change log +- Upgrading oneagent to version 1.8 (only for Linux) +- Enabling oneagent for container logs for East US 2 + ### 04/22/2021 - ##### Version microsoft/oms:ciprod04222021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod04222021 (linux) ##### Version microsoft/oms:win-ciprod04222021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod04222021 (windows) @@ -27,7 +35,6 @@ Note : The agent version(s) below has dates (ciprod), which indicate t - Doc updates - Minor telemetry changes - ### 03/26/2021 - ##### Version microsoft/oms:ciprod03262021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021 (linux) ##### Version microsoft/oms:win-ciprod03262021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03262021 (windows) diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index e3b0fc28e..198b4e87f 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -10,7 +10,7 @@ fi #optionally test to exit non zero value if oneagent is not running if [ -e "/opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2" ]; then - (ps -ef | grep "mdsd -l" | grep -v "grep") + (ps -ef | grep "mdsd" | grep -v "grep") if [ $? 
-ne 0 ] then echo "oneagent is not running" > /dev/termination-log diff --git a/build/version b/build/version index 16a43604a..81bb808f5 100644 --- a/build/version +++ b/build/version @@ -3,10 +3,10 @@ # Build Version Information CONTAINER_BUILDVERSION_MAJOR=15 -CONTAINER_BUILDVERSION_MINOR=0 +CONTAINER_BUILDVERSION_MINOR=1 CONTAINER_BUILDVERSION_PATCH=0 CONTAINER_BUILDVERSION_BUILDNR=0 -CONTAINER_BUILDVERSION_DATE=20210422 +CONTAINER_BUILDVERSION_DATE=20210512 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index d5ece4509..822e52bc8 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod04222021 +ARG IMAGE_TAG=ciprod05122021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/linux/envmdsd b/kubernetes/linux/envmdsd index e4886012e..3f834bfb8 100644 --- a/kubernetes/linux/envmdsd +++ b/kubernetes/linux/envmdsd @@ -12,3 +12,7 @@ export HOSTNAME_OVERRIDE="${NODE_NAME}" export MDSD_TCMALLOC_RELEASE_FREQ_SEC=1 export MDSD_COMPRESSION_ALGORITHM=LZ4 export SSL_CERT_DIR="/etc/ssl/certs" +# increase the size of msgpack items mdsd will accept, otherwise they will be silently dropped. These values were arbitrairly chosen to be 10 or 100 times larger than the defaults. +export MDSD_MSGPACK_ARRAY_SIZE_ITEMS=10000000 +export MDSD_MSGPACK_MAP_SIZE_ITEMS=10000000 +export MDSD_MSGPACK_NESTING_LEVEL=100 diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 81db6f3a4..f03318ad1 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -581,7 +581,7 @@ if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "PrometheusSidec dpkg -l | grep mdsd | awk '{print $2 " " $3}' echo "starting mdsd ..." 
- mdsd -l -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & + mdsd -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & touch /opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2 fi diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index ee3756964..f065cc165 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -31,8 +31,8 @@ mv $TMPDIR/omsbundle* $TMPDIR/omsbundle /usr/bin/dpkg -i $TMPDIR/omsbundle/110/omsagent*.deb #/usr/bin/dpkg -i $TMPDIR/omsbundle/100/omsconfig*.deb -#install oneagent - Official bits (10/18) -wget https://github.com/microsoft/Docker-Provider/releases/download/10182020-oneagent/azure-mdsd_1.5.126-build.master.99_x86_64.deb +#install oneagent - Official bits (05/2021) +wget https://github.com/microsoft/Docker-Provider/releases/download/05112021-oneagent/azure-mdsd_1.8.0-build.master.189_x86_64.deb /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb cp -f $TMPDIR/mdsd.xml /etc/mdsd.d cp -f $TMPDIR/envmdsd /etc/mdsd.d diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index feea3f29a..bf94490ba 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -358,7 +358,7 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "15.0.0-0" + dockerProviderVersion: "15.1.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod04222021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021" imagePullPolicy: IfNotPresent resources: limits: @@ -399,7 +399,7 @@ spec: - name: USER_ASSIGNED_IDENTITY_CLIENT_ID value: "" - name: AZMON_CONTAINERLOGS_ONEAGENT_REGIONS - value: "koreacentral,norwayeast" + value: "koreacentral,norwayeast,eastus2" securityContext: privileged: true ports: @@ -446,7 +446,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod04222021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021" imagePullPolicy: IfNotPresent resources: limits: @@ -583,13 +583,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "15.0.0-0" + dockerProviderVersion: "15.1.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod04222021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021" imagePullPolicy: IfNotPresent resources: limits: diff --git a/test/containerlog-scale-tests/400logspersec-2klogentrysize.yaml b/test/containerlog-scale-tests/400logspersec-2klogentrysize.yaml new file mode 100644 index 000000000..cc3dd5259 --- /dev/null +++ b/test/containerlog-scale-tests/400logspersec-2klogentrysize.yaml @@ -0,0 +1,60 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: logs-400persec-2kentrysize +spec: + parallelism: 1 + completions: 1 + template: + metadata: + name: logs-400persec-2kentrysize + spec: + volumes: + - name: logs-400persec-2kentrysize-scripts-volume + configMap: + name: logs-400persec-test-scripts + containers: + - name: logs-400persec-2kentrysize + image: ubuntu + volumeMounts: + - mountPath: /logs-400persec-test-scripts + name: logs-400persec-2kentrysize-scripts-volume + env: + - name: HOME + value: /tmp + 
command: + - /bin/sh + - -c + - | + echo "scripts in /logs-400persec-test-scripts" + ls -lh /logs-400persec-test-scripts + echo "copy scripts to /tmp" + cp /logs-400persec-test-scripts/*.sh /tmp + echo "apply 'chmod +x' to /tmp/*.sh" + chmod +x /tmp/*.sh + echo "script.sh in /tmp" + ls -lh /tmp + /tmp/script.sh + restartPolicy: Never +--- +apiVersion: v1 +items: +- apiVersion: v1 + data: + script.sh: | + #!/bin/bash + logentry='' + for var in {1..400..1} + do + logentry="${logentry}Test-" + done + for var in {1..200000..1} + do + echo $(date "+%Y/%m/%d %H:%M:%S.%3N") ${var}: $logentry + done + kind: ConfigMap + metadata: + creationTimestamp: null + name: logs-400persec-test-scripts +kind: List +metadata: {} diff --git a/test/containerlog-scale-tests/400logspersec-5klogentrysize.yaml b/test/containerlog-scale-tests/400logspersec-5klogentrysize.yaml new file mode 100644 index 000000000..42188631a --- /dev/null +++ b/test/containerlog-scale-tests/400logspersec-5klogentrysize.yaml @@ -0,0 +1,60 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: logs-400persec-5kentrysize +spec: + parallelism: 1 + completions: 1 + template: + metadata: + name: logs-400persec-5kentrysize + spec: + volumes: + - name: logs-400persec-5kentrysize-scripts-volume + configMap: + name: logs-400persec-5kentrysize-test-scripts + containers: + - name: logs-400persec-5kentrysize + image: ubuntu + volumeMounts: + - mountPath: /logs-400persec-5kentrysize-test-scripts + name: logs-400persec-5kentrysize-scripts-volume + env: + - name: HOME + value: /tmp + command: + - /bin/sh + - -c + - | + echo "scripts in /logs-400persec-5kentrysize-test-scripts" + ls -lh /logs-400persec-5kentrysize-test-scripts + echo "copy scripts to /tmp" + cp /logs-400persec-5kentrysize-test-scripts/*.sh /tmp + echo "apply 'chmod +x' to /tmp/*.sh" + chmod +x /tmp/*.sh + echo "script.sh in /tmp" + ls -lh /tmp + /tmp/script.sh + restartPolicy: Never +--- +apiVersion: v1 +items: +- apiVersion: v1 + data: + script.sh: | + #!/bin/bash + logentry='' + for var in {1..1024..1} + do + logentry="${logentry}Test-" + done + for var in {1..200000..1} + do + echo $(date "+%Y/%m/%d %H:%M:%S.%3N") ${var}: $logentry + done + kind: ConfigMap + metadata: + creationTimestamp: null + name: logs-400persec-5kentrysize-test-scripts +kind: List +metadata: {} diff --git a/test/containerlog-scale-tests/ci-log-scale-4kpersec-5klogline.yaml b/test/containerlog-scale-tests/ci-log-scale-4kpersec-5klogline.yaml new file mode 100644 index 000000000..ff619a822 --- /dev/null +++ b/test/containerlog-scale-tests/ci-log-scale-4kpersec-5klogline.yaml @@ -0,0 +1,60 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: ci-log-scale +spec: + parallelism: 1 + completions: 1 + template: + metadata: + name: ci-log-scale + spec: + volumes: + - name: ci-log-scale-scripts-volume + configMap: + name: test-scripts + containers: + - name: ci-log-scale + image: ubuntu + volumeMounts: + - mountPath: /test-scripts + name: ci-log-scale-scripts-volume + env: + - name: HOME + value: /tmp + command: + - /bin/sh + - -c + - | + echo "scripts in /test-scripts" + ls -lh /test-scripts + echo "copy scripts to /tmp" + cp /test-scripts/*.sh /tmp + echo "apply 'chmod +x' to /tmp/*.sh" + chmod +x /tmp/*.sh + echo "script.sh in /tmp" + ls -lh /tmp + /tmp/script.sh + restartPolicy: Never +--- +apiVersion: v1 +items: +- apiVersion: v1 + data: + script.sh: | + #!/bin/bash + logentry='' + for var in {1..1024..1} + do + logentry="${logentry}Test-" + done + for var in {1..200000..1} + do + echo $(date "+%Y/%m/%d 
%H:%M:%S.%3N") ${var}: $logentry + done + kind: ConfigMap + metadata: + creationTimestamp: null + name: test-scripts +kind: List +metadata: {} From 8beabe3cef2fdc4a60e79d3866bf9d4e4723f0a2 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 12 May 2021 17:16:21 -0700 Subject: [PATCH 099/301] Update ReleaseNotes.md (#558) fix imagetag in the release notes --- ReleaseNotes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 979eb968b..b4c0d6ba4 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -13,7 +13,7 @@ Note : The agent version(s) below has dates (ciprod), which indicate t ### 05/12/2021 - -##### Version microsoft/oms:ciprod00512021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod00512021 (linux) +##### Version microsoft/oms:ciprod00512021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021 (linux) ##### No Windows changes with this release, win-ciprod04222021 still current. ##### Code change log - Upgrading oneagent to version 1.8 (only for Linux) From 3805f44d89abd9034756c41174da5f1ba58e9500 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Wed, 19 May 2021 07:09:19 -0700 Subject: [PATCH 100/301] Add wait time for telegraf and also force mdm egress to use tls 1.2 (#560) * Add wait time for telegraf and also force mdm egress to use tls 1.2 * add wait for all telegraf dependencies across all containers (ds & rs) * remove ssl change so we dont include as part of the other fix until we test with att nodes. --- kubernetes/linux/main.sh | 52 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index f03318ad1..c7d939034 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -1,5 +1,43 @@ #!/bin/bash +waitforlisteneronTCPport() { + local sleepdurationsecs=1 + local totalsleptsecs=0 + local port=$1 + local waittimesecs=$2 + local numeric='^[0-9]+$' + local varlistener="" + + if [ -z "$1" ] || [ -z "$2" ]; then + echo "${FUNCNAME[0]} called with incorrect arguments<$1 , $2>. Required arguments <#port, #wait-time-in-seconds>" + return -1 + else + + if [[ $port =~ $numeric ]] && [[ $waittimesecs =~ $numeric ]]; then + #local varlistener=$(netstat -lnt | awk '$6 == "LISTEN" && $4 ~ ":25228$"') + while true + do + if [ $totalsleptsecs -gt $waittimesecs ]; then + echo "${FUNCNAME[0]} giving up waiting for listener on port:$port after $totalsleptsecs secs" + return 1 + fi + varlistener=$(netstat -lnt | awk '$6 == "LISTEN" && $4 ~ ":'"$port"'$"') + if [ -z "$varlistener" ]; then + #echo "${FUNCNAME[0]} waiting for $sleepdurationsecs more sec for listener on port:$port ..." + sleep $sleepdurationsecs + totalsleptsecs=$(($totalsleptsecs+1)) + else + echo "${FUNCNAME[0]} found listener on port:$port in $totalsleptsecs secs" + return 0 + fi + done + else + echo "${FUNCNAME[0]} called with non-numeric arguments<$1 , $2>. Required arguments <#port, #wait-time-in-seconds>" + return -1 + fi + fi +} + if [ -e "/etc/config/kube.conf" ]; then cat /etc/config/kube.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf elif [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then @@ -689,6 +727,20 @@ echo "export HOST_ETC=/hostfs/etc" >> ~/.bashrc export HOST_VAR=/hostfs/var echo "export HOST_VAR=/hostfs/var" >> ~/.bashrc +if [ ! 
-e "/etc/config/kube.conf" ]; then + if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then + echo "checking for listener on tcp #25229 and waiting for 30 secs if not.." + waitforlisteneronTCPport 25229 30 + else + echo "checking for listener on tcp #25226 and waiting for 30 secs if not.." + waitforlisteneronTCPport 25226 30 + echo "checking for listener on tcp #25228 and waiting for 30 secs if not.." + waitforlisteneronTCPport 25228 30 + fi +else + echo "checking for listener on tcp #25226 and waiting for 30 secs if not.." + waitforlisteneronTCPport 25226 30 +fi #start telegraf /opt/telegraf --config $telegrafConfFile & From 7c5087f7f57d8b3e3a1f430ab3dfca19f8f888a9 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Wed, 19 May 2021 16:22:27 -0700 Subject: [PATCH 101/301] partially disabled telegraf liveness probe check, we'll still have telemetry but the probe won't fail if telegraf isn't running (#561) --- build/linux/installer/scripts/livenessprobe.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index 198b4e87f..5e1261e7e 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -30,9 +30,9 @@ fi (ps -ef | grep telegraf | grep -v "grep") if [ $? -ne 0 ] then - echo "Telegraf is not running" > /dev/termination-log + # echo "Telegraf is not running" > /dev/termination-log echo "Telegraf is not running (controller: ${CONTROLLER_TYPE}, container type: ${CONTAINER_TYPE})" > /dev/write-to-traces # this file is tailed and sent to traces - exit 1 + # exit 1 fi if [ -s "inotifyoutput.txt" ] From 0d33489aaef5f0824b94c0b644ebe39c1501d576 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Thu, 20 May 2021 13:08:53 -0700 Subject: [PATCH 102/301] changes for 05202021 release (#563) * changes for 05202021 release * fixed typos --- ReleaseNotes.md | 8 +++++++- build/version | 2 +- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 10 +++++----- 4 files changed, 14 insertions(+), 8 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index b4c0d6ba4..d7d6de6af 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,9 +11,15 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 05/20/2021 - +##### Version microsoft/oms:ciprod05202021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021 (linux) +##### No Windows changes with this release, win-ciprod04222021 still current. +##### Code change log +- Telegraf now waits 30 seconds on startup for network connections to complete (Linux only) +- Change adding telegraf to the liveness probe reverted (Linux only) ### 05/12/2021 - -##### Version microsoft/oms:ciprod00512021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021 (linux) +##### Version microsoft/oms:ciprod05122021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021 (linux) ##### No Windows changes with this release, win-ciprod04222021 still current. 
##### Code change log - Upgrading oneagent to version 1.8 (only for Linux) diff --git a/build/version b/build/version index 81bb808f5..d70d1f9bc 100644 --- a/build/version +++ b/build/version @@ -3,7 +3,7 @@ # Build Version Information CONTAINER_BUILDVERSION_MAJOR=15 -CONTAINER_BUILDVERSION_MINOR=1 +CONTAINER_BUILDVERSION_MINOR=2 CONTAINER_BUILDVERSION_PATCH=0 CONTAINER_BUILDVERSION_BUILDNR=0 CONTAINER_BUILDVERSION_DATE=20210512 diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 822e52bc8..3ad3cd315 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod05122021 +ARG IMAGE_TAG=ciprod05202021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index bf94490ba..6ff02c941 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -358,7 +358,7 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "15.1.0-0" + dockerProviderVersion: "15.2.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021" imagePullPolicy: IfNotPresent resources: limits: @@ -446,7 +446,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021" imagePullPolicy: IfNotPresent resources: limits: @@ -583,13 +583,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "15.1.0-0" + dockerProviderVersion: "15.2.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021" imagePullPolicy: IfNotPresent resources: limits: From 486acfd719288196b192e751e1e644d2b054df0e Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Fri, 21 May 2021 11:42:26 -0700 Subject: [PATCH 103/301] Rashmi/jedi wireserver (#566) --- kubernetes/omsagent.yaml | 11 +++++++++ kubernetes/windows/main.ps1 | 49 ++++++++++++++++++++++++++++++++++++- 2 files changed, 59 insertions(+), 1 deletion(-) diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 6ff02c941..ab6bbea9c 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -790,6 +790,9 @@ spec: fieldPath: status.hostIP - name: SIDECAR_SCRAPING_ENABLED value: "true" + # Add this only for clouds that require cert bootstrapping + - name: REQUIRES_CERT_BOOTSTRAP + value: "true" volumeMounts: - mountPath: C:\ProgramData\docker\containers name: docker-windows-containers @@ -805,6 +808,10 @@ spec: - mountPath: C:\etc\config\adx name: omsagent-adx-secret readOnly: true + # Need to mount this only for airgapped clouds - Commenting this since it wont exist in non airgapped clouds + # - mountPath: C:\ca + # name: ca-certs + # readOnly: true livenessProbe: exec: command: @@ -836,6 +843,10 @@ spec: - name: 
docker-windows-kuberenetes-container-logs hostPath: path: C:\var + # Need to mount this only for airgapped clouds - Commenting this since it wont exist in non airgapped clouds + #- name: ca-certs + # hostPath: + # path: C:\ca - name: docker-windows-containers hostPath: path: C:\ProgramData\docker\containers diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 95cba2579..baf95fca4 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -119,6 +119,25 @@ function Set-EnvironmentVariables { $env:AZMON_AGENT_CFG_SCHEMA_VERSION } + # Need to do this before the SA fetch for AI key for airgapped clouds so that it is not overwritten with defaults. + $appInsightsAuth = [System.Environment]::GetEnvironmentVariable("APPLICATIONINSIGHTS_AUTH", "process") + if (![string]::IsNullOrEmpty($appInsightsAuth)) { + [System.Environment]::SetEnvironmentVariable("APPLICATIONINSIGHTS_AUTH", $appInsightsAuth, "machine") + Write-Host "Successfully set environment variable APPLICATIONINSIGHTS_AUTH - $($appInsightsAuth) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable APPLICATIONINSIGHTS_AUTH for target 'machine' since it is either null or empty" + } + + $appInsightsEndpoint = [System.Environment]::GetEnvironmentVariable("APPLICATIONINSIGHTS_ENDPOINT", "process") + if (![string]::IsNullOrEmpty($appInsightsEndpoint)) { + [System.Environment]::SetEnvironmentVariable("APPLICATIONINSIGHTS_ENDPOINT", $appInsightsEndpoint, "machine") + Write-Host "Successfully set environment variable APPLICATIONINSIGHTS_ENDPOINT - $($appInsightsEndpoint) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable APPLICATIONINSIGHTS_ENDPOINT for target 'machine' since it is either null or empty" + } + # Check if the instrumentation key needs to be fetched from a storage account (as in airgapped clouds) $aiKeyURl = [System.Environment]::GetEnvironmentVariable('APPLICATIONINSIGHTS_AUTH_URL') if ($aiKeyURl) { @@ -161,7 +180,6 @@ function Set-EnvironmentVariables { [System.Environment]::SetEnvironmentVariable("TELEMETRY_APPLICATIONINSIGHTS_KEY", $aiKeyDecoded, "Process") [System.Environment]::SetEnvironmentVariable("TELEMETRY_APPLICATIONINSIGHTS_KEY", $aiKeyDecoded, "Machine") - # run config parser ruby /opt/omsagentwindows/scripts/ruby/tomlparser.rb .\setenv.ps1 @@ -404,12 +422,41 @@ function Test-CertificatePath { } } +function Bootstrap-CACertificates { + try { + # This is required when the root CA certs are different for some clouds. + $certMountPath = "C:\ca" + Get-ChildItem $certMountPath | + Foreach-Object { + $absolutePath=$_.FullName + Write-Host "cert path: $($absolutePath)" + Import-Certificate -FilePath $absolutePath -CertStoreLocation 'Cert:\LocalMachine\Root' -Verbose + } + } + catch { + $e = $_.Exception + Write-Host $e + Write-Host "exception occured in Bootstrap-CACertificates..." 
+ } +} + Start-Transcript -Path main.txt Remove-WindowsServiceIfItExists "fluentdwinaks" Set-EnvironmentVariables Start-FileSystemWatcher +#Bootstrapping CA certs for non public clouds and AKS clusters +$aksResourceId = [System.Environment]::GetEnvironmentVariable("AKS_RESOURCE_ID") +$requiresCertBootstrap = [System.Environment]::GetEnvironmentVariable("REQUIRES_CERT_BOOTSTRAP") +if (![string]::IsNullOrEmpty($requiresCertBootstrap) -and ` + $requiresCertBootstrap.ToLower() -eq 'true' -and ` + ![string]::IsNullOrEmpty($aksResourceId) -and ` + $aksResourceId.ToLower().Contains("/microsoft.containerservice/managedclusters/")) +{ + Bootstrap-CACertificates +} + Generate-Certificates Test-CertificatePath Start-Fluent-Telegraf From 0fa350e66edeb65b26a6354a425ad2322296f7d8 Mon Sep 17 00:00:00 2001 From: saaror <31900410+saaror@users.noreply.github.com> Date: Fri, 21 May 2021 16:02:58 -0700 Subject: [PATCH 104/301] Update ReadMe.md (#565) * Update ReadMe.md * Update ReadMe.md Included feedback from OSM team and Fixed --- Documentation/OSMPrivatePreview/ReadMe.md | 37 ++++++++++++++++++++--- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/Documentation/OSMPrivatePreview/ReadMe.md b/Documentation/OSMPrivatePreview/ReadMe.md index aa90c7413..da125a35c 100644 --- a/Documentation/OSMPrivatePreview/ReadMe.md +++ b/Documentation/OSMPrivatePreview/ReadMe.md @@ -1,15 +1,17 @@ Note - This is private preview. For any support issues, please reach out to us at [askcoin@microsoft.com](mailto:askcoin@microsoft.com). Please don't open a support ticket. +This private preview supports Open Service Mesh on [AKS](https://docs.microsoft.com/azure/aks/servicemesh-osm-about) & Azure [Arc on k8s](http://docs.microsoft.com/azure/azure-arc/kubernetes/tutorial-arc-enabled-osm). + # Azure Monitor Container Insights Open Service Mesh Monitoring Azure Monitor container insights now supporting preview of [Open Service Mesh(OSM)](https://docs.microsoft.com/azure/aks/servicemesh-osm-about) Monitoring. As part of this support, customer can: 1. Filter & view inventory of all the services that are part of your service mesh. 2. Visualize and monitor requests between services in your service mesh, with request latency, error rate & resource utilization by services. -3. Provides connection summary for OSM infrastructure running on AKS. +3. Provides connection summary for OSM infrastructure running on AKS or Azure Arc for k8s. ## How to onboard Container Insights OSM monitoring? OSM exposes Prometheus metrics which Container Insights can collect, for container insights agent to collect OSM metrics follow the following steps. - +### AKS 1. Follow this [link](https://docs.microsoft.com/en-us/azure/aks/servicemesh-osm-about?pivots=client-operating-system-linux#register-the-aks-openservicemesh-preview-feature) as a prereq before enabling the addon. 2. Enable AKS OSM addon on your @@ -27,9 +29,29 @@ osm metrics enable --namespace "test1, test2" * Download the configmap from [here](https://github.com/microsoft/Docker-Provider/blob/ci_prod/kubernetes/container-azm-ms-osmconfig.yaml) * Add the namespaces you want to monitor in configmap `monitor_namespaces = ["namespace1", "namespace2"]` * Run the following kubectl command: kubectl apply -f - * Example: `kubectl apply -f container-azm-ms-agentconfig.yaml` + * Example: `kubectl apply -f container-azm-ms-osmconfig.yaml` 4. The configuration change can take upto 15 mins to finish before taking effect, and all omsagent pods in the cluster will restart. 
The restart is a rolling restart for all omsagent pods, not all restart at the same time. +### Azure Arc for Kuberentes +This section assumes that you already have your kubernetes distribution connected via Azure Arc. If not learn more [here.](https://docs.microsoft.com/en-us/azure/azure-arc/kubernetes/quickstart-connect-cluster) + +1. Install Arc enabled Open Service mesh on your Arc cluster. Learn more [here](http://docs.microsoft.com/azure/azure-arc/kubernetes/tutorial-arc-enabled-osm#install-arc-enabled-open-service-mesh-osm-on-an-arc-enabled-kubernetes-cluster) +2. Install Azure Monitor Container Insights on Arc. If not installed already. Learn more how to install [here](https://docs.microsoft.com/azure/azure-monitor/containers/container-insights-enable-arc-enabled-clusters) +3. Ensure that prometheus_scraping is set to true in the OSM configmap. +3. Ensure that the application namespaces that you wish to be monitored are onboarded to the mesh. Follow the guidance available [here.](http://docs.microsoft.com/azure/azure-arc/kubernetes/tutorial-arc-enabled-osm#onboard-namespaces-to-the-service-mesh) +4. To enable namespace(s), download the osm client library [here](https://docs.microsoft.com/en-us/azure/aks/servicemesh-osm-about?pivots=client-operating-system-linux#osm-service-quotas-and-limits-preview) & then enable metrics on namespaces +```bash +# With osm +osm metrics enable --namespace test +osm metrics enable --namespace "test1, test2" + +``` +4. On your Azure Monitor Container Insights for Arc. + * Download the configmap from [here](https://github.com/microsoft/Docker-Provider/blob/ci_prod/kubernetes/container-azm-ms-osmconfig.yaml) + * Add the namespaces you want to monitor in configmap `monitor_namespaces = ["namespace1", "namespace2"]` + * Run the following kubectl command: kubectl apply -f + * Example: `kubectl apply -f container-azm-ms-osmconfig.yaml` +5. The configuration change can take upto 15 mins to finish before taking effect, and all omsagent pods in the cluster will restart. The restart is a rolling restart for all omsagent pods, not all restart at the same time. ## Validate the metrics flow 1. Query cluster's Log Analytics workspace InsightsMetrics table to see metrics are flowing or not @@ -41,8 +63,9 @@ InsightsMetrics ## How to consume OSM monitoring dashboard? 1. Access your AKS cluster & Container Insights through this [link.](https://aka.ms/azmon/osmux) -2. Go to reports tab and access Open Service Mesh (OSM) workbook. -3. Select the time-range & namespace to scope your services. By default, we only show services deployed by customers and we exclude internal service communication. In case you want to view that you select Show All in the filter. Please note OSM is managed service mesh, we show all internal connections for transparency. + * For **Azure Arc for k8s**, access Container Insights through this [link.](https://aka.ms/azmon/osmarcux) +3. Go to reports tab and access Open Service Mesh (OSM) workbook. +4. Select the time-range & namespace to scope your services. By default, we only show services deployed by customers and we exclude internal service communication. In case you want to view that you select Show All in the filter. Please note OSM is managed service mesh, we show all internal connections for transparency. ![alt text](https://github.com/microsoft/Docker-Provider/blob/saarorOSMdoc/Documentation/OSMPrivatePreview/Image1.jpg) ### Requests Tab @@ -51,6 +74,8 @@ InsightsMetrics 3. 
You can view total requests, request error rate & P90 latency. 4. You can drill-down to destination and view trends for HTTP error/success code, success rate, Pods resource utilization, latencies at different percentiles. +![image](https://user-images.githubusercontent.com/31900410/119195241-2e712000-ba39-11eb-8cb0-2d7d16e26d1b.png) + ### Connections Tab 1. This tab provides you a summary of all the connections between your services in Open Service Mesh. 2. Outbound connections: Total number of connections between Source and destination services. @@ -68,4 +93,6 @@ InsightsMetrics 2. When source or destination is osmcontroller we show no latency & for internal services we show no resource utilization. 3. When both prometheus scraping using pod annotations and OSM monitoring are enabled on the same set of namespaces, the default set of metrics (envoy_cluster_upstream_cx_total, envoy_cluster_upstream_cx_connect_fail, envoy_cluster_upstream_rq, envoy_cluster_upstream_rq_xx, envoy_cluster_upstream_rq_total, envoy_cluster_upstream_rq_time_bucket, envoy_cluster_upstream_cx_rx_bytes_total, envoy_cluster_upstream_cx_tx_bytes_total, envoy_cluster_upstream_cx_active) will be collected twice. You can follow [this](https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-prometheus-integration#prometheus-scraping-settings) documentation to exclude these namespaces from pod annotation scraping using the setting monitor_kubernetes_pods_namespaces to work around this issue. +4. For monitoring on **Azure Arc on k8s** currently there is a separate link to access OSM workbook. We plan to have one single link to access workbook on both platforms by 10th June 2021. + This is private preview, the goal for us is to get feedback. Please feel free to reach out to us at [askcoin@microsoft.com](mailto:askcoin@microsoft.com) for any feedback and questions! 
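The validation step above checks the workspace's InsightsMetrics table for OSM data. A minimal sketch of running the same check from the Azure CLI, assuming the `log-analytics` CLI extension is installed and `<workspace-guid>` is replaced with the workspace customer ID; the `envoy_cluster_upstream` prefix matches the default OSM metric names listed in the known issues above:

```bash
# Sketch: confirm OSM metrics are flowing into the Log Analytics workspace.
az monitor log-analytics query \
  --workspace "<workspace-guid>" \
  --analytics-query 'InsightsMetrics | where Name startswith "envoy_cluster_upstream" | summarize count() by Name' \
  --output table
```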
From c7075394ce704193a0198a5e7b93a5b6d7186054 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 21 May 2021 18:26:24 -0700 Subject: [PATCH 105/301] Gangams/aad stage2 full switch to mdsd (#559) * full switch to mdsd, upgrade to ruby v1 & omsagent removal * add odsdirect as fallback option * cleanup * cleanup * move customRegion to stage3 * updates related to containerlog route * make xml eventschema consistent * add buffer settings * address HTTPServerException deprecation in ruby 2.6 * update to official mdsd version * fix log message issue * fix pr feedback * get ridoff unused code from omscommon * fix pr feedback * fix pr feedback * clean up * clean up * fix missing conf --- build/common/installer/scripts/tomlparser.rb | 16 +- build/linux/installer/conf/container.conf | 318 ++--- build/linux/installer/conf/kube.conf | 509 +++++--- build/linux/installer/conf/out_oms.conf | 5 +- .../installer/datafiles/base_container.data | 297 +++-- build/linux/installer/datafiles/linux.data | 18 +- .../linux/installer/datafiles/linux_dpkg.data | 2 +- .../linux/installer/datafiles/linux_rpm.data | 2 +- .../linux/installer/scripts/livenessprobe.sh | 18 +- .../scripts/tomlparser-mdm-metrics-config.rb | 2 +- .../tomlparser-metric-collection-config.rb | 2 +- kubernetes/linux/envmdsd | 2 - kubernetes/linux/main.sh | 285 ++--- kubernetes/linux/mdsd.xml | 345 +++++- kubernetes/linux/setup.sh | 52 +- source/plugins/go/src/oms.go | 386 ++++-- source/plugins/go/src/telemetry.go | 17 + source/plugins/go/src/utils.go | 114 +- .../ruby/ApplicationInsightsUtility.rb | 22 +- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 212 ++-- source/plugins/ruby/DockerApiClient.rb | 2 +- source/plugins/ruby/KubernetesApiClient.rb | 100 +- source/plugins/ruby/MdmMetricsGenerator.rb | 16 +- source/plugins/ruby/constants.rb | 2 +- source/plugins/ruby/filter_cadvisor2mdm.rb | 111 +- .../ruby/filter_cadvisor_health_container.rb | 15 +- .../ruby/filter_cadvisor_health_node.rb | 28 +- source/plugins/ruby/filter_container.rb | 59 - source/plugins/ruby/filter_docker_log.rb | 103 -- .../ruby/filter_health_model_builder.rb | 43 +- source/plugins/ruby/filter_inventory2mdm.rb | 24 +- source/plugins/ruby/filter_telegraf2mdm.rb | 8 +- ...h_container_cpu_memory_record_formatter.rb | 8 +- .../ruby/health/health_monitor_utils.rb | 12 +- source/plugins/ruby/in_cadvisor_perf.rb | 42 +- source/plugins/ruby/in_containerinventory.rb | 29 +- source/plugins/ruby/in_kube_events.rb | 31 +- source/plugins/ruby/in_kube_health.rb | 16 +- source/plugins/ruby/in_kube_nodes.rb | 111 +- source/plugins/ruby/in_kube_podinventory.rb | 116 +- source/plugins/ruby/in_kube_pvinventory.rb | 37 +- .../plugins/ruby/in_kubestate_deployments.rb | 37 +- source/plugins/ruby/in_kubestate_hpa.rb | 33 +- source/plugins/ruby/in_win_cadvisor_perf.rb | 28 +- source/plugins/ruby/out_health_forward.rb | 1074 ++++++++++------- source/plugins/ruby/out_mdm.rb | 85 +- source/plugins/ruby/podinventory_to_mdm.rb | 10 +- source/plugins/utils/oms_common.rb | 143 +++ source/plugins/utils/omslog.rb | 50 + 49 files changed, 2821 insertions(+), 2176 deletions(-) delete mode 100644 source/plugins/ruby/filter_container.rb delete mode 100644 source/plugins/ruby/filter_docker_log.rb create mode 100644 source/plugins/utils/oms_common.rb create mode 100644 source/plugins/utils/omslog.rb diff --git a/build/common/installer/scripts/tomlparser.rb b/build/common/installer/scripts/tomlparser.rb index a0f3c2f0a..b173ecfe3 100644 --- a/build/common/installer/scripts/tomlparser.rb +++ 
b/build/common/installer/scripts/tomlparser.rb @@ -25,8 +25,10 @@ @enrichContainerLogs = false @containerLogSchemaVersion = "" @collectAllKubeEvents = false -@containerLogsRoute = "" - +@containerLogsRoute = "v2" # default for linux +if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 + @containerLogsRoute = "v1" # default is v1 for windows until windows agent integrates windows ama +end # Use parser to parse the configmap toml file to a ruby structure def parseConfigMap begin @@ -162,8 +164,12 @@ def populateSettingValuesFromConfigMap(parsedConfig) #Get container logs route setting begin if !parsedConfig[:log_collection_settings][:route_container_logs].nil? && !parsedConfig[:log_collection_settings][:route_container_logs][:version].nil? - @containerLogsRoute = parsedConfig[:log_collection_settings][:route_container_logs][:version] - puts "config::Using config map setting for container logs route" + if !parsedConfig[:log_collection_settings][:route_container_logs][:version].empty? + @containerLogsRoute = parsedConfig[:log_collection_settings][:route_container_logs][:version] + puts "config::Using config map setting for container logs route: #{@containerLogsRoute}" + else + puts "config::Ignoring config map settings and using default value since provided container logs route value is empty" + end end rescue => errorStr ConfigParseErrorLogger.logError("Exception while reading config map settings for container logs route - #{errorStr}, using defaults, please check config map for errors") @@ -256,7 +262,7 @@ def get_command_windows(env_variable_name, env_variable_value) file.write(commands) commands = get_command_windows('AZMON_CLUSTER_COLLECT_ALL_KUBE_EVENTS', @collectAllKubeEvents) file.write(commands) - commands = get_command_windows('AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE', @containerLogsRoute) + commands = get_command_windows('AZMON_CONTAINER_LOGS_ROUTE', @containerLogsRoute) file.write(commands) commands = get_command_windows('AZMON_CONTAINER_LOG_SCHEMA_VERSION', @containerLogSchemaVersion) file.write(commands) diff --git a/build/linux/installer/conf/container.conf b/build/linux/installer/conf/container.conf index 958a85eb6..093c9ef12 100644 --- a/build/linux/installer/conf/container.conf +++ b/build/linux/installer/conf/container.conf @@ -1,141 +1,179 @@ -# Fluentd config file for OMS Docker - container components (non kubeAPI) - -# Forward port 25225 for container logs - - type forward - port 25225 - bind 127.0.0.1 - - -# MDM metrics from telegraf - - @type tcp - tag oms.mdm.container.perf.telegraf.* - bind 0.0.0.0 - port 25228 - format json - - -# Container inventory - - type containerinventory - tag oms.containerinsights.containerinventory - run_interval 60 - log_level debug - - -#cadvisor perf - - type cadvisorperf - tag oms.api.cadvisorperf - run_interval 60 - log_level debug - - - - type filter_cadvisor_health_node - log_level debug - - - - type filter_cadvisor_health_container - log_level debug - - -#custom_metrics_mdm filter plugin - - type filter_cadvisor2mdm - metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,memoryRssBytes,pvUsedBytes - log_level info - - - - type filter_telegraf2mdm - log_level debug - - - - type out_oms - log_level debug - num_threads 5 - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_containerinventory*.buffer - buffer_queue_full_action drop_oldest_chunk - buffer_chunk_limit 4m - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m - - - - type out_oms - log_level debug - num_threads 5 - 
buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_cadvisorperf*.buffer - buffer_queue_full_action drop_oldest_chunk - buffer_chunk_limit 4m - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m - - - - - @type health_forward - send_timeout 60s - recover_wait 10s - hard_timeout 60s - heartbeat_type tcp - skip_network_error_at_init true - expire_dns_cache 600s - buffer_queue_full_action drop_oldest_chunk - buffer_type file - buffer_path %STATE_DIR_WS%/out_health_forward*.buffer - buffer_chunk_limit 3m - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m - - - host "#{ENV['HEALTHMODEL_REPLICASET_SERVICE_SERVICE_HOST']}" - port "#{ENV['HEALTHMODEL_REPLICASET_SERVICE_SERVICE_PORT']}" - - - + # Fluentd config file for OMS Docker - container components (non kubeAPI) + + # Forward port 25225 for container logs + # gangams - not used and get ridoff after confirming safe to remove + + @type forward + port 25225 + bind 127.0.0.1 + + + # MDM metrics from telegraf + + @type tcp + tag oms.mdm.container.perf.telegraf.* + bind 0.0.0.0 + port 25228 + format json + + + # Container inventory + + @type containerinventory + tag oneagent.containerInsights.CONTAINER_INVENTORY_BLOB + run_interval 60 + @log_level debug + + + #cadvisor perf + + @type cadvisor_perf + tag oneagent.containerInsights.LINUX_PERF_BLOB + run_interval 60 + @log_level debug + + + + @type cadvisor_health_node + @log_level debug + + + + @type cadvisor_health_container + @log_level debug + + + #custom_metrics_mdm filter plugin + + @type cadvisor2mdm + metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,memoryRssBytes,pvUsedBytes + @log_level info + + + + @type telegraf2mdm + @log_level debug + + + #containerinventory + + @type forward + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + + @type file - path %STATE_DIR_WS%/fluent_forward_failed.buffer - - - - - type out_mdm - log_level debug - num_threads 5 - buffer_type file - buffer_path %STATE_DIR_WS%/out_mdm_cdvisorperf*.buffer - buffer_queue_full_action drop_oldest_chunk - buffer_chunk_limit 4m - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m - retry_mdm_post_wait_minutes 30 - - - - type out_oms - log_level debug - num_threads 5 - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_insightsmetrics*.buffer - buffer_queue_full_action drop_oldest_chunk - buffer_chunk_limit 4m - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m - + path /var/opt/microsoft/docker-cimprov/state/containerinventory*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length 20 + flush_interval 20s + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + + keepalive true + + + #cadvisorperf + + @type forward + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + + + @type file + path /var/opt/microsoft/docker-cimprov/state/cadvisorperf*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length 20 + flush_interval 20s + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + + keepalive true + + + + @type health_forward + send_timeout 60s + recover_wait 10s + hard_timeout 60s + transport tcp + ignore_network_errors_at_startup true + expire_dns_cache 600s + + @type file + overflow_action drop_oldest_chunk + path 
/var/opt/microsoft/docker-cimprov/state/out_health_forward*.buffer + chunk_limit_size 3m + flush_interval 20s + retry_max_times 10 + retry_max_interval 5m + retry_wait 5s + + + host "#{ENV['HEALTHMODEL_REPLICASET_SERVICE_SERVICE_HOST']}" + port "#{ENV['HEALTHMODEL_REPLICASET_SERVICE_SERVICE_PORT']}" + + + @type file + path /var/opt/microsoft/docker-cimprov/state/fluent_forward_failed.buffer + + + + + @type mdm + @log_level debug + + @type file + path /var/opt/microsoft/docker-cimprov/state/out_mdm_cdvisorperf*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + flush_interval 20s + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + + retry_mdm_post_wait_minutes 30 + + + #InsightsMetrics + + @type forward + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + + + @type file + path /var/opt/microsoft/docker-cimprov/state/insightsmetrics*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length 20 + flush_interval 20s + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + + keepalive true + diff --git a/build/linux/installer/conf/kube.conf b/build/linux/installer/conf/kube.conf index fb566c360..a1c8bf928 100644 --- a/build/linux/installer/conf/kube.conf +++ b/build/linux/installer/conf/kube.conf @@ -1,7 +1,6 @@ -# Fluentd config file for OMS Docker - cluster components (kubeAPI) #fluent forward plugin - type forward + @type forward port "#{ENV['HEALTHMODEL_REPLICASET_SERVICE_SERVICE_PORT']}" bind 0.0.0.0 chunk_size_limit 4m @@ -9,262 +8,378 @@ #Kubernetes pod inventory - type kubepodinventory - tag oms.containerinsights.KubePodInventory + @type kube_podinventory + tag oneagent.containerInsights.KUBE_POD_INVENTORY_BLOB run_interval 60 - log_level debug + @log_level debug #Kubernetes Persistent Volume inventory - type kubepvinventory - tag oms.containerinsights.KubePVInventory + @type kube_pvinventory + tag oneagent.containerInsights.KUBE_PV_INVENTORY_BLOB run_interval 60 - log_level debug + @log_level debug #Kubernetes events - type kubeevents - tag oms.containerinsights.KubeEvents + @type kube_events + tag oneagent.containerInsights.KUBE_EVENTS_BLOB run_interval 60 - log_level debug - + @log_level debug + #Kubernetes Nodes - type kubenodeinventory - tag oms.containerinsights.KubeNodeInventory + @type kube_nodes + tag oneagent.containerInsights.KUBE_NODE_INVENTORY_BLOB run_interval 60 - log_level debug + @log_level debug #Kubernetes health - type kubehealth + @type kube_health tag kubehealth.ReplicaSet run_interval 60 - log_level debug + @log_level debug #cadvisor perf- Windows nodes - type wincadvisorperf - tag oms.api.wincadvisorperf + @type win_cadvisor_perf + tag oneagent.containerInsights.LINUX_PERF_BLOB run_interval 60 - log_level debug + @log_level debug #Kubernetes object state - deployments - - type kubestatedeployments - tag oms.containerinsights.KubeStateDeployments - run_interval 60 - log_level debug - + + @type kubestate_deployments + tag oneagent.containerInsights.INSIGHTS_METRICS_BLOB + run_interval 60 + @log_level debug + - #Kubernetes object state - HPA - - type kubestatehpa - tag oms.containerinsights.KubeStateHpa - run_interval 60 - log_level debug - + #Kubernetes object state - HPA + + @type kubestate_hpa + tag oneagent.containerInsights.INSIGHTS_METRICS_BLOB + run_interval 60 + @log_level debug + - type filter_inventory2mdm - log_level info + @type inventory2mdm + @log_level info 
#custom_metrics_mdm filter plugin for perf data from windows nodes - type filter_cadvisor2mdm + @type cadvisor2mdm metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,pvUsedBytes - log_level info + @log_level info #health model aggregation filter - type filter_health_model_builder + @type health_model_builder - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 4m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_kubepods*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m + #kubepodinventory + + @type forward + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + + + @type file + path /var/opt/microsoft/docker-cimprov/state/kubepod*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length 20 + flush_interval 20s + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + + keepalive true - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 4m - buffer_type file - buffer_path %STATE_DIR_WS%/state/out_oms_kubepv*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m - + #kubepvinventory + + @type forward + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + + + @type file + path /var/opt/microsoft/docker-cimprov/state/kubepv*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length 20 + flush_interval 20s + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + + keepalive true + - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 4m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_kubeevents*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m + #InsightsMetrics + #kubestate + + @type forward + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + + + @type file + path /var/opt/microsoft/docker-cimprov/state/insightsmetrics*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length 20 + flush_interval 20s + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + + keepalive true - - type out_oms - log_level debug - num_threads 2 - buffer_chunk_limit 4m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_kubeservices*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m + #kubeevents + + @type forward + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + + + @type file + path /var/opt/microsoft/docker-cimprov/state/kubeevents*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length 20 + flush_interval 20s + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + + keepalive true + + #kubeservices + + @type forward + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + + + @type 
file + path /var/opt/microsoft/docker-cimprov/state/kubeservices*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length 20 + flush_interval 20s + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 2 + + keepalive true + - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 4m - buffer_type file - buffer_path %STATE_DIR_WS%/state/out_oms_kubenodes*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m + #kubenodeinventory + + @type forward + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + + + @type file + path /var/opt/microsoft/docker-cimprov/state/kubenode*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length 20 + flush_interval 20s + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + + keepalive true - - type out_oms - log_level debug - num_threads 3 - buffer_chunk_limit 4m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_containernodeinventory*.buffer - buffer_queue_limit 20 - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m + #containernodeinventory + + @type forward + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + + + @type file + path /var/opt/microsoft/docker-cimprov/state/containernodeinventory*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length 20 + flush_interval 20s + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 3 + + keepalive true - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 4m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_kubeperf*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m + #containerinventory for windows containers + + @type forward + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + + + @type file + path /var/opt/microsoft/docker-cimprov/state/containerinventory*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length 20 + flush_interval 20s + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + + keepalive true + + + #perf + + @type forward + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + + + @type file + path /var/opt/microsoft/docker-cimprov/state/perf*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length 20 + flush_interval 20s + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + + keepalive true - type out_mdm - log_level debug - num_threads 5 - buffer_chunk_limit 4m - buffer_type file - buffer_path %STATE_DIR_WS%/out_mdm_*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m + @type mdm + @log_level debug + + @type file + path /var/opt/microsoft/docker-cimprov/state/out_mdm_*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length 20 + flush_interval 20s + retry_max_times 10 + 
retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + retry_mdm_post_wait_minutes 30 - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 4m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_api_wincadvisorperf*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m - - - type out_mdm - log_level debug - num_threads 5 - buffer_chunk_limit 4m - buffer_type file - buffer_path %STATE_DIR_WS%/out_mdm_cdvisorperf*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m + @type mdm + @log_level debug + + @type file + path /var/opt/microsoft/docker-cimprov/state/out_mdm_cdvisorperf*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length 20 + flush_interval 20s + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + retry_mdm_post_wait_minutes 30 - - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 4m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_kubehealth*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m + + #kubehealth + + @type forward + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + + + @type file + path /var/opt/microsoft/docker-cimprov/state/kubehealth*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length 20 + flush_interval 20s + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + + keepalive true - - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 4m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_insightsmetrics*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m - \ No newline at end of file diff --git a/build/linux/installer/conf/out_oms.conf b/build/linux/installer/conf/out_oms.conf index 74ba3195e..21dc4c1ed 100644 --- a/build/linux/installer/conf/out_oms.conf +++ b/build/linux/installer/conf/out_oms.conf @@ -1,10 +1,9 @@ -omsadmin_conf_path=/etc/opt/microsoft/omsagent/conf/omsadmin.conf omsproxy_secret_path=/etc/omsagent-secret/PROXY adx_cluster_uri_path=/etc/config/settings/adx/ADXCLUSTERURI adx_client_id_path=/etc/config/settings/adx/ADXCLIENTID adx_tenant_id_path=/etc/config/settings/adx/ADXTENANTID adx_client_secret_path=/etc/config/settings/adx/ADXCLIENTSECRET -cert_file_path=/etc/opt/microsoft/omsagent/certs/oms.crt -key_file_path=/etc/opt/microsoft/omsagent/certs/oms.key +cert_file_path=/etc/mdsd.d/oms/%s/oms.crt +key_file_path=/etc/mdsd.d/oms/%s/oms.key container_host_file_path=/var/opt/microsoft/docker-cimprov/state/containerhostname container_inventory_refresh_interval=60 diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index df8fbc3da..b9f889dba 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -18,89 +18,8 @@ MAINTAINER: 'Microsoft Corporation' /etc/opt/microsoft/docker-cimprov/conf/installinfo.txt; build/linux/installer/conf/installinfo.txt; 644; root; root; conffile -/opt/microsoft/omsagent/plugin/filter_docker_log.rb; 
source/plugins/ruby/filter_docker_log.rb; 644; root; root -/opt/microsoft/omsagent/plugin/filter_container.rb; source/plugins/ruby/filter_container.rb; 644; root; root - -/opt/microsoft/omsagent/plugin/in_kube_podinventory.rb; source/plugins/ruby/in_kube_podinventory.rb; 644; root; root -/opt/microsoft/omsagent/plugin/in_kube_pvinventory.rb; source/plugins/ruby/in_kube_pvinventory.rb; 644; root; root -/opt/microsoft/omsagent/plugin/in_kube_events.rb; source/plugins/ruby/in_kube_events.rb; 644; root; root -/opt/microsoft/omsagent/plugin/KubernetesApiClient.rb; source/plugins/ruby/KubernetesApiClient.rb; 644; root; root - /etc/opt/microsoft/docker-cimprov/container.conf; build/linux/installer/conf/container.conf; 644; root; root -/opt/microsoft/omsagent/plugin/CAdvisorMetricsAPIClient.rb; source/plugins/ruby/CAdvisorMetricsAPIClient.rb; 644; root; root -/opt/microsoft/omsagent/plugin/in_cadvisor_perf.rb; source/plugins/ruby/in_cadvisor_perf.rb; 644; root; root -/opt/microsoft/omsagent/plugin/in_win_cadvisor_perf.rb; source/plugins/ruby/in_win_cadvisor_perf.rb; 644; root; root -/opt/microsoft/omsagent/plugin/in_kube_nodes.rb; source/plugins/ruby/in_kube_nodes.rb; 644; root; root -/opt/microsoft/omsagent/plugin/in_kubestate_deployments.rb; source/plugins/ruby/in_kubestate_deployments.rb; 644; root; root -/opt/microsoft/omsagent/plugin/in_kubestate_hpa.rb; source/plugins/ruby/in_kubestate_hpa.rb; 644; root; root -/opt/microsoft/omsagent/plugin/filter_inventory2mdm.rb; source/plugins/ruby/filter_inventory2mdm.rb; 644; root; root -/opt/microsoft/omsagent/plugin/podinventory_to_mdm.rb; source/plugins/ruby/podinventory_to_mdm.rb; 644; root; root -/opt/microsoft/omsagent/plugin/kubelet_utils.rb; source/plugins/ruby/kubelet_utils.rb; 644; root; root -/opt/microsoft/omsagent/plugin/CustomMetricsUtils.rb; source/plugins/ruby/CustomMetricsUtils.rb; 644; root; root -/opt/microsoft/omsagent/plugin/constants.rb; source/plugins/ruby/constants.rb; 644; root; root -/opt/microsoft/omsagent/plugin/MdmAlertTemplates.rb; source/plugins/ruby/MdmAlertTemplates.rb; 644; root; root -/opt/microsoft/omsagent/plugin/MdmMetricsGenerator.rb; source/plugins/ruby/MdmMetricsGenerator.rb; 644; root; root - - -/opt/microsoft/omsagent/plugin/ApplicationInsightsUtility.rb; source/plugins/ruby/ApplicationInsightsUtility.rb; 644; root; root -/opt/microsoft/omsagent/plugin/ContainerInventoryState.rb; source/plugins/ruby/ContainerInventoryState.rb; 644; root; root -/opt/microsoft/omsagent/plugin/DockerApiClient.rb; source/plugins/ruby/DockerApiClient.rb; 644; root; root -/opt/microsoft/omsagent/plugin/DockerApiRestHelper.rb; source/plugins/ruby/DockerApiRestHelper.rb; 644; root; root -/opt/microsoft/omsagent/plugin/in_containerinventory.rb; source/plugins/ruby/in_containerinventory.rb; 644; root; root -/opt/microsoft/omsagent/plugin/kubernetes_container_inventory.rb; source/plugins/ruby/kubernetes_container_inventory.rb; 644; root; root -/opt/microsoft/omsagent/plugin/proxy_utils.rb; source/plugins/ruby/proxy_utils.rb; 644; root; root - -/opt/microsoft/omsagent/plugin/arc_k8s_cluster_identity.rb; source/plugins/ruby/arc_k8s_cluster_identity.rb; 644; root; root -/opt/microsoft/omsagent/plugin/out_mdm.rb; source/plugins/ruby/out_mdm.rb; 644; root; root -/opt/microsoft/omsagent/plugin/filter_cadvisor2mdm.rb; source/plugins/ruby/filter_cadvisor2mdm.rb; 644; root; root -/opt/microsoft/omsagent/plugin/filter_telegraf2mdm.rb; source/plugins/ruby/filter_telegraf2mdm.rb; 644; root; root - 
-/opt/microsoft/omsagent/plugin/lib/application_insights/version.rb; source/plugins/ruby/lib/application_insights/version.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/rack/track_request.rb; source/plugins/ruby/lib/application_insights/rack/track_request.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/unhandled_exception.rb; source/plugins/ruby/lib/application_insights/unhandled_exception.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/telemetry_client.rb; source/plugins/ruby/lib/application_insights/telemetry_client.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/queue_base.rb; source/plugins/ruby/lib/application_insights/channel/queue_base.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/asynchronous_queue.rb; source/plugins/ruby/lib/application_insights/channel/asynchronous_queue.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/synchronous_sender.rb; source/plugins/ruby/lib/application_insights/channel/synchronous_sender.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/data_point_type.rb; source/plugins/ruby/lib/application_insights/channel/contracts/data_point_type.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/data_point.rb; source/plugins/ruby/lib/application_insights/channel/contracts/data_point.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/stack_frame.rb; source/plugins/ruby/lib/application_insights/channel/contracts/stack_frame.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/request_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/request_data.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/session.rb; source/plugins/ruby/lib/application_insights/channel/contracts/session.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/page_view_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/page_view_data.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/remote_dependency_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/remote_dependency_data.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/exception_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/exception_data.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/location.rb; source/plugins/ruby/lib/application_insights/channel/contracts/location.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/operation.rb; source/plugins/ruby/lib/application_insights/channel/contracts/operation.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/data.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/event_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/event_data.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/metric_data.rb; 
source/plugins/ruby/lib/application_insights/channel/contracts/metric_data.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/device.rb; source/plugins/ruby/lib/application_insights/channel/contracts/device.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/message_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/message_data.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/dependency_source_type.rb; source/plugins/ruby/lib/application_insights/channel/contracts/dependency_source_type.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/user.rb; source/plugins/ruby/lib/application_insights/channel/contracts/user.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/severity_level.rb; source/plugins/ruby/lib/application_insights/channel/contracts/severity_level.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/application.rb; source/plugins/ruby/lib/application_insights/channel/contracts/application.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/dependency_kind.rb; source/plugins/ruby/lib/application_insights/channel/contracts/dependency_kind.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/cloud.rb; source/plugins/ruby/lib/application_insights/channel/contracts/cloud.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/envelope.rb; source/plugins/ruby/lib/application_insights/channel/contracts/envelope.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/json_serializable.rb; source/plugins/ruby/lib/application_insights/channel/contracts/json_serializable.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/domain.rb; source/plugins/ruby/lib/application_insights/channel/contracts/domain.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/base.rb; source/plugins/ruby/lib/application_insights/channel/contracts/base.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/reopenings.rb; source/plugins/ruby/lib/application_insights/channel/contracts/reopenings.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/page_view_perf_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/page_view_perf_data.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/internal.rb; source/plugins/ruby/lib/application_insights/channel/contracts/internal.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/availability_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/availability_data.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts/exception_details.rb; source/plugins/ruby/lib/application_insights/channel/contracts/exception_details.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/synchronous_queue.rb; source/plugins/ruby/lib/application_insights/channel/synchronous_queue.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/sender_base.rb; 
source/plugins/ruby/lib/application_insights/channel/sender_base.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/telemetry_context.rb; source/plugins/ruby/lib/application_insights/channel/telemetry_context.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/asynchronous_sender.rb; source/plugins/ruby/lib/application_insights/channel/asynchronous_sender.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/telemetry_channel.rb; source/plugins/ruby/lib/application_insights/channel/telemetry_channel.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/event.rb; source/plugins/ruby/lib/application_insights/channel/event.rb; 644; root; root -/opt/microsoft/omsagent/plugin/lib/application_insights.rb; source/plugins/ruby/lib/application_insights.rb; 644; root; root - /opt/tomlrb.rb; source/toml-parser/tomlrb.rb; 644; root; root /opt/tomlrb/generated_parser.rb; source/toml-parser/tomlrb/generated_parser.rb; 644; root; root /opt/tomlrb/handler.rb; source/toml-parser/tomlrb/handler.rb; 644; root; root @@ -126,6 +45,7 @@ MAINTAINER: 'Microsoft Corporation' /opt/tomlparser-mdm-metrics-config.rb; build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb; 755; root; root /opt/tomlparser-metric-collection-config.rb; build/linux/installer/scripts/tomlparser-metric-collection-config.rb; 755; root; root + /opt/tomlparser-agent-config.rb; build/linux/installer/scripts/tomlparser-agent-config.rb; 755; root; root /opt/tomlparser.rb; build/common/installer/scripts/tomlparser.rb; 755; root; root /opt/td-agent-bit-conf-customizer.rb; build/common/installer/scripts/td-agent-bit-conf-customizer.rb; 755; root; root @@ -134,43 +54,127 @@ MAINTAINER: 'Microsoft Corporation' /opt/tomlparser-osm-config.rb; build/linux/installer/scripts/tomlparser-osm-config.rb; 755; root; root -/opt/microsoft/omsagent/plugin/filter_cadvisor_health_container.rb; source/plugins/ruby/filter_cadvisor_health_container.rb; 644; root; root -/opt/microsoft/omsagent/plugin/filter_cadvisor_health_node.rb; source/plugins/ruby/filter_cadvisor_health_node.rb; 644; root; root -/opt/microsoft/omsagent/plugin/filter_health_model_builder.rb; source/plugins/ruby/filter_health_model_builder.rb; 644; root; root -/opt/microsoft/omsagent/plugin/in_kube_health.rb; source/plugins/ruby/in_kube_health.rb; 644; root; root -/opt/microsoft/omsagent/plugin/out_health_forward.rb; source/plugins/ruby/out_health_forward.rb; 644; root; root /etc/opt/microsoft/docker-cimprov/health/healthmonitorconfig.json; build/linux/installer/conf/healthmonitorconfig.json; 644; root; root /etc/opt/microsoft/docker-cimprov/health/health_model_definition.json; build/linux/installer/conf/health_model_definition.json; 644; root; root -/opt/microsoft/omsagent/plugin/health/aggregate_monitor.rb; source/plugins/ruby/health/aggregate_monitor.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/agg_monitor_id_labels.rb; source/plugins/ruby/health/agg_monitor_id_labels.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/aggregate_monitor_state_finalizer.rb; source/plugins/ruby/health/aggregate_monitor_state_finalizer.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/cluster_health_state.rb; source/plugins/ruby/health/cluster_health_state.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_container_cpu_memory_aggregator.rb; source/plugins/ruby/health/health_container_cpu_memory_aggregator.rb; 644; root; root 
-/opt/microsoft/omsagent/plugin/health/health_container_cpu_memory_record_formatter.rb; source/plugins/ruby/health/health_container_cpu_memory_record_formatter.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_hierarchy_builder.rb; source/plugins/ruby/health/health_hierarchy_builder.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_kubernetes_resources.rb; source/plugins/ruby/health/health_kubernetes_resources.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_kube_api_down_handler.rb; source/plugins/ruby/health/health_kube_api_down_handler.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_missing_signal_generator.rb; source/plugins/ruby/health/health_missing_signal_generator.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_model_buffer.rb; source/plugins/ruby/health/health_model_buffer.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_model_builder.rb; source/plugins/ruby/health/health_model_builder.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_model_constants.rb; source/plugins/ruby/health/health_model_constants.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/parent_monitor_provider.rb; source/plugins/ruby/health/parent_monitor_provider.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_model_definition_parser.rb; source/plugins/ruby/health/health_model_definition_parser.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_monitor_helpers.rb; source/plugins/ruby/health/health_monitor_helpers.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_monitor_optimizer.rb; source/plugins/ruby/health/health_monitor_optimizer.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_monitor_provider.rb; source/plugins/ruby/health/health_monitor_provider.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_monitor_record.rb; source/plugins/ruby/health/health_monitor_record.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_monitor_state.rb; source/plugins/ruby/health/health_monitor_state.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_monitor_telemetry.rb; source/plugins/ruby/health/health_monitor_telemetry.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_monitor_utils.rb; source/plugins/ruby/health/health_monitor_utils.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/health_signal_reducer.rb; source/plugins/ruby/health/health_signal_reducer.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/monitor_factory.rb; source/plugins/ruby/health/monitor_factory.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/monitor_set.rb; source/plugins/ruby/health/monitor_set.rb; 644; root; root -/opt/microsoft/omsagent/plugin/health/unit_monitor.rb; source/plugins/ruby/health/unit_monitor.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/version.rb; source/plugins/ruby/lib/application_insights/version.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/rack/track_request.rb; source/plugins/ruby/lib/application_insights/rack/track_request.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/unhandled_exception.rb; source/plugins/ruby/lib/application_insights/unhandled_exception.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/telemetry_client.rb; source/plugins/ruby/lib/application_insights/telemetry_client.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/queue_base.rb; 
source/plugins/ruby/lib/application_insights/channel/queue_base.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/asynchronous_queue.rb; source/plugins/ruby/lib/application_insights/channel/asynchronous_queue.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/synchronous_sender.rb; source/plugins/ruby/lib/application_insights/channel/synchronous_sender.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/data_point_type.rb; source/plugins/ruby/lib/application_insights/channel/contracts/data_point_type.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/data_point.rb; source/plugins/ruby/lib/application_insights/channel/contracts/data_point.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/stack_frame.rb; source/plugins/ruby/lib/application_insights/channel/contracts/stack_frame.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/request_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/request_data.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/session.rb; source/plugins/ruby/lib/application_insights/channel/contracts/session.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/page_view_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/page_view_data.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/remote_dependency_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/remote_dependency_data.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/exception_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/exception_data.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/location.rb; source/plugins/ruby/lib/application_insights/channel/contracts/location.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/operation.rb; source/plugins/ruby/lib/application_insights/channel/contracts/operation.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/data.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/event_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/event_data.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/metric_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/metric_data.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/device.rb; source/plugins/ruby/lib/application_insights/channel/contracts/device.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/message_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/message_data.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/dependency_source_type.rb; source/plugins/ruby/lib/application_insights/channel/contracts/dependency_source_type.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/user.rb; source/plugins/ruby/lib/application_insights/channel/contracts/user.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/severity_level.rb; source/plugins/ruby/lib/application_insights/channel/contracts/severity_level.rb; 644; 
root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/application.rb; source/plugins/ruby/lib/application_insights/channel/contracts/application.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/dependency_kind.rb; source/plugins/ruby/lib/application_insights/channel/contracts/dependency_kind.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/cloud.rb; source/plugins/ruby/lib/application_insights/channel/contracts/cloud.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/envelope.rb; source/plugins/ruby/lib/application_insights/channel/contracts/envelope.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/json_serializable.rb; source/plugins/ruby/lib/application_insights/channel/contracts/json_serializable.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/domain.rb; source/plugins/ruby/lib/application_insights/channel/contracts/domain.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/base.rb; source/plugins/ruby/lib/application_insights/channel/contracts/base.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/reopenings.rb; source/plugins/ruby/lib/application_insights/channel/contracts/reopenings.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/page_view_perf_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/page_view_perf_data.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/internal.rb; source/plugins/ruby/lib/application_insights/channel/contracts/internal.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/availability_data.rb; source/plugins/ruby/lib/application_insights/channel/contracts/availability_data.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/contracts/exception_details.rb; source/plugins/ruby/lib/application_insights/channel/contracts/exception_details.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/synchronous_queue.rb; source/plugins/ruby/lib/application_insights/channel/synchronous_queue.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/sender_base.rb; source/plugins/ruby/lib/application_insights/channel/sender_base.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/telemetry_context.rb; source/plugins/ruby/lib/application_insights/channel/telemetry_context.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/asynchronous_sender.rb; source/plugins/ruby/lib/application_insights/channel/asynchronous_sender.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/telemetry_channel.rb; source/plugins/ruby/lib/application_insights/channel/telemetry_channel.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights/channel/event.rb; source/plugins/ruby/lib/application_insights/channel/event.rb; 644; root; root +/etc/fluent/plugin/lib/application_insights.rb; source/plugins/ruby/lib/application_insights.rb; 644; root; root + +/etc/fluent/plugin/health/aggregate_monitor.rb; source/plugins/ruby/health/aggregate_monitor.rb; 644; root; root +/etc/fluent/plugin/health/agg_monitor_id_labels.rb; source/plugins/ruby/health/agg_monitor_id_labels.rb; 644; root; root +/etc/fluent/plugin/health/aggregate_monitor_state_finalizer.rb; source/plugins/ruby/health/aggregate_monitor_state_finalizer.rb; 
644; root; root +/etc/fluent/plugin/health/cluster_health_state.rb; source/plugins/ruby/health/cluster_health_state.rb; 644; root; root +/etc/fluent/plugin/health/health_container_cpu_memory_aggregator.rb; source/plugins/ruby/health/health_container_cpu_memory_aggregator.rb; 644; root; root +/etc/fluent/plugin/health/health_container_cpu_memory_record_formatter.rb; source/plugins/ruby/health/health_container_cpu_memory_record_formatter.rb; 644; root; root +/etc/fluent/plugin/health/health_hierarchy_builder.rb; source/plugins/ruby/health/health_hierarchy_builder.rb; 644; root; root +/etc/fluent/plugin/health/health_kubernetes_resources.rb; source/plugins/ruby/health/health_kubernetes_resources.rb; 644; root; root +/etc/fluent/plugin/health/health_kube_api_down_handler.rb; source/plugins/ruby/health/health_kube_api_down_handler.rb; 644; root; root +/etc/fluent/plugin/health/health_missing_signal_generator.rb; source/plugins/ruby/health/health_missing_signal_generator.rb; 644; root; root +/etc/fluent/plugin/health/health_model_buffer.rb; source/plugins/ruby/health/health_model_buffer.rb; 644; root; root +/etc/fluent/plugin/health/health_model_builder.rb; source/plugins/ruby/health/health_model_builder.rb; 644; root; root +/etc/fluent/plugin/health/health_model_constants.rb; source/plugins/ruby/health/health_model_constants.rb; 644; root; root +/etc/fluent/plugin/health/parent_monitor_provider.rb; source/plugins/ruby/health/parent_monitor_provider.rb; 644; root; root +/etc/fluent/plugin/health/health_model_definition_parser.rb; source/plugins/ruby/health/health_model_definition_parser.rb; 644; root; root +/etc/fluent/plugin/health/health_monitor_helpers.rb; source/plugins/ruby/health/health_monitor_helpers.rb; 644; root; root +/etc/fluent/plugin/health/health_monitor_optimizer.rb; source/plugins/ruby/health/health_monitor_optimizer.rb; 644; root; root +/etc/fluent/plugin/health/health_monitor_provider.rb; source/plugins/ruby/health/health_monitor_provider.rb; 644; root; root +/etc/fluent/plugin/health/health_monitor_record.rb; source/plugins/ruby/health/health_monitor_record.rb; 644; root; root +/etc/fluent/plugin/health/health_monitor_state.rb; source/plugins/ruby/health/health_monitor_state.rb; 644; root; root +/etc/fluent/plugin/health/health_monitor_telemetry.rb; source/plugins/ruby/health/health_monitor_telemetry.rb; 644; root; root +/etc/fluent/plugin/health/health_monitor_utils.rb; source/plugins/ruby/health/health_monitor_utils.rb; 644; root; root +/etc/fluent/plugin/health/health_signal_reducer.rb; source/plugins/ruby/health/health_signal_reducer.rb; 644; root; root +/etc/fluent/plugin/health/monitor_factory.rb; source/plugins/ruby/health/monitor_factory.rb; 644; root; root +/etc/fluent/plugin/health/monitor_set.rb; source/plugins/ruby/health/monitor_set.rb; 644; root; root +/etc/fluent/plugin/health/unit_monitor.rb; source/plugins/ruby/health/unit_monitor.rb; 644; root; root + +/etc/fluent/plugin/ApplicationInsightsUtility.rb; source/plugins/ruby/ApplicationInsightsUtility.rb; 644; root; root +/etc/fluent/plugin/arc_k8s_cluster_identity.rb; source/plugins/ruby/arc_k8s_cluster_identity.rb; 644; root; root +/etc/fluent/plugin/CAdvisorMetricsAPIClient.rb; source/plugins/ruby/CAdvisorMetricsAPIClient.rb; 644; root; root +/etc/fluent/plugin/constants.rb; source/plugins/ruby/constants.rb; 644; root; root +/etc/fluent/plugin/ContainerInventoryState.rb; source/plugins/ruby/ContainerInventoryState.rb; 644; root; root +/etc/fluent/plugin/CustomMetricsUtils.rb; 
source/plugins/ruby/CustomMetricsUtils.rb; 644; root; root +/etc/fluent/plugin/DockerApiClient.rb; source/plugins/ruby/DockerApiClient.rb; 644; root; root +/etc/fluent/plugin/DockerApiRestHelper.rb; source/plugins/ruby/DockerApiRestHelper.rb; 644; root; root +/etc/fluent/plugin/kubelet_utils.rb; source/plugins/ruby/kubelet_utils.rb; 644; root; root +/etc/fluent/plugin/proxy_utils.rb; source/plugins/ruby/proxy_utils.rb; 644; root; root +/etc/fluent/plugin/kubernetes_container_inventory.rb; source/plugins/ruby/kubernetes_container_inventory.rb; 644; root; root +/etc/fluent/plugin/podinventory_to_mdm.rb; source/plugins/ruby/podinventory_to_mdm.rb; 644; root; root +/etc/fluent/plugin/MdmMetricsGenerator.rb; source/plugins/ruby/MdmMetricsGenerator.rb; 644; root; root +/etc/fluent/plugin/MdmAlertTemplates.rb; source/plugins/ruby/MdmAlertTemplates.rb; 644; root; root + +/etc/fluent/plugin/omslog.rb; source/plugins/utils/omslog.rb; 644; root; root +/etc/fluent/plugin/oms_common.rb; source/plugins/utils/oms_common.rb; 644; root; root + +/etc/fluent/kube.conf; build/linux/installer/conf/kube.conf; 644; root; root +/etc/fluent/container.conf; build/linux/installer/conf/container.conf; 644; root; root + +/etc/fluent/plugin/in_cadvisor_perf.rb; source/plugins/ruby/in_cadvisor_perf.rb; 644; root; root +/etc/fluent/plugin/in_win_cadvisor_perf.rb; source/plugins/ruby/in_win_cadvisor_perf.rb; 644; root; root +/etc/fluent/plugin/in_containerinventory.rb; source/plugins/ruby/in_containerinventory.rb; 644; root; root +/etc/fluent/plugin/in_kube_nodes.rb; source/plugins/ruby/in_kube_nodes.rb; 644; root; root +/etc/fluent/plugin/in_kube_podinventory.rb; source/plugins/ruby/in_kube_podinventory.rb; 644; root; root +/etc/fluent/plugin/KubernetesApiClient.rb; source/plugins/ruby/KubernetesApiClient.rb; 644; root; root +/etc/fluent/plugin/in_kube_events.rb; source/plugins/ruby/in_kube_events.rb; 644; root; root +/etc/fluent/plugin/in_kube_health.rb; source/plugins/ruby/in_kube_health.rb; 644; root; root +/etc/fluent/plugin/in_kube_pvinventory.rb; source/plugins/ruby/in_kube_pvinventory.rb; 644; root; root +/etc/fluent/plugin/in_kubestate_deployments.rb; source/plugins/ruby/in_kubestate_deployments.rb; 644; root; root +/etc/fluent/plugin/in_kubestate_hpa.rb; source/plugins/ruby/in_kubestate_hpa.rb; 644; root; root + +/etc/fluent/plugin/filter_cadvisor_health_container.rb; source/plugins/ruby/filter_cadvisor_health_container.rb; 644; root; root +/etc/fluent/plugin/filter_cadvisor_health_node.rb; source/plugins/ruby/filter_cadvisor_health_node.rb; 644; root; root +/etc/fluent/plugin/filter_cadvisor2mdm.rb; source/plugins/ruby/filter_cadvisor2mdm.rb; 644; root; root +/etc/fluent/plugin/filter_health_model_builder.rb; source/plugins/ruby/filter_health_model_builder.rb; 644; root; root +/etc/fluent/plugin/filter_inventory2mdm.rb; source/plugins/ruby/filter_inventory2mdm.rb; 644; root; root +/etc/fluent/plugin/filter_telegraf2mdm.rb; source/plugins/ruby/filter_telegraf2mdm.rb; 644; root; root + +/etc/fluent/plugin/out_health_forward.rb; source/plugins/ruby/out_health_forward.rb; 644; root; root +/etc/fluent/plugin/out_mdm.rb; source/plugins/ruby/out_mdm.rb; 644; root; root + + %Links -/opt/omi/lib/libcontainer.${{SHLIB_EXT}}; /opt/microsoft/docker-cimprov/lib/libcontainer.${{SHLIB_EXT}}; 644; root; root %Directories /etc; 755; root; root; sysdir @@ -179,27 +183,18 @@ MAINTAINER: 'Microsoft Corporation' /var; 755; root; root; sysdir /var/opt; 755; root; root; sysdir +/opt/fluent; 755; root; root; sysdir + 
/etc/opt/microsoft; 755; root; root; sysdir /etc/opt/microsoft/docker-cimprov; 755; root; root /etc/opt/microsoft/docker-cimprov/conf; 755; root; root /etc/opt/microsoft/docker-cimprov/health; 755; root; root -/etc/opt/omi; 755; root; root; sysdir -/etc/opt/omi/conf; 755; root; root; sysdir -/etc/opt/omi/conf/omiregister; 755; root; root; sysdir -/etc/opt/omi/conf/omiregister/root-cimv2; 755; root; root - /opt/microsoft; 755; root; root; sysdir /opt/microsoft/docker-cimprov; 755; root; root /opt/microsoft/docker-cimprov/bin; 755; root; root /opt/microsoft/docker-cimprov/lib; 755; root; root -/opt/microsoft/omsagent; 755; root; root; sysdir -/opt/microsoft/omsagent/plugin; 755; root; root; sysdir -/opt/microsoft/omsagent/plugin/health; 755; root; root; sysdir - -/opt/omi; 755; root; root; sysdir -/opt/omi/lib; 755; root; root; sysdir /var/opt/microsoft; 755; root; root; sysdir /var/opt/microsoft/docker-cimprov; 755; root; root @@ -213,11 +208,14 @@ MAINTAINER: 'Microsoft Corporation' /opt/td-agent-bit/bin; 755; root; root;sysdir /etc/telegraf; 755; root; root;sysdir -/opt/microsoft/omsagent/plugin/lib; 755; root; root; sysdir -/opt/microsoft/omsagent/plugin/lib/application_insights; 755; root; root; sysdir -/opt/microsoft/omsagent/plugin/lib/application_insights/channel; 755; root; root; sysdir -/opt/microsoft/omsagent/plugin/lib/application_insights/channel/contracts; 755; root; root; sysdir -/opt/microsoft/omsagent/plugin/lib/application_insights/rack; 755; root; root; sysdir +/etc/fluent; 755; root; root; sysdir +/etc/fluent/plugin; 755; root; root; sysdir +/etc/fluent/plugin/health; 755; root; root; sysdir +/etc/fluent/plugin/lib; 755; root; root; sysdir +/etc/fluent/plugin/lib/application_insights; 755; root; root; sysdir +/etc/fluent/plugin/lib/application_insights/channel; 755; root; root; sysdir +/etc/fluent/plugin/lib/application_insights/channel/contracts; 755; root; root; sysdir +/etc/fluent/plugin/lib/application_insights/rack; 755; root; root; sysdir /opt/tomlrb; 755; root; root; sysdir @@ -230,64 +228,61 @@ WriteInstallInfo() { } WriteInstallInfo -#Make omsagent owner for ContainerInventory directory. 
This is needed for ruby plugin to have access -chown omsagent:omsagent /var/opt/microsoft/docker-cimprov/state/ContainerInventory # Get the state file in place with proper permissions touch /var/opt/microsoft/docker-cimprov/state/LastEventQueryTime.txt chmod 644 /var/opt/microsoft/docker-cimprov/state/LastEventQueryTime.txt -chown omsagent:omsagent /var/opt/microsoft/docker-cimprov/state/LastEventQueryTime.txt touch /var/opt/microsoft/docker-cimprov/state/KubeEventQueryState.yaml chmod 644 /var/opt/microsoft/docker-cimprov/state/KubeEventQueryState.yaml -chown omsagent:omsagent /var/opt/microsoft/docker-cimprov/state/KubeEventQueryState.yaml touch /var/opt/microsoft/docker-cimprov/state/KubeLogQueryState.yaml chmod 644 /var/opt/microsoft/docker-cimprov/state/KubeLogQueryState.yaml -chown omsagent:omsagent /var/opt/microsoft/docker-cimprov/state/KubeLogQueryState.yaml + touch /var/opt/microsoft/docker-cimprov/log/kubernetes_client_log.txt chmod 666 /var/opt/microsoft/docker-cimprov/log/kubernetes_client_log.txt -chown omsagent:omiusers /var/opt/microsoft/docker-cimprov/log/kubernetes_client_log.txt + touch /var/opt/microsoft/docker-cimprov/log/kubernetes_perf_log.txt chmod 666 /var/opt/microsoft/docker-cimprov/log/kubernetes_perf_log.txt -chown omsagent:omiusers /var/opt/microsoft/docker-cimprov/log/kubernetes_perf_log.txt + touch /var/opt/microsoft/docker-cimprov/log/filter_cadvisor2mdm.log chmod 666 /var/opt/microsoft/docker-cimprov/log/filter_cadvisor2mdm.log -chown omsagent:omiusers /var/opt/microsoft/docker-cimprov/log/filter_cadvisor2mdm.log + touch /var/opt/microsoft/docker-cimprov/log/filter_telegraf2mdm.log chmod 666 /var/opt/microsoft/docker-cimprov/log/filter_telegraf2mdm.log -chown omsagent:omiusers /var/opt/microsoft/docker-cimprov/log/filter_telegraf2mdm.log + touch /var/opt/microsoft/docker-cimprov/log/filter_inventory2mdm.log chmod 666 /var/opt/microsoft/docker-cimprov/log/filter_inventory2mdm.log -chown omsagent:omiusers /var/opt/microsoft/docker-cimprov/log/filter_inventory2mdm.log + touch /var/opt/microsoft/docker-cimprov/log/mdm_metrics_generator.log chmod 666 /var/opt/microsoft/docker-cimprov/log/mdm_metrics_generator.log -chown omsagent:omiusers /var/opt/microsoft/docker-cimprov/log/mdm_metrics_generator.log + touch /var/opt/microsoft/docker-cimprov/log/health_monitors.log chmod 666 /var/opt/microsoft/docker-cimprov/log/health_monitors.log -chown omsagent:omiusers /var/opt/microsoft/docker-cimprov/log/health_monitors.log + touch /var/opt/microsoft/docker-cimprov/log/filter_health_model_builder.log chmod 666 /var/opt/microsoft/docker-cimprov/log/filter_health_model_builder.log -chown omsagent:omiusers /var/opt/microsoft/docker-cimprov/log/filter_health_model_builder.log + touch /var/opt/microsoft/docker-cimprov/log/fluent_forward_failed.log chmod 666 /var/opt/microsoft/docker-cimprov/log/fluent_forward_failed.log -chown omsagent:omiusers /var/opt/microsoft/docker-cimprov/log/fluent_forward_failed.log + touch /var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log chmod 666 /var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log -chown omsagent:omiusers /var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log -mv /etc/opt/microsoft/docker-cimprov/container.conf /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf -chown omsagent:omsagent /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf + +touch /var/opt/microsoft/docker-cimprov/log/fluentd.log +chmod 666 /var/opt/microsoft/docker-cimprov/log/fluentd.log + 
%Postuninstall_10 # If we're an upgrade, skip all of this cleanup @@ -299,7 +294,6 @@ if ${{PERFORMING_UPGRADE_NOT}}; then rm -f /var/opt/microsoft/docker-cimprov/state/KubeLogQueryState.yaml rm -f /var/opt/microsoft/docker-cimprov/log/kubernetes_client_log.txt rm -f /var/opt/microsoft/docker-cimprov/log/kubernetes_perf_log.txt - rm -f /etc/opt/microsoft/omsagent/conf/omsagent.d/container.conf rmdir /var/opt/microsoft/docker-cimprov/log 2> /dev/null rmdir /var/opt/microsoft/docker-cimprov/state/ContainerInventory 2> /dev/null rmdir /var/opt/microsoft/docker-cimprov/state/ImageInventory 2> /dev/null @@ -308,14 +302,7 @@ if ${{PERFORMING_UPGRADE_NOT}}; then rmdir /etc/opt/microsoft/docker-cimprov/conf 2> /dev/null rmdir /etc/opt/microsoft/docker-cimprov 2> /dev/null rmdir /etc/opt/microsoft 2> /dev/null - rmdir /etc/opt 2> /dev/null - #Remove sudoers file edit - if [ -s /etc/sudoers.d/omsagent ] - then - chmod +w /etc/sudoers.d/omsagent - sed -i '/docker\-provider/,+1 d' /etc/sudoers.d/omsagent - chmod 440 /etc/sudoers.d/omsagent - fi + rmdir /etc/opt 2> /dev/null fi %Preinstall_0 diff --git a/build/linux/installer/datafiles/linux.data b/build/linux/installer/datafiles/linux.data index 604394d80..48af63a73 100644 --- a/build/linux/installer/datafiles/linux.data +++ b/build/linux/installer/datafiles/linux.data @@ -1,16 +1,11 @@ %Variables PF: 'Linux' -OMI_SERVICE: '/opt/omi/bin/service_control' -OMS_SERVICE: '/opt/microsoft/omsagent/bin/service_control' + %Postinstall_2000 -# Reload the OMI server -${{OMI_SERVICE}} reload -${{OMS_SERVICE}} reload -if ${{PERFORMING_UPGRADE_NOT}}; then - /opt/omi/bin/omicli ei root/cimv2 Container_HostInventory -fi + + %Postuninstall_1000 # Calling sequence for RPM pre/post scripts, during upgrade, is as follows: @@ -35,10 +30,5 @@ if ${{PERFORMING_UPGRADE_NOT}}; then fi %Postuninstall_1100 -# If we're called for upgrade, don't do anything -if ${{PERFORMING_UPGRADE_NOT}}; then - # Reload the OMI server - ${{OMI_SERVICE}} reload - ${{OMS_SERVICE}} reload -fi + diff --git a/build/linux/installer/datafiles/linux_dpkg.data b/build/linux/installer/datafiles/linux_dpkg.data index a7821642d..bdf9f2354 100644 --- a/build/linux/installer/datafiles/linux_dpkg.data +++ b/build/linux/installer/datafiles/linux_dpkg.data @@ -3,5 +3,5 @@ PERFORMING_UPGRADE_NOT: '[ "$1" != "upgrade" ]' PACKAGE_TYPE: 'DPKG' %Dependencies -omi (>= 1.0.8.6) + diff --git a/build/linux/installer/datafiles/linux_rpm.data b/build/linux/installer/datafiles/linux_rpm.data index 1b9ba009b..d537b444d 100644 --- a/build/linux/installer/datafiles/linux_rpm.data +++ b/build/linux/installer/datafiles/linux_rpm.data @@ -3,5 +3,5 @@ PERFORMING_UPGRADE_NOT: '[ "$1" -ne 1 ]' PACKAGE_TYPE: 'RPM' %Dependencies -omi >= 1.0.8-6 + diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index 5e1261e7e..252f471e9 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -1,19 +1,21 @@ #!/bin/bash -#test to exit non zero value if omsagent is not running -(ps -ef | grep omsagent- | grep -v "grep") +#test to exit non zero value if mdsd is not running +(ps -ef | grep "mdsd" | grep -v "grep") if [ $? 
-ne 0 ] then - echo " omsagent is not running" > /dev/termination-log - exit 1 + echo "mdsd is not running" > /dev/termination-log + exit 1 fi -#optionally test to exit non zero value if oneagent is not running -if [ -e "/opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2" ]; then - (ps -ef | grep "mdsd" | grep -v "grep") + +#optionally test to exit non zero value if fluentd is not running +#fluentd not used in sidecar container +if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then + (ps -ef | grep "fluentd" | grep -v "grep") if [ $? -ne 0 ] then - echo "oneagent is not running" > /dev/termination-log + echo "fluentd is not running" > /dev/termination-log exit 1 fi fi diff --git a/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb b/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb index 5ce5d79d2..dcf179bf2 100644 --- a/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb +++ b/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb @@ -3,7 +3,7 @@ require_relative "tomlrb" require_relative "ConfigParseErrorLogger" -require_relative "microsoft/omsagent/plugin/constants" +require_relative "/etc/fluent/plugin/constants" @configMapMountPath = "/etc/config/settings/alertable-metrics-configuration-settings" @configVersion = "" diff --git a/build/linux/installer/scripts/tomlparser-metric-collection-config.rb b/build/linux/installer/scripts/tomlparser-metric-collection-config.rb index 40d87b7f1..cee41312b 100644 --- a/build/linux/installer/scripts/tomlparser-metric-collection-config.rb +++ b/build/linux/installer/scripts/tomlparser-metric-collection-config.rb @@ -3,7 +3,7 @@ require_relative "tomlrb" require_relative "ConfigParseErrorLogger" -require_relative "microsoft/omsagent/plugin/constants" +require_relative "/etc/fluent/plugin/constants" @configMapMountPath = "/etc/config/settings/metric_collection_settings" @configVersion = "" diff --git a/kubernetes/linux/envmdsd b/kubernetes/linux/envmdsd index 3f834bfb8..5a939fc3e 100644 --- a/kubernetes/linux/envmdsd +++ b/kubernetes/linux/envmdsd @@ -2,8 +2,6 @@ export MDSD_ROLE_PREFIX="/var/run/mdsd/default" #export MDSD_OPTIONS="-d -A -r ${MDSD_ROLE_PREFIX}" export MDSD_LOG="/var/opt/microsoft/linuxmonagent/log" export MDSD_SPOOL_DIRECTORY="/var/opt/microsoft/linuxmonagent" -export OMS_CERT_PATH="/etc/opt/microsoft/omsagent/certs/oms.crt" -export OMS_CERT_KEY_PATH="/etc/opt/microsoft/omsagent/certs/oms.key" #export CIWORKSPACE_id="" #export CIWORKSPACE_key="" export MDSD_OPTIONS="-A -c /etc/mdsd.d/mdsd.xml -r ${MDSD_ROLE_PREFIX} -S ${MDSD_SPOOL_DIRECTORY}/eh -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos" diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index c7d939034..b21ed6b96 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -38,41 +38,9 @@ waitforlisteneronTCPport() { fi } -if [ -e "/etc/config/kube.conf" ]; then - cat /etc/config/kube.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf -elif [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then - echo "setting omsagent conf file for prometheus sidecar" - cat /etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf - # omsadmin.sh replaces %MONITOR_AGENT_PORT% and %SYSLOG_PORT% in the monitor.conf and syslog.conf with default ports 25324 and 25224. 
- # Since we are running 2 omsagents in the same pod, we need to use a different port for the sidecar, - # else we will see the Address already in use - bind(2) for 0.0.0.0:253(2)24 error. - # Look into omsadmin.sh scripts's configure_monitor_agent()/configure_syslog() and find_available_port() methods for more info. - sed -i -e 's/port %MONITOR_AGENT_PORT%/port 25326/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/monitor.conf - sed -i -e 's/port %SYSLOG_PORT%/port 25226/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/syslog.conf -else - echo "setting omsagent conf file for daemonset" - sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf -fi -sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/syslog.conf -sed -i -e 's/^exit 101$/exit 0/g' /usr/sbin/policy-rc.d - -#Using the get_hostname for hostname instead of the host field in syslog messages -sed -i.bak "s/record\[\"Host\"\] = hostname/record\[\"Host\"\] = OMS::Common.get_hostname/" /opt/microsoft/omsagent/plugin/filter_syslog.rb - #using /var/opt/microsoft/docker-cimprov/state instead of /var/opt/microsoft/omsagent/state since the latter gets deleted during onboarding mkdir -p /var/opt/microsoft/docker-cimprov/state -#if [ ! -e "/etc/config/kube.conf" ]; then - # add permissions for omsagent user to access docker.sock - #sudo setfacl -m user:omsagent:rw /var/run/host/docker.sock -#fi - -# add permissions for omsagent user to access azure.json. -sudo setfacl -m user:omsagent:r /etc/kubernetes/host/azure.json - -# add permission for omsagent user to log folder. We also need 'x', else log rotation is failing. TODO: Investigate why. -sudo setfacl -m user:omsagent:rwx /var/opt/microsoft/docker-cimprov/log - #Run inotify as a daemon to track changes to the mounted configmap. inotifywait /etc/config/settings --daemon --recursive --outfile "/opt/inotifyoutput.txt" --event create,delete --format '%e : %T' --timefmt '+%s' @@ -89,7 +57,7 @@ else export customResourceId=$AKS_RESOURCE_ID echo "export customResourceId=$AKS_RESOURCE_ID" >> ~/.bashrc source ~/.bashrc - echo "customResourceId:$customResourceId" + echo "customResourceId:$customResourceId" fi #set agent config schema version @@ -141,7 +109,6 @@ if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "Prometheus fi export PROXY_ENDPOINT="" - # Check for internet connectivity or workspace deletion if [ -e "/etc/omsagent-secret/WSID" ]; then workspaceId=$(cat /etc/omsagent-secret/WSID) @@ -222,6 +189,7 @@ else echo "LA Onboarding:Workspace Id not mounted, skipping the telemetry check" fi + # Set environment variable for if public cloud by checking the workspace domain. if [ -z $domain ]; then ClOUD_ENVIRONMENT="unknown" @@ -233,6 +201,12 @@ fi export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT echo "export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT" >> ~/.bashrc +#consistent naming conventions with the windows agent +export DOMAIN=$domain +echo "export DOMAIN=$DOMAIN" >> ~/.bashrc +export WSID=$workspaceId +echo "export WSID=$WSID" >> ~/.bashrc + # Check if the instrumentation key needs to be fetched from a storage account (as in airgapped clouds) if [ ${#APPLICATIONINSIGHTS_AUTH_URL} -ge 1 ]; then # (check if APPLICATIONINSIGHTS_AUTH_URL has length >=1) for BACKOFF in {1..4}; do @@ -267,7 +241,7 @@ source ~/.bashrc if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then #Parse the configmap to set the right environment variables. 
- /opt/microsoft/omsagent/ruby/bin/ruby tomlparser.rb + /usr/bin/ruby2.6 tomlparser.rb cat config_env_var | while read line; do echo $line >> ~/.bashrc @@ -278,7 +252,7 @@ fi #Parse the configmap to set the right environment variables for agent config. #Note > tomlparser-agent-config.rb has to be parsed first before td-agent-bit-conf-customizer.rb for fbit agent settings if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then - /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-agent-config.rb + /usr/bin/ruby2.6 tomlparser-agent-config.rb cat agent_config_env_var | while read line; do #echo $line @@ -287,7 +261,7 @@ if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then source agent_config_env_var #Parse the configmap to set the right environment variables for network policy manager (npm) integration. - /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-npm-config.rb + /usr/bin/ruby2.6 tomlparser-npm-config.rb cat integration_npm_config_env_var | while read line; do #echo $line @@ -298,11 +272,11 @@ fi #Replace the placeholders in td-agent-bit.conf file for fluentbit with custom/default values in daemonset if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then - /opt/microsoft/omsagent/ruby/bin/ruby td-agent-bit-conf-customizer.rb + /usr/bin/ruby2.6 td-agent-bit-conf-customizer.rb fi #Parse the prometheus configmap to create a file with new custom settings. -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-prom-customconfig.rb +/usr/bin/ruby2.6 tomlparser-prom-customconfig.rb #Setting default environment variables to be used in any case of failure in the above steps if [ ! -e "/etc/config/kube.conf" ]; then @@ -335,7 +309,7 @@ fi #Parse the configmap to set the right environment variables for MDM metrics configuration for Alerting. if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then - /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-mdm-metrics-config.rb + /usr/bin/ruby2.6 tomlparser-mdm-metrics-config.rb cat config_mdm_metrics_env_var | while read line; do echo $line >> ~/.bashrc @@ -343,7 +317,7 @@ if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then source config_mdm_metrics_env_var #Parse the configmap to set the right environment variables for metric collection settings - /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-metric-collection-config.rb + /usr/bin/ruby2.6 tomlparser-metric-collection-config.rb cat config_metric_collection_env_var | while read line; do echo $line >> ~/.bashrc @@ -354,7 +328,7 @@ fi # OSM scraping to be done in replicaset if sidecar car scraping is disabled and always do the scraping from the sidecar (It will always be either one of the two) if [[ ( ( ! 
-e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || ( ( -e "/etc/config/kube.conf" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ]]; then - /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb + /usr/bin/ruby2.6 tomlparser-osm-config.rb if [ -e "integration_osm_config_env_var" ]; then cat integration_osm_config_env_var | while read line; do @@ -432,26 +406,11 @@ export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="kubelet_docker_operations_error if [ "$CONTAINER_RUNTIME" != "docker" ]; then # these metrics are avialble only on k8s versions <1.18 and will get deprecated from 1.18 export KUBELET_RUNTIME_OPERATIONS_METRIC="kubelet_runtime_operations" - export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="kubelet_runtime_operations_errors" -else - #if container run time is docker then add omsagent user to local docker group to get access to docker.sock - # docker.sock only use for the telemetry to get the docker version - DOCKER_SOCKET=/var/run/host/docker.sock - DOCKER_GROUP=docker - REGULAR_USER=omsagent - if [ -S ${DOCKER_SOCKET} ]; then - echo "getting gid for docker.sock" - DOCKER_GID=$(stat -c '%g' ${DOCKER_SOCKET}) - echo "creating a local docker group" - groupadd -for -g ${DOCKER_GID} ${DOCKER_GROUP} - echo "adding omsagent user to local docker group" - usermod -aG ${DOCKER_GROUP} ${REGULAR_USER} - fi + export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="kubelet_runtime_operations_errors" fi echo "set caps for ruby process to read container env from proc" -sudo setcap cap_sys_ptrace,cap_dac_read_search+ep /opt/microsoft/omsagent/ruby/bin/ruby - +sudo setcap cap_sys_ptrace,cap_dac_read_search+ep /usr/bin/ruby2.6 echo "export KUBELET_RUNTIME_OPERATIONS_METRIC="$KUBELET_RUNTIME_OPERATIONS_METRIC >> ~/.bashrc echo "export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="$KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC >> ~/.bashrc @@ -461,171 +420,70 @@ echo $NODE_NAME > /var/opt/microsoft/docker-cimprov/state/containerhostname #check if file was written successfully. cat /var/opt/microsoft/docker-cimprov/state/containerhostname - -#Commenting it for test. 
We do this in the installer now -#Setup sudo permission for containerlogtailfilereader -#chmod +w /etc/sudoers.d/omsagent -#echo "#run containerlogtailfilereader.rb for docker-provider" >> /etc/sudoers.d/omsagent -#echo "omsagent ALL=(ALL) NOPASSWD: /opt/microsoft/omsagent/ruby/bin/ruby /opt/microsoft/omsagent/plugin/containerlogtailfilereader.rb *" >> /etc/sudoers.d/omsagent -#chmod 440 /etc/sudoers.d/omsagent - -#Disable dsc -#/opt/microsoft/omsconfig/Scripts/OMS_MetaConfigHelper.py --disable -rm -f /etc/opt/microsoft/omsagent/conf/omsagent.d/omsconfig.consistencyinvoker.conf - -CIWORKSPACE_id="" -CIWORKSPACE_key="" - -if [ -z $INT ]; then - if [ -a /etc/omsagent-secret/PROXY ]; then - if [ -a /etc/omsagent-secret/DOMAIN ]; then - /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /etc/omsagent-secret/WSID` -s `cat /etc/omsagent-secret/KEY` -d `cat /etc/omsagent-secret/DOMAIN` -p `cat /etc/omsagent-secret/PROXY` - else - /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /etc/omsagent-secret/WSID` -s `cat /etc/omsagent-secret/KEY` -p `cat /etc/omsagent-secret/PROXY` - fi - CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" - CIWORKSPACE_key="$(cat /etc/omsagent-secret/KEY)" - elif [ -a /etc/omsagent-secret/DOMAIN ]; then - /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /etc/omsagent-secret/WSID` -s `cat /etc/omsagent-secret/KEY` -d `cat /etc/omsagent-secret/DOMAIN` - CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" - CIWORKSPACE_key="$(cat /etc/omsagent-secret/KEY)" - elif [ -a /etc/omsagent-secret/WSID ]; then - /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /etc/omsagent-secret/WSID` -s `cat /etc/omsagent-secret/KEY` - CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" - CIWORKSPACE_key="$(cat /etc/omsagent-secret/KEY)" - elif [ -a /run/secrets/DOMAIN ]; then - /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /run/secrets/WSID` -s `cat /run/secrets/KEY` -d `cat /run/secrets/DOMAIN` - CIWORKSPACE_id="$(cat /run/secrets/WSID)" - CIWORKSPACE_key="$(cat /run/secrets/KEY)" - elif [ -a /run/secrets/WSID ]; then - /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /run/secrets/WSID` -s `cat /run/secrets/KEY` - CIWORKSPACE_id="$(cat /run/secrets/WSID)" - CIWORKSPACE_key="$(cat /run/secrets/KEY)" - elif [ -z $DOMAIN ]; then - /opt/microsoft/omsagent/bin/omsadmin.sh -w $WSID -s $KEY - CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" - CIWORKSPACE_key="$(cat /etc/omsagent-secret/KEY)" - else - /opt/microsoft/omsagent/bin/omsadmin.sh -w $WSID -s $KEY -d $DOMAIN - CIWORKSPACE_id="$WSID" - CIWORKSPACE_key="$KEY" - fi -else -#To onboard to INT workspace - workspace-id (WSID-not base64 encoded), workspace-key (KEY-not base64 encoded), Domain(DOMAIN-int2.microsoftatlanta-int.com) -#need to be added to omsagent.yaml. 
- echo WORKSPACE_ID=$WSID > /etc/omsagent-onboard.conf - echo SHARED_KEY=$KEY >> /etc/omsagent-onboard.conf - echo URL_TLD=$DOMAIN >> /etc/omsagent-onboard.conf - /opt/microsoft/omsagent/bin/omsadmin.sh - CIWORKSPACE_id="$WSID" - CIWORKSPACE_key="$KEY" -fi - #start cron daemon for logrotate service cron start +#get docker-provider versions -#check if agent onboarded successfully -/opt/microsoft/omsagent/bin/omsadmin.sh -l - -#get omsagent and docker-provider versions -dpkg -l | grep omsagent | awk '{print $2 " " $3}' dpkg -l | grep docker-cimprov | awk '{print $2 " " $3}' DOCKER_CIMPROV_VERSION=$(dpkg -l | grep docker-cimprov | awk '{print $3}') echo "DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION echo "export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" >> ~/.bashrc +echo "*** activating oneagent in legacy auth mode ***" +CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" +#use the file path as its secure than env +CIWORKSPACE_keyFile="/etc/omsagent-secret/KEY" +cat /etc/mdsd.d/envmdsd | while read line; do + echo $line >> ~/.bashrc +done +source /etc/mdsd.d/envmdsd +echo "setting mdsd workspaceid & key for workspace:$CIWORKSPACE_id" +export CIWORKSPACE_id=$CIWORKSPACE_id +echo "export CIWORKSPACE_id=$CIWORKSPACE_id" >> ~/.bashrc +export CIWORKSPACE_keyFile=$CIWORKSPACE_keyFile +echo "export CIWORKSPACE_keyFile=$CIWORKSPACE_keyFile" >> ~/.bashrc +export OMS_TLD=$domain +echo "export OMS_TLD=$OMS_TLD" >> ~/.bashrc +export MDSD_FLUENT_SOCKET_PORT="29230" +echo "export MDSD_FLUENT_SOCKET_PORT=$MDSD_FLUENT_SOCKET_PORT" >> ~/.bashrc + +#skip imds lookup since not used in legacy auth path +export SKIP_IMDS_LOOKUP_FOR_LEGACY_AUTH="true" +echo "export SKIP_IMDS_LOOKUP_FOR_LEGACY_AUTH=$SKIP_IMDS_LOOKUP_FOR_LEGACY_AUTH" >> ~/.bashrc -#region check to auto-activate oneagent, to route container logs, -#Intent is to activate one agent routing for all managed clusters with region in the regionllist, unless overridden by configmap -# AZMON_CONTAINER_LOGS_ROUTE will have route (if any) specified in the config map -# AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE will have the final route that we compute & set, based on our region list logic -echo "************start oneagent log routing checks************" -# by default, use configmap route for safer side -AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$AZMON_CONTAINER_LOGS_ROUTE - -#trim region list -oneagentregions="$(echo $AZMON_CONTAINERLOGS_ONEAGENT_REGIONS | xargs)" -#lowercase region list -typeset -l oneagentregions=$oneagentregions -echo "oneagent regions: $oneagentregions" -#trim current region -currentregion="$(echo $AKS_REGION | xargs)" -#lowercase current region -typeset -l currentregion=$currentregion -echo "current region: $currentregion" - -#initilze isoneagentregion as false -isoneagentregion=false - -#set isoneagentregion as true if matching region is found -if [ ! -z $oneagentregions ] && [ ! -z $currentregion ]; then - for rgn in $(echo $oneagentregions | sed "s/,/ /g"); do - if [ "$rgn" == "$currentregion" ]; then - isoneagentregion=true - echo "current region is in oneagent regions..." - break - fi - done -else - echo "current region is not in oneagent regions..." -fi +source ~/.bashrc -if [ "$isoneagentregion" = true ]; then - #if configmap has a routing for logs, but current region is in the oneagent region list, take the configmap route - if [ ! 
-z $AZMON_CONTAINER_LOGS_ROUTE ]; then - AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$AZMON_CONTAINER_LOGS_ROUTE - echo "oneagent region is true for current region:$currentregion and config map logs route is not empty. so using config map logs route as effective route:$AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" - else #there is no configmap route, so route thru oneagent - AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE="v2" - echo "oneagent region is true for current region:$currentregion and config map logs route is empty. so using oneagent as effective route:$AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" - fi -else - echo "oneagent region is false for current region:$currentregion" +dpkg -l | grep mdsd | awk '{print $2 " " $3}' + +if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then + echo "starting mdsd with mdsd-port=26130, fluentport=26230 and influxport=26330 in legacy auth mode in sidecar container..." + #use tenant name to avoid unix socket conflict and different ports for port conflict + #roleprefix to use container specific mdsd socket + export TENANT_NAME="${CONTAINER_TYPE}" + echo "export TENANT_NAME=$TENANT_NAME" >> ~/.bashrc + export MDSD_ROLE_PREFIX=/var/run/mdsd-${CONTAINER_TYPE}/default + echo "export MDSD_ROLE_PREFIX=$MDSD_ROLE_PREFIX" >> ~/.bashrc + source ~/.bashrc + mkdir /var/run/mdsd-${CONTAINER_TYPE} + # add -T 0xFFFF for full traces + mdsd -r ${MDSD_ROLE_PREFIX} -p 26130 -f 26230 -i 26330 -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & +else + echo "starting mdsd in legacy auth mode in main container..." + # add -T 0xFFFF for full traces + mdsd -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & fi - -#start oneagent -if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then - if [ ! -z $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE ]; then - echo "container logs configmap route is $AZMON_CONTAINER_LOGS_ROUTE" - echo "container logs effective route is $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" - #trim - containerlogsroute="$(echo $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE | xargs)" - # convert to lowercase - typeset -l containerlogsroute=$containerlogsroute - - echo "setting AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE as :$containerlogsroute" - export AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$containerlogsroute - echo "export AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$containerlogsroute" >> ~/.bashrc - source ~/.bashrc - - if [ "$containerlogsroute" == "v2" ]; then - echo "activating oneagent..." - echo "configuring mdsd..." - cat /etc/mdsd.d/envmdsd | while read line; do - echo $line >> ~/.bashrc - done - source /etc/mdsd.d/envmdsd - - echo "setting mdsd workspaceid & key for workspace:$CIWORKSPACE_id" - export CIWORKSPACE_id=$CIWORKSPACE_id - echo "export CIWORKSPACE_id=$CIWORKSPACE_id" >> ~/.bashrc - export CIWORKSPACE_key=$CIWORKSPACE_key - echo "export CIWORKSPACE_key=$CIWORKSPACE_key" >> ~/.bashrc - - source ~/.bashrc - - dpkg -l | grep mdsd | awk '{print $2 " " $3}' - - echo "starting mdsd ..." - mdsd -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & - - touch /opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2 - fi - fi -fi -echo "************end oneagent log routing checks************" +# no dependency on fluentd for prometheus side car container +if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then + if [ ! 
-e "/etc/config/kube.conf" ]; then + echo "*** starting fluentd v1 in daemonset" + fluentd -c /etc/fluent/container.conf -o /var/opt/microsoft/docker-cimprov/log/fluentd.log & + else + echo "*** starting fluentd v1 in replicaset" + fluentd -c /etc/fluent/kube.conf -o /var/opt/microsoft/docker-cimprov/log/fluentd.log & + fi +fi #If config parsing was successful, a copy of the conf file with replaced custom settings file is created if [ ! -e "/etc/config/kube.conf" ]; then @@ -749,12 +607,9 @@ dpkg -l | grep td-agent-bit | awk '{print $2 " " $3}' #dpkg -l | grep telegraf | awk '{print $2 " " $3}' - - # Write messages from the liveness probe to stdout (so telemetry picks it up) touch /dev/write-to-traces - echo "stopping rsyslog..." service rsyslog stop @@ -762,7 +617,7 @@ echo "getting rsyslog status..." service rsyslog status shutdown() { - /opt/microsoft/omsagent/bin/service_control stop + pkill -f mdsd } trap "shutdown" SIGTERM diff --git a/kubernetes/linux/mdsd.xml b/kubernetes/linux/mdsd.xml index 49d329791..de14240aa 100644 --- a/kubernetes/linux/mdsd.xml +++ b/kubernetes/linux/mdsd.xml @@ -47,6 +47,149 @@ Each column has a name, an augmented JSON source type, and a target MDS type. --> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -68,14 +211,33 @@ + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - ]]> @@ -143,11 +360,95 @@ - - ]]> + + + + + ]]> + + + + + + + + ]]> + + + + + + + + ]]> + + + + + + + + ]]> + + + + + + + + ]]> + + + + + + + + ]]> + + + + + + + + ]]> + + + + + + + + ]]> + + + + + + + + ]]> + + + + + + + ]]> + + + + + + + ]]> + + + diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index f065cc165..3d00e4c57 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -9,37 +9,13 @@ sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ dpkg-reconfigure --frontend=noninteractive locales && \ update-locale LANG=en_US.UTF-8 -wget https://github.com/Microsoft/OMS-Agent-for-Linux/releases/download/OMSAgent_v1.10.0-1/omsagent-1.10.0-1.universal.x64.sh +#install oneagent - Official bits (05/17/2021) +wget https://github.com/microsoft/Docker-Provider/releases/download/05172021-oneagent/azure-mdsd_1.10.1-build.master.213_x86_64.deb -#create file to disable omi service startup script -touch /etc/.omi_disable_service_control - -chmod 775 $TMPDIR/*.sh - -#Extract omsbundle -$TMPDIR/omsagent-*.universal.x64.sh --extract -mv $TMPDIR/omsbundle* $TMPDIR/omsbundle -#Install omi -/usr/bin/dpkg -i $TMPDIR/omsbundle/110/omi*.deb - -#Install scx -/usr/bin/dpkg -i $TMPDIR/omsbundle/110/scx*.deb -#$TMPDIR/omsbundle/bundles/scx-1.6.*-*.universal.x64.sh --install - -#Install omsagent - -/usr/bin/dpkg -i $TMPDIR/omsbundle/110/omsagent*.deb -#/usr/bin/dpkg -i $TMPDIR/omsbundle/100/omsconfig*.deb - -#install oneagent - Official bits (05/2021) -wget https://github.com/microsoft/Docker-Provider/releases/download/05112021-oneagent/azure-mdsd_1.8.0-build.master.189_x86_64.deb /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb cp -f $TMPDIR/mdsd.xml /etc/mdsd.d cp -f $TMPDIR/envmdsd /etc/mdsd.d -#Assign permissions to omsagent user to access docker.sock 
-sudo apt-get install acl - #download inotify tools for watching configmap changes sudo apt-get update sudo apt-get install inotify-tools -y @@ -49,18 +25,7 @@ sudo apt-get install inotify-tools -y sudo apt-get install jq=1.5+dfsg-2 -y #used to setcaps for ruby process to read /proc/env -echo "installing libcap2-bin" sudo apt-get install libcap2-bin -y -#/$TMPDIR/omsbundle/oss-kits/docker-cimprov-1.0.0-*.x86_64.sh --install -#Use downloaded docker-provider instead of the bundled one - -#download and install telegraf -#wget https://dl.influxdata.com/telegraf/releases/telegraf_1.10.1-1_amd64.deb -#sudo dpkg -i telegraf_1.10.1-1_amd64.deb - -#service telegraf stop - -#wget https://github.com/microsoft/Docker-Provider/releases/download/5.0.0.0/telegraf #1.18 pre-release wget https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0_linux_amd64.tar.gz @@ -79,8 +44,17 @@ sudo echo "deb https://packages.fluentbit.io/ubuntu/xenial xenial main" >> /etc/ sudo apt-get update sudo apt-get install td-agent-bit=1.6.8 -y -rm -rf $TMPDIR/omsbundle -rm -f $TMPDIR/omsagent*.sh +# install ruby2.6 +sudo apt-get install software-properties-common -y +sudo apt-add-repository ppa:brightbox/ruby-ng -y +sudo apt-get update +sudo apt-get install ruby2.6 ruby2.6-dev gcc make -y +# fluentd v1 gem +gem install fluentd -v "1.12.2" --no-document +fluentd --setup ./fluent +gem install gyoku iso8601 --no-doc + + rm -f $TMPDIR/docker-cimprov*.sh rm -f $TMPDIR/azure-mdsd*.deb rm -f $TMPDIR/mdsd.xml diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index d35acad3d..25f364c55 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -92,15 +92,24 @@ const kubeMonAgentConfigEventFlushInterval = 60 //Eventsource name in mdsd const MdsdContainerLogSourceName = "ContainerLogSource" const MdsdContainerLogV2SourceName = "ContainerLogV2Source" +const MdsdKubeMonAgentEventsSourceName = "KubeMonAgentEventsSource" +const MdsdInsightsMetricsSourceName = "InsightsMetricsSource" -//container logs route (v2=flush to oneagent, adx= flush to adx ingestion, anything else flush to ODS[default]) +//container logs route (v2=flush to oneagent, adx= flush to adx ingestion, v1 for ODS Direct) const ContainerLogsV2Route = "v2" const ContainerLogsADXRoute = "adx" +//fallback option v1 route i.e. ODS direct if required in any case +const ContainerLogsV1Route = "v1" + //container logs schema (v2=ContainerLogsV2 table in LA, anything else ContainerLogs table in LA. 
This is applicable only if Container logs route is NOT ADX) const ContainerLogV2SchemaVersion = "v2" + +//env variable to container type +const ContainerTypeEnv = "CONTAINER_TYPE" + var ( // PluginConfiguration the plugins configuration PluginConfiguration map[string]string @@ -108,6 +117,10 @@ var ( HTTPClient http.Client // Client for MDSD msgp Unix socket MdsdMsgpUnixSocketClient net.Conn + // Client for MDSD msgp Unix socket for KubeMon Agent events + MdsdKubeMonMsgpUnixSocketClient net.Conn + // Client for MDSD msgp Unix socket for Insights Metrics + MdsdInsightsMetricsMsgpUnixSocketClient net.Conn // Ingestor for ADX ADXIngestor *ingest.Ingestion // OMSEndpoint ingestion endpoint @@ -116,6 +129,8 @@ var ( Computer string // WorkspaceID log analytics workspace id WorkspaceID string + // LogAnalyticsWorkspaceDomain log analytics workspace domain + LogAnalyticsWorkspaceDomain string // ResourceID for resource-centric log analytics data ResourceID string // Resource-centric flag (will be true if we determine if above RseourceID is non-empty - default is false) @@ -143,7 +158,17 @@ var ( // ADX tenantID AdxTenantID string //ADX client secret - AdxClientSecret string + AdxClientSecret string + // container log or container log v2 tag name for oneagent route + MdsdContainerLogTagName string + // kubemonagent events tag name for oneagent route + MdsdKubeMonAgentEventsTagName string + // InsightsMetrics tag name for oneagent route + MdsdInsightsMetricsTagName string + // flag to check if its Windows OS + IsWindows bool + // container type + ContainerType string ) var ( @@ -314,6 +339,15 @@ const ( PromScrapingError ) +// DataType to be used as enum per data type socket client creation +type DataType int +const ( + // DataType to be used as enum per data type socket client creation + ContainerLogV2 DataType = iota + KubeMonAgentEvents + InsightsMetrics +) + func createLogger() *log.Logger { var logfile *os.File @@ -532,6 +566,7 @@ func flushKubeMonAgentEventRecords() { start := time.Now() var elapsed time.Duration var laKubeMonAgentEventsRecords []laKubeMonAgentEvents + var msgPackEntries []MsgPackEntry telemetryDimensions := make(map[string]string) telemetryDimensions["ConfigErrorEventCount"] = strconv.Itoa(len(ConfigErrorEvent)) @@ -558,7 +593,25 @@ func flushKubeMonAgentEventRecords() { Message: k, Tags: fmt.Sprintf("%s", tagJson), } - laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) + laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) + var stringMap map[string]string + jsonBytes, err := json.Marshal(&laKubeMonAgentEventsRecord) + if err != nil { + message := fmt.Sprintf("Error while Marshalling laKubeMonAgentEventsRecord to json bytes: %s", err.Error()) + Log(message) + SendException(message) + } else { + if err := json.Unmarshal(jsonBytes, &stringMap); err != nil { + message := fmt.Sprintf("Error while UnMarhalling json bytes to stringmap: %s", err.Error()) + Log(message) + SendException(message) + } else { + msgPackEntry := MsgPackEntry{ + Record: stringMap, + } + msgPackEntries = append(msgPackEntries, msgPackEntry) + } + } } } @@ -579,7 +632,25 @@ func flushKubeMonAgentEventRecords() { Message: k, Tags: fmt.Sprintf("%s", tagJson), } - laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) + laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) + var stringMap map[string]string + jsonBytes, err := 
json.Marshal(&laKubeMonAgentEventsRecord) + if err != nil { + message := fmt.Sprintf("Error while Marshalling laKubeMonAgentEventsRecord to json bytes: %s", err.Error()) + Log(message) + SendException(message) + } else { + if err := json.Unmarshal(jsonBytes, &stringMap); err != nil { + message := fmt.Sprintf("Error while UnMarhalling json bytes to stringmap: %s", err.Error()) + Log(message) + SendException(message) + } else { + msgPackEntry := MsgPackEntry{ + Record: stringMap, + } + msgPackEntries = append(msgPackEntries, msgPackEntry) + } + } } } @@ -610,11 +681,63 @@ func flushKubeMonAgentEventRecords() { Message: "No errors", Tags: fmt.Sprintf("%s", tagJson), } - laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) + laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) + var stringMap map[string]string + jsonBytes, err := json.Marshal(&laKubeMonAgentEventsRecord) + if err != nil { + message := fmt.Sprintf("Error while Marshalling laKubeMonAgentEventsRecord to json bytes: %s", err.Error()) + Log(message) + SendException(message) + } else { + if err := json.Unmarshal(jsonBytes, &stringMap); err != nil { + message := fmt.Sprintf("Error while UnMarshalling json bytes to stringmap: %s", err.Error()) + Log(message) + SendException(message) + } else { + msgPackEntry := MsgPackEntry{ + Record: stringMap, + } + msgPackEntries = append(msgPackEntries, msgPackEntry) + } + } } } - - if len(laKubeMonAgentEventsRecords) > 0 { + if (IsWindows == false && len(msgPackEntries) > 0) { //for linux, mdsd route + Log("Info::mdsd:: using mdsdsource name for KubeMonAgentEvents: %s", MdsdKubeMonAgentEventsTagName) + msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdKubeMonAgentEventsTagName, msgPackEntries) + if MdsdKubeMonMsgpUnixSocketClient == nil { + Log("Error::mdsd::mdsd connection for KubeMonAgentEvents does not exist. re-connecting ...") + CreateMDSDClient(KubeMonAgentEvents, ContainerType) + if MdsdKubeMonMsgpUnixSocketClient == nil { + Log("Error::mdsd::Unable to create mdsd client for KubeMonAgentEvents. Please check error log.") + ContainerLogTelemetryMutex.Lock() + defer ContainerLogTelemetryMutex.Unlock() + KubeMonEventsMDSDClientCreateErrors += 1 + } + } + if MdsdKubeMonMsgpUnixSocketClient != nil { + deadline := 10 * time.Second + MdsdKubeMonMsgpUnixSocketClient.SetWriteDeadline(time.Now().Add(deadline)) //this is based of clock time, so cannot reuse + bts, er := MdsdKubeMonMsgpUnixSocketClient.Write(msgpBytes) + elapsed = time.Since(start) + if er != nil { + message := fmt.Sprintf("Error::mdsd::Failed to write to kubemonagent mdsd %d records after %s. Will retry ... error : %s", len(msgPackEntries), elapsed, er.Error()) + Log(message) + if MdsdKubeMonMsgpUnixSocketClient != nil { + MdsdKubeMonMsgpUnixSocketClient.Close() + MdsdKubeMonMsgpUnixSocketClient = nil + } + SendException(message) + } else { + numRecords := len(msgPackEntries) + Log("FlushKubeMonAgentEventRecords::Info::Successfully flushed %d records that was %d bytes in %s", numRecords, bts, elapsed) + // Send telemetry to AppInsights resource + SendEvent(KubeMonAgentEventsFlushedEvent, telemetryDimensions) + } + } else { + Log("Error::mdsd::Unable to create mdsd client for KubeMonAgentEvents. 
Please check error log.") + } + } else if len(laKubeMonAgentEventsRecords) > 0 { //for windows, ODS direct kubeMonAgentEventEntry := KubeMonAgentEventBlob{ DataType: KubeMonAgentEventDataType, IPName: IPName, @@ -746,70 +869,144 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int message := fmt.Sprintf("PostTelegrafMetricsToLA::Info:derived %v metrics from %v timeseries", len(laMetrics), len(telegrafRecords)) Log(message) } + + if IsWindows == false { //for linux, mdsd route + var msgPackEntries []MsgPackEntry + var i int + start := time.Now() + var elapsed time.Duration + + for i = 0; i < len(laMetrics); i++ { + var interfaceMap map[string]interface{} + stringMap := make(map[string]string) + jsonBytes, err := json.Marshal(*laMetrics[i]) + if err != nil { + message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling json %q", err) + Log(message) + SendException(message) + return output.FLB_OK + } else { + if err := json.Unmarshal(jsonBytes, &interfaceMap); err != nil { + message := fmt.Sprintf("Error while UnMarshalling json bytes to interfaceMap: %s", err.Error()) + Log(message) + SendException(message) + return output.FLB_OK + } else { + for key, value := range interfaceMap { + strKey := fmt.Sprintf("%v", key) + strValue := fmt.Sprintf("%v", value) + stringMap[strKey] = strValue + } + msgPackEntry := MsgPackEntry{ + Record: stringMap, + } + msgPackEntries = append(msgPackEntries, msgPackEntry) + } + } + } + if (len(msgPackEntries) > 0) { + msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdInsightsMetricsTagName, msgPackEntries) + if MdsdInsightsMetricsMsgpUnixSocketClient == nil { + Log("Error::mdsd::mdsd connection does not exist. re-connecting ...") + CreateMDSDClient(InsightsMetrics, ContainerType) + if MdsdInsightsMetricsMsgpUnixSocketClient == nil { + Log("Error::mdsd::Unable to create mdsd client for insights metrics. Please check error log.") + ContainerLogTelemetryMutex.Lock() + defer ContainerLogTelemetryMutex.Unlock() + InsightsMetricsMDSDClientCreateErrors += 1 + return output.FLB_RETRY + } + } - var metrics []laTelegrafMetric - var i int + deadline := 10 * time.Second + MdsdInsightsMetricsMsgpUnixSocketClient.SetWriteDeadline(time.Now().Add(deadline)) //this is based of clock time, so cannot reuse + bts, er := MdsdInsightsMetricsMsgpUnixSocketClient.Write(msgpBytes) - for i = 0; i < len(laMetrics); i++ { - metrics = append(metrics, *laMetrics[i]) - } + elapsed = time.Since(start) - laTelegrafMetrics := InsightsMetricsBlob{ - DataType: InsightsMetricsDataType, - IPName: IPName, - DataItems: metrics} + if er != nil { + Log("Error::mdsd::Failed to write to mdsd %d records after %s. Will retry ... 
error : %s", len(msgPackEntries), elapsed, er.Error()) + if MdsdInsightsMetricsMsgpUnixSocketClient != nil { + MdsdInsightsMetricsMsgpUnixSocketClient.Close() + MdsdInsightsMetricsMsgpUnixSocketClient = nil + } - jsonBytes, err := json.Marshal(laTelegrafMetrics) + ContainerLogTelemetryMutex.Lock() + defer ContainerLogTelemetryMutex.Unlock() + InsightsMetricsMDSDClientCreateErrors += 1 + return output.FLB_RETRY + } else { + numTelegrafMetricsRecords := len(msgPackEntries) + Log("Success::mdsd::Successfully flushed %d telegraf metrics records that was %d bytes to mdsd in %s ", numTelegrafMetricsRecords, bts, elapsed) + } + } + + } else { // for windows, ODS direct - if err != nil { - message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling json %q", err) - Log(message) - SendException(message) - return output.FLB_OK - } + var metrics []laTelegrafMetric + var i int - //Post metrics data to LA - req, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(jsonBytes)) + for i = 0; i < len(laMetrics); i++ { + metrics = append(metrics, *laMetrics[i]) + } - //req.URL.Query().Add("api-version","2016-04-01") + laTelegrafMetrics := InsightsMetricsBlob{ + DataType: InsightsMetricsDataType, + IPName: IPName, + DataItems: metrics} - //set headers - req.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) - req.Header.Set("User-Agent", userAgent) - reqID := uuid.New().String() - req.Header.Set("X-Request-ID", reqID) + jsonBytes, err := json.Marshal(laTelegrafMetrics) - //expensive to do string len for every request, so use a flag - if ResourceCentric == true { - req.Header.Set("x-ms-AzureResourceId", ResourceID) - } + if err != nil { + message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling json %q", err) + Log(message) + SendException(message) + return output.FLB_OK + } - start := time.Now() - resp, err := HTTPClient.Do(req) - elapsed := time.Since(start) + //Post metrics data to LA + req, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(jsonBytes)) - if err != nil { - message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:(retriable) when sending %v metrics. duration:%v err:%q \n", len(laMetrics), elapsed, err.Error()) - Log(message) - UpdateNumTelegrafMetricsSentTelemetry(0, 1, 0) - return output.FLB_RETRY - } + //req.URL.Query().Add("api-version","2016-04-01") - if resp == nil || resp.StatusCode != 200 { - if resp != nil { - Log("PostTelegrafMetricsToLA::Error:(retriable) RequestID %s Response Status %v Status Code %v", reqID, resp.Status, resp.StatusCode) + //set headers + req.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) + req.Header.Set("User-Agent", userAgent) + reqID := uuid.New().String() + req.Header.Set("X-Request-ID", reqID) + + //expensive to do string len for every request, so use a flag + if ResourceCentric == true { + req.Header.Set("x-ms-AzureResourceId", ResourceID) } - if resp != nil && resp.StatusCode == 429 { - UpdateNumTelegrafMetricsSentTelemetry(0, 1, 1) + + start := time.Now() + resp, err := HTTPClient.Do(req) + elapsed := time.Since(start) + + if err != nil { + message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:(retriable) when sending %v metrics. 
duration:%v err:%q \n", len(laMetrics), elapsed, err.Error()) + Log(message) + UpdateNumTelegrafMetricsSentTelemetry(0, 1, 0) + return output.FLB_RETRY + } + + if resp == nil || resp.StatusCode != 200 { + if resp != nil { + Log("PostTelegrafMetricsToLA::Error:(retriable) RequestID %s Response Status %v Status Code %v", reqID, resp.Status, resp.StatusCode) + } + if resp != nil && resp.StatusCode == 429 { + UpdateNumTelegrafMetricsSentTelemetry(0, 1, 1) + } + return output.FLB_RETRY } - return output.FLB_RETRY - } - defer resp.Body.Close() + defer resp.Body.Close() - numMetrics := len(laMetrics) - UpdateNumTelegrafMetricsSentTelemetry(numMetrics, 0, 0) - Log("PostTelegrafMetricsToLA::Info:Successfully flushed %v records in %v", numMetrics, elapsed) + numMetrics := len(laMetrics) + UpdateNumTelegrafMetricsSentTelemetry(numMetrics, 0, 0) + Log("PostTelegrafMetricsToLA::Info:Successfully flushed %v records in %v", numMetrics, elapsed) + } return output.FLB_OK } @@ -986,13 +1183,9 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { numContainerLogRecords := 0 if len(msgPackEntries) > 0 && ContainerLogsRouteV2 == true { - //flush to mdsd - mdsdSourceName := MdsdContainerLogSourceName - if (ContainerLogSchemaV2 == true) { - mdsdSourceName = MdsdContainerLogV2SourceName - } + //flush to mdsd fluentForward := MsgPackForward{ - Tag: mdsdSourceName, + Tag: MdsdContainerLogTagName, Entries: msgPackEntries, } @@ -1019,7 +1212,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { if MdsdMsgpUnixSocketClient == nil { Log("Error::mdsd::mdsd connection does not exist. re-connecting ...") - CreateMDSDClient() + CreateMDSDClient(ContainerLogV2, ContainerType) if MdsdMsgpUnixSocketClient == nil { Log("Error::mdsd::Unable to create mdsd client. Please check error log.") @@ -1286,21 +1479,31 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { log.Fatalln(message) } - osType := os.Getenv("OS_TYPE") + ContainerType = os.Getenv(ContainerTypeEnv) + Log("Container Type %s", ContainerType) + osType := os.Getenv("OS_TYPE") + IsWindows = false // Linux if strings.Compare(strings.ToLower(osType), "windows") != 0 { Log("Reading configuration for Linux from %s", pluginConfPath) - omsadminConf, err := ReadConfiguration(pluginConfig["omsadmin_conf_path"]) - if err != nil { - message := fmt.Sprintf("Error Reading omsadmin configuration %s\n", err.Error()) + WorkspaceID = os.Getenv("WSID") + if WorkspaceID == "" { + message := fmt.Sprintf("WorkspaceID shouldnt be empty") Log(message) SendException(message) time.Sleep(30 * time.Second) log.Fatalln(message) } - OMSEndpoint = omsadminConf["OMS_ENDPOINT"] - WorkspaceID = omsadminConf["WORKSPACE_ID"] + LogAnalyticsWorkspaceDomain = os.Getenv("DOMAIN") + if LogAnalyticsWorkspaceDomain == "" { + message := fmt.Sprintf("Workspace DOMAIN shouldnt be empty") + Log(message) + SendException(message) + time.Sleep(30 * time.Second) + log.Fatalln(message) + } + OMSEndpoint = "https://" + WorkspaceID + ".ods." 
+ LogAnalyticsWorkspaceDomain + "/OperationalData.svc/PostJsonDataItems" // Populate Computer field containerHostName, err1 := ioutil.ReadFile(pluginConfig["container_host_file_path"]) if err1 != nil { @@ -1329,6 +1532,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { } } else { // windows + IsWindows = true Computer = os.Getenv("HOSTNAME") WorkspaceID = os.Getenv("WSID") logAnalyticsDomain := os.Getenv("DOMAIN") @@ -1410,21 +1614,15 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { Log(message) } - PluginConfiguration = pluginConfig - - CreateHTTPClient() + PluginConfiguration = pluginConfig - ContainerLogsRoute := strings.TrimSpace(strings.ToLower(os.Getenv("AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE"))) - Log("AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE:%s", ContainerLogsRoute) + ContainerLogsRoute := strings.TrimSpace(strings.ToLower(os.Getenv("AZMON_CONTAINER_LOGS_ROUTE"))) + Log("AZMON_CONTAINER_LOGS_ROUTE:%s", ContainerLogsRoute) - ContainerLogsRouteV2 = false //default is ODS - ContainerLogsRouteADX = false //default is LA + ContainerLogsRouteV2 = false + ContainerLogsRouteADX = false - if strings.Compare(ContainerLogsRoute, ContainerLogsV2Route) == 0 && strings.Compare(strings.ToLower(osType), "windows") != 0 { - ContainerLogsRouteV2 = true - Log("Routing container logs thru %s route...", ContainerLogsV2Route) - fmt.Fprintf(os.Stdout, "Routing container logs thru %s route... \n", ContainerLogsV2Route) - } else if strings.Compare(ContainerLogsRoute, ContainerLogsADXRoute) == 0 { + if strings.Compare(ContainerLogsRoute, ContainerLogsADXRoute) == 0 { //check if adx clusteruri, clientid & secret are set var err error AdxClusterUri, err = ReadFileContents(PluginConfiguration["adx_cluster_uri_path"]) @@ -1455,14 +1653,30 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { Log("Routing container logs thru %s route...", ContainerLogsADXRoute) fmt.Fprintf(os.Stdout, "Routing container logs thru %s route...\n", ContainerLogsADXRoute) } - } + } else if strings.Compare(strings.ToLower(osType), "windows") != 0 { //for linux, oneagent will be default route + ContainerLogsRouteV2 = true //default is mdsd route + if strings.Compare(ContainerLogsRoute, ContainerLogsV1Route) == 0 { + ContainerLogsRouteV2 = false //fallback option when hiddensetting set + } + Log("Routing container logs thru %s route...", ContainerLogsRoute) + fmt.Fprintf(os.Stdout, "Routing container logs thru %s route... \n", ContainerLogsRoute) + } if ContainerLogsRouteV2 == true { - CreateMDSDClient() + CreateMDSDClient(ContainerLogV2, ContainerType) } else if ContainerLogsRouteADX == true { CreateADXClient() + } else { // v1 or windows + Log("Creating HTTP Client since either OS Platform is Windows or configmap configured with fallback option for ODS direct") + CreateHTTPClient() } + if IsWindows == false { // mdsd linux specific + Log("Creating MDSD clients for KubeMonAgentEvents & InsightsMetrics") + CreateMDSDClient(KubeMonAgentEvents, ContainerType) + CreateMDSDClient(InsightsMetrics, ContainerType) + } + ContainerLogSchemaVersion := strings.TrimSpace(strings.ToLower(os.Getenv("AZMON_CONTAINER_LOG_SCHEMA_VERSION"))) Log("AZMON_CONTAINER_LOG_SCHEMA_VERSION:%s", ContainerLogSchemaVersion) @@ -1491,4 +1705,12 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { Log("Running in replicaset. 
Disabling container enrichment caching & updates \n") } + if ContainerLogSchemaV2 == true { + MdsdContainerLogTagName = MdsdContainerLogV2SourceName + } else { + MdsdContainerLogTagName = MdsdContainerLogSourceName + } + + MdsdInsightsMetricsTagName = MdsdInsightsMetricsSourceName + MdsdKubeMonAgentEventsTagName = MdsdKubeMonAgentEventsSourceName } \ No newline at end of file diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index 461fdea96..4750b4624 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -42,6 +42,10 @@ var ( ContainerLogsSendErrorsToMDSDFromFluent float64 //Tracks the number of mdsd client create errors for containerlogs (uses ContainerLogTelemetryTicker) ContainerLogsMDSDClientCreateErrors float64 + //Tracks the number of mdsd client create errors for insightsmetrics (uses ContainerLogTelemetryTicker) + InsightsMetricsMDSDClientCreateErrors float64 + //Tracks the number of mdsd client create errors for kubemonevents (uses ContainerLogTelemetryTicker) + KubeMonEventsMDSDClientCreateErrors float64 //Tracks the number of write/send errors to ADX for containerlogs (uses ContainerLogTelemetryTicker) ContainerLogsSendErrorsToADXFromFluent float64 //Tracks the number of ADX client create errors for containerlogs (uses ContainerLogTelemetryTicker) @@ -74,6 +78,8 @@ const ( metricNameNumberofSend429ErrorsTelegrafMetrics = "TelegrafMetricsSend429ErrorCount" metricNameErrorCountContainerLogsSendErrorsToMDSDFromFluent = "ContainerLogs2MdsdSendErrorCount" metricNameErrorCountContainerLogsMDSDClientCreateError = "ContainerLogsMdsdClientCreateErrorCount" + metricNameErrorCountInsightsMetricsMDSDClientCreateError = "InsightsMetricsMDSDClientCreateErrorsCount" + metricNameErrorCountKubeMonEventsMDSDClientCreateError = "KubeMonEventsMDSDClientCreateErrorsCount" metricNameErrorCountContainerLogsSendErrorsToADXFromFluent = "ContainerLogs2ADXSendErrorCount" metricNameErrorCountContainerLogsADXClientCreateError = "ContainerLogsADXClientCreateErrorCount" @@ -112,6 +118,8 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { containerLogsMDSDClientCreateErrors := ContainerLogsMDSDClientCreateErrors containerLogsSendErrorsToADXFromFluent := ContainerLogsSendErrorsToADXFromFluent containerLogsADXClientCreateErrors := ContainerLogsADXClientCreateErrors + insightsMetricsMDSDClientCreateErrors := InsightsMetricsMDSDClientCreateErrors + kubeMonEventsMDSDClientCreateErrors := KubeMonEventsMDSDClientCreateErrors osmNamespaceCount := OSMNamespaceCount promMonitorPods := PromMonitorPods promMonitorPodsNamespaceLength := PromMonitorPodsNamespaceLength @@ -132,6 +140,8 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { ContainerLogsMDSDClientCreateErrors = 0.0 ContainerLogsSendErrorsToADXFromFluent = 0.0 ContainerLogsADXClientCreateErrors = 0.0 + InsightsMetricsMDSDClientCreateErrors = 0.0 + KubeMonEventsMDSDClientCreateErrors = 0.0 ContainerLogTelemetryMutex.Unlock() if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 { @@ -186,6 +196,13 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { if containerLogsADXClientCreateErrors > 0.0 { TelemetryClient.Track(appinsights.NewMetricTelemetry(metricNameErrorCountContainerLogsADXClientCreateError, containerLogsADXClientCreateErrors)) } + if insightsMetricsMDSDClientCreateErrors > 0.0 { + 
TelemetryClient.Track(appinsights.NewMetricTelemetry(metricNameErrorCountInsightsMetricsMDSDClientCreateError, insightsMetricsMDSDClientCreateErrors)) + } + if kubeMonEventsMDSDClientCreateErrors > 0.0 { + TelemetryClient.Track(appinsights.NewMetricTelemetry(metricNameErrorCountKubeMonEventsMDSDClientCreateError, kubeMonEventsMDSDClientCreateErrors)) + } + start = time.Now() } } diff --git a/source/plugins/go/src/utils.go b/source/plugins/go/src/utils.go index 61d047e52..3fe5c6d0e 100644 --- a/source/plugins/go/src/utils.go +++ b/source/plugins/go/src/utils.go @@ -12,11 +12,12 @@ import ( "net/url" "os" "strings" - "time" - + "time" + "github.com/Azure/azure-kusto-go/kusto" "github.com/Azure/azure-kusto-go/kusto/ingest" "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/tinylib/msgp/msgp" ) // ReadConfiguration reads a property file @@ -62,7 +63,13 @@ func ReadConfiguration(filename string) (map[string]string, error) { // CreateHTTPClient used to create the client for sending post requests to OMSEndpoint func CreateHTTPClient() { - cert, err := tls.LoadX509KeyPair(PluginConfiguration["cert_file_path"], PluginConfiguration["key_file_path"]) + certFilePath := PluginConfiguration["cert_file_path"] + keyFilePath := PluginConfiguration["key_file_path"] + if IsWindows == false { + certFilePath = fmt.Sprintf(certFilePath, WorkspaceID) + keyFilePath = fmt.Sprintf(keyFilePath, WorkspaceID) + } + cert, err := tls.LoadX509KeyPair(certFilePath, keyFilePath) if err != nil { message := fmt.Sprintf("Error when loading cert %s", err.Error()) SendException(message) @@ -93,7 +100,7 @@ func CreateHTTPClient() { HTTPClient = http.Client{ Transport: transport, Timeout: 30 * time.Second, - } + } Log("Successfully created HTTP Client") } @@ -110,23 +117,58 @@ func ToString(s interface{}) string { } //mdsdSocketClient to write msgp messages -func CreateMDSDClient() { - if MdsdMsgpUnixSocketClient != nil { - MdsdMsgpUnixSocketClient.Close() - MdsdMsgpUnixSocketClient = nil - } - /*conn, err := fluent.New(fluent.Config{FluentNetwork:"unix", - FluentSocketPath:"/var/run/mdsd/default_fluent.socket", - WriteTimeout: 5 * time.Second, - RequestAck: true}) */ - conn, err := net.DialTimeout("unix", - "/var/run/mdsd/default_fluent.socket", 10*time.Second) - if err != nil { - Log("Error::mdsd::Unable to open MDSD msgp socket connection %s", err.Error()) - //log.Fatalf("Unable to open MDSD msgp socket connection %s", err.Error()) - } else { - Log("Successfully created MDSD msgp socket connection") - MdsdMsgpUnixSocketClient = conn +func CreateMDSDClient(dataType DataType, containerType string) { + mdsdfluentSocket := "/var/run/mdsd/default_fluent.socket" + if containerType != "" && strings.Compare(strings.ToLower(containerType), "prometheussidecar") == 0 { + mdsdfluentSocket = fmt.Sprintf("/var/run/mdsd-%s/default_fluent.socket", containerType) + } + switch dataType { + case ContainerLogV2: + if MdsdMsgpUnixSocketClient != nil { + MdsdMsgpUnixSocketClient.Close() + MdsdMsgpUnixSocketClient = nil + } + /*conn, err := fluent.New(fluent.Config{FluentNetwork:"unix", + FluentSocketPath:"/var/run/mdsd/default_fluent.socket", + WriteTimeout: 5 * time.Second, + RequestAck: true}) */ + conn, err := net.DialTimeout("unix", + mdsdfluentSocket, 10*time.Second) + if err != nil { + Log("Error::mdsd::Unable to open MDSD msgp socket connection for ContainerLogV2 %s", err.Error()) + //log.Fatalf("Unable to open MDSD msgp socket connection %s", err.Error()) + } else { + Log("Successfully created 
MDSD msgp socket connection for ContainerLogV2: %s", mdsdfluentSocket) + MdsdMsgpUnixSocketClient = conn + } + case KubeMonAgentEvents: + if MdsdKubeMonMsgpUnixSocketClient != nil { + MdsdKubeMonMsgpUnixSocketClient.Close() + MdsdKubeMonMsgpUnixSocketClient = nil + } + conn, err := net.DialTimeout("unix", + mdsdfluentSocket, 10*time.Second) + if err != nil { + Log("Error::mdsd::Unable to open MDSD msgp socket connection for KubeMon events %s", err.Error()) + //log.Fatalf("Unable to open MDSD msgp socket connection %s", err.Error()) + } else { + Log("Successfully created MDSD msgp socket connection for KubeMon events:%s", mdsdfluentSocket) + MdsdKubeMonMsgpUnixSocketClient = conn + } + case InsightsMetrics: + if MdsdInsightsMetricsMsgpUnixSocketClient != nil { + MdsdInsightsMetricsMsgpUnixSocketClient.Close() + MdsdInsightsMetricsMsgpUnixSocketClient = nil + } + conn, err := net.DialTimeout("unix", + mdsdfluentSocket, 10*time.Second) + if err != nil { + Log("Error::mdsd::Unable to open MDSD msgp socket connection for insights metrics %s", err.Error()) + //log.Fatalf("Unable to open MDSD msgp socket connection %s", err.Error()) + } else { + Log("Successfully created MDSD msgp socket connection for Insights metrics %s", mdsdfluentSocket) + MdsdInsightsMetricsMsgpUnixSocketClient = conn + } } } @@ -178,3 +220,33 @@ func isValidUrl(uri string) bool { } return true } + +func convertMsgPackEntriesToMsgpBytes(fluentForwardTag string, msgPackEntries []MsgPackEntry) []byte { + var msgpBytes []byte + + fluentForward := MsgPackForward{ + Tag: fluentForwardTag, + Entries: msgPackEntries, + } + //determine the size of msgp message + msgpSize := 1 + msgp.StringPrefixSize + len(fluentForward.Tag) + msgp.ArrayHeaderSize + for i := range fluentForward.Entries { + msgpSize += 1 + msgp.Int64Size + msgp.GuessSize(fluentForward.Entries[i].Record) + } + + //allocate buffer for msgp message + msgpBytes = msgp.Require(nil, msgpSize) + + //construct the stream + msgpBytes = append(msgpBytes, 0x92) + msgpBytes = msgp.AppendString(msgpBytes, fluentForward.Tag) + msgpBytes = msgp.AppendArrayHeader(msgpBytes, uint32(len(fluentForward.Entries))) + batchTime := time.Now().Unix() + for entry := range fluentForward.Entries { + msgpBytes = append(msgpBytes, 0x92) + msgpBytes = msgp.AppendInt64(msgpBytes, batchTime) + msgpBytes = msgp.AppendMapStrStr(msgpBytes, fluentForward.Entries[entry].Record) + } + + return msgpBytes +} diff --git a/source/plugins/ruby/ApplicationInsightsUtility.rb b/source/plugins/ruby/ApplicationInsightsUtility.rb index b118cc646..6ae567337 100644 --- a/source/plugins/ruby/ApplicationInsightsUtility.rb +++ b/source/plugins/ruby/ApplicationInsightsUtility.rb @@ -14,7 +14,6 @@ class ApplicationInsightsUtility @@Exception = "ExceptionEvent" @@AcsClusterType = "ACS" @@AksClusterType = "AKS" - @OmsAdminFilePath = "/etc/opt/microsoft/omsagent/conf/omsadmin.conf" @@EnvAcsResourceName = "ACS_RESOURCE_NAME" @@EnvAksRegion = "AKS_REGION" @@EnvAgentVersion = "AGENT_VERSION" @@ -263,14 +262,11 @@ def sendMetricTelemetry(metricName, metricValue, properties) end def getWorkspaceId() - begin - adminConf = {} - confFile = File.open(@OmsAdminFilePath, "r") - confFile.each_line do |line| - splitStrings = line.split("=") - adminConf[splitStrings[0]] = splitStrings[1] + begin + workspaceId = ENV["WSID"] + if workspaceId.nil? || workspaceId.empty? 
+ $log.warn("Exception in AppInsightsUtility: getWorkspaceId - WorkspaceID either nil or empty") end - workspaceId = adminConf["WORKSPACE_ID"] return workspaceId rescue => errorStr $log.warn("Exception in AppInsightsUtility: getWorkspaceId - error: #{errorStr}") @@ -278,14 +274,8 @@ def getWorkspaceId() end def getWorkspaceCloud() - begin - adminConf = {} - confFile = File.open(@OmsAdminFilePath, "r") - confFile.each_line do |line| - splitStrings = line.split("=") - adminConf[splitStrings[0]] = splitStrings[1] - end - workspaceDomain = adminConf["URL_TLD"].strip + begin + workspaceDomain = ENV["DOMAIN"] workspaceCloud = "AzureCloud" if workspaceDomain.casecmp("opinsights.azure.com") == 0 workspaceCloud = "AzureCloud" diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 8cb6f603e..f02459aef 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -203,23 +203,25 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met containerName = container["name"] metricValue = container["cpu"][cpuMetricNameToCollect] metricTime = metricPollTime #container["cpu"]["time"] - metricItem = {} - metricItem["DataItems"] = [] - - metricProps = {} - metricProps["Timestamp"] = metricTime - metricProps["Host"] = hostName - metricProps["ObjectName"] = Constants::OBJECT_NAME_K8S_CONTAINER - metricProps["InstanceName"] = clusterId + "/" + podUid + "/" + containerName - - metricProps["Collections"] = [] - metricCollections = {} - metricCollections["CounterName"] = metricNametoReturn - metricCollections["Value"] = metricValue + - metricProps["Collections"].push(metricCollections) - metricItem["DataItems"].push(metricProps) - metricItems.push(metricItem) + metricItem = {} + metricItem["Timestamp"] = metricTime + metricItem["Host"] = hostName + metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_CONTAINER + metricItem["InstanceName"] = clusterId + "/" + podUid + "/" + containerName + + + metricCollection = {} + metricCollection["CounterName"] = metricNametoReturn + metricCollection["Value"] = metricValue + + metricItem["json_Collections"] = [] + metricCollections = [] + metricCollections.push(metricCollection) + metricItem["json_Collections"] = metricCollections.to_json + metricItems.push(metricItem) + #Telemetry about agent performance begin # we can only do this much now. Ideally would like to use the docker image repository to find our pods/containers @@ -250,11 +252,8 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met telemetryProps["dsPromUrl"] = @dsPromUrlCount end #telemetry about containerlog Routing for daemonset - if File.exist?(Constants::AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2_FILENAME) - telemetryProps["containerLogsRoute"] = "v2" - elsif (!@containerLogsRoute.nil? && !@containerLogsRoute.empty?) - telemetryProps["containerLogsRoute"] = @containerLogsRoute - end + telemetryProps["containerLogsRoute"] = @containerLogsRoute + #telemetry about health model if (!@hmEnabled.nil? && !@hmEnabled.empty?) 
telemetryProps["hmEnabled"] = @hmEnabled @@ -503,18 +502,16 @@ def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, containerName = container["name"] metricValue = container["cpu"][cpuMetricNameToCollect] metricTime = metricPollTime #container["cpu"]["time"] + metricItem = {} - metricItem["DataItems"] = [] - - metricProps = {} - metricProps["Timestamp"] = metricTime - metricProps["Host"] = hostName - metricProps["ObjectName"] = Constants::OBJECT_NAME_K8S_CONTAINER - metricProps["InstanceName"] = clusterId + "/" + podUid + "/" + containerName - - metricProps["Collections"] = [] - metricCollections = {} - metricCollections["CounterName"] = metricNametoReturn + metricItem["Timestamp"] = metricTime + metricItem["Host"] = hostName + metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_CONTAINER + metricItem["InstanceName"] = clusterId + "/" + podUid + "/" + containerName + + metricItem["json_Collections"] = [] + metricCollection = {} + metricCollection["CounterName"] = metricNametoReturn containerId = podUid + "/" + containerName # Adding the containers to the winContainerIdCache so that it can be used by the cleanup routine @@ -545,9 +542,11 @@ def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, @@winContainerPrevMetricRate[containerId] = metricRateValue end - metricCollections["Value"] = metricValue - metricProps["Collections"].push(metricCollections) - metricItem["DataItems"].push(metricProps) + metricCollection["Value"] = metricValue + + metricCollections = [] + metricCollections.push(metricCollection) + metricItem["json_Collections"] = metricCollections.to_json metricItems.push(metricItem) #Telemetry about agent performance begin @@ -629,22 +628,21 @@ def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollec metricTime = metricPollTime #container["memory"]["time"] metricItem = {} - metricItem["DataItems"] = [] - - metricProps = {} - metricProps["Timestamp"] = metricTime - metricProps["Host"] = hostName - metricProps["ObjectName"] = Constants::OBJECT_NAME_K8S_CONTAINER - metricProps["InstanceName"] = clusterId + "/" + podUid + "/" + containerName + metricItem["Timestamp"] = metricTime + metricItem["Host"] = hostName + metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_CONTAINER + metricItem["InstanceName"] = clusterId + "/" + podUid + "/" + containerName + + metricCollection = {} + metricCollection["CounterName"] = metricNametoReturn + metricCollection["Value"] = metricValue + + metricItem["json_Collections"] = [] + metricCollections = [] + metricCollections.push(metricCollection) + metricItem["json_Collections"] = metricCollections.to_json + metricItems.push(metricItem) - metricProps["Collections"] = [] - metricCollections = {} - metricCollections["CounterName"] = metricNametoReturn - metricCollections["Value"] = metricValue - - metricProps["Collections"].push(metricCollections) - metricItem["DataItems"].push(metricProps) - metricItems.push(metricItem) #Telemetry about agent performance begin # we can only do this much now. Ideally would like to use the docker image repository to find our pods/containers @@ -687,22 +685,21 @@ def getNodeMetricItem(metricJSON, hostName, metricCategory, metricNameToCollect, if !node[metricCategory].nil? 
metricValue = node[metricCategory][metricNameToCollect] metricTime = metricPollTime #node[metricCategory]["time"] - - metricItem["DataItems"] = [] - - metricProps = {} - metricProps["Timestamp"] = metricTime - metricProps["Host"] = hostName - metricProps["ObjectName"] = Constants::OBJECT_NAME_K8S_NODE - metricProps["InstanceName"] = clusterId + "/" + nodeName - - metricProps["Collections"] = [] - metricCollections = {} - metricCollections["CounterName"] = metricNametoReturn - metricCollections["Value"] = metricValue - - metricProps["Collections"].push(metricCollections) - metricItem["DataItems"].push(metricProps) + + metricItem["Timestamp"] = metricTime + metricItem["Host"] = hostName + metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_NODE + metricItem["InstanceName"] = clusterId + "/" + nodeName + + + metricCollection = {} + metricCollection["CounterName"] = metricNametoReturn + metricCollection["Value"] = metricValue + + metricItem["json_Collections"] = [] + metricCollections = [] + metricCollections.push(metricCollection) + metricItem["json_Collections"] = metricCollections.to_json end rescue => error @Log.warn("getNodeMetricItem failed: #{error} for metric #{metricNameToCollect}") @@ -805,21 +802,20 @@ def getNodeMetricItemRate(metricJSON, hostName, metricCategory, metricNameToColl end end end - metricItem["DataItems"] = [] - - metricProps = {} - metricProps["Timestamp"] = metricTime - metricProps["Host"] = hostName - metricProps["ObjectName"] = Constants::OBJECT_NAME_K8S_NODE - metricProps["InstanceName"] = clusterId + "/" + nodeName - - metricProps["Collections"] = [] - metricCollections = {} - metricCollections["CounterName"] = metricNametoReturn - metricCollections["Value"] = metricValue - - metricProps["Collections"].push(metricCollections) - metricItem["DataItems"].push(metricProps) + + metricItem["Timestamp"] = metricTime + metricItem["Host"] = hostName + metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_NODE + metricItem["InstanceName"] = clusterId + "/" + nodeName + + metricCollection = {} + metricCollection["CounterName"] = metricNametoReturn + metricCollection["Value"] = metricValue + + metricItem["json_Collections"] = [] + metricCollections = [] + metricCollections.push(metricCollection) + metricItem["json_Collections"] = metricCollections.to_json end rescue => error @Log.warn("getNodeMetricItemRate failed: #{error} for metric #{metricNameToCollect}") @@ -841,22 +837,22 @@ def getNodeLastRebootTimeMetric(metricJSON, hostName, metricNametoReturn, metric metricValue = node["startTime"] metricTime = metricPollTime #Time.now.utc.iso8601 #2018-01-30T19:36:14Z - metricItem["DataItems"] = [] - - metricProps = {} - metricProps["Timestamp"] = metricTime - metricProps["Host"] = hostName - metricProps["ObjectName"] = Constants::OBJECT_NAME_K8S_NODE - metricProps["InstanceName"] = clusterId + "/" + nodeName + + metricItem["Timestamp"] = metricTime + metricItem["Host"] = hostName + metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_NODE + metricItem["InstanceName"] = clusterId + "/" + nodeName - metricProps["Collections"] = [] - metricCollections = {} - metricCollections["CounterName"] = metricNametoReturn + + metricCollection = {} + metricCollection["CounterName"] = metricNametoReturn #Read it from /proc/uptime - metricCollections["Value"] = DateTime.parse(metricTime).to_time.to_i - IO.read("/proc/uptime").split[0].to_f + metricCollection["Value"] = DateTime.parse(metricTime).to_time.to_i - IO.read("/proc/uptime").split[0].to_f - 
metricProps["Collections"].push(metricCollections) - metricItem["DataItems"].push(metricProps) + metricItem["json_Collections"] = [] + metricCollections = [] + metricCollections.push(metricCollection) + metricItem["json_Collections"] = metricCollections.to_json rescue => error @Log.warn("getNodeLastRebootTimeMetric failed: #{error} ") @Log.warn metricJSON @@ -880,21 +876,19 @@ def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, m metricTime = metricPollTime #currentTime metricItem = {} - metricItem["DataItems"] = [] - - metricProps = {} - metricProps["Timestamp"] = metricTime - metricProps["Host"] = hostName - metricProps["ObjectName"] = Constants::OBJECT_NAME_K8S_CONTAINER - metricProps["InstanceName"] = clusterId + "/" + podUid + "/" + containerName - - metricProps["Collections"] = [] - metricCollections = {} - metricCollections["CounterName"] = metricNametoReturn - metricCollections["Value"] = DateTime.parse(metricValue).to_time.to_i - - metricProps["Collections"].push(metricCollections) - metricItem["DataItems"].push(metricProps) + metricItem["Timestamp"] = metricTime + metricItem["Host"] = hostName + metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_CONTAINER + metricItem["InstanceName"] = clusterId + "/" + podUid + "/" + containerName + + metricCollection = {} + metricCollection["CounterName"] = metricNametoReturn + metricCollection["Value"] = DateTime.parse(metricValue).to_time.to_i + + metricItem["json_Collections"] = [] + metricCollections = [] + metricCollections.push(metricCollection) + metricItem["json_Collections"] = metricCollections.to_json metricItems.push(metricItem) end end diff --git a/source/plugins/ruby/DockerApiClient.rb b/source/plugins/ruby/DockerApiClient.rb index f2828b357..53dd1f39f 100644 --- a/source/plugins/ruby/DockerApiClient.rb +++ b/source/plugins/ruby/DockerApiClient.rb @@ -29,7 +29,7 @@ def getResponse(request, isMultiJson, isVersion) loop do begin responseChunk = "" - timeout(@@TimeoutInSeconds) do + Timeout.timeout(@@TimeoutInSeconds) do responseChunk = socket.recv(@@ChunkSize) end dockerResponse += responseChunk diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 98347d272..3720bf6dc 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -405,12 +405,9 @@ def getPodUid(podNameSpace, podMetadata) def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) metricItems = [] - timeDifference = (DateTime.now.to_time.to_i - @@telemetryTimeTracker).abs - timeDifferenceInMinutes = timeDifference / 60 begin clusterId = getClusterId podNameSpace = pod["metadata"]["namespace"] - podName = pod["metadata"]["name"] podUid = getPodUid(podNameSpace, pod["metadata"]) if podUid.nil? return metricItems @@ -442,9 +439,6 @@ def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToColle if (!container["resources"].nil? && !container["resources"].empty? && !container["resources"][metricCategory].nil? && !container["resources"][metricCategory][metricNameToCollect].nil?) 
metricValue = getMetricNumericValue(metricNameToCollect, container["resources"][metricCategory][metricNameToCollect]) - metricItem = {} - metricItem["DataItems"] = [] - metricProps = {} metricProps["Timestamp"] = metricTime metricProps["Host"] = nodeName @@ -453,50 +447,22 @@ def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToColle metricProps["ObjectName"] = "K8SContainer" metricProps["InstanceName"] = clusterId + "/" + podUid + "/" + containerName - metricProps["Collections"] = [] - metricCollections = {} - metricCollections["CounterName"] = metricNametoReturn - metricCollections["Value"] = metricValue - - metricProps["Collections"].push(metricCollections) - metricItem["DataItems"].push(metricProps) - metricItems.push(metricItem) - #Telemetry about omsagent requests and limits - begin - if (podName.downcase.start_with?("omsagent-") && podNameSpace.eql?("kube-system") && containerName.downcase.start_with?("omsagent")) - nodePodContainerKey = [nodeName, podName, containerName, metricNametoReturn].join("~~") - @@resourceLimitsTelemetryHash[nodePodContainerKey] = metricValue - end - if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) - @@resourceLimitsTelemetryHash.each { |key, value| - keyElements = key.split("~~") - if keyElements.length != 4 - next - end - - # get dimension values by key - telemetryProps = {} - telemetryProps["Computer"] = keyElements[0] - telemetryProps["PodName"] = keyElements[1] - telemetryProps["ContainerName"] = keyElements[2] - metricNameFromKey = keyElements[3] - ApplicationInsightsUtility.sendMetricTelemetry(metricNameFromKey, value, telemetryProps) - } - @@telemetryTimeTracker = DateTime.now.to_time.to_i - @@resourceLimitsTelemetryHash = {} - end - rescue => errorStr - $log.warn("Exception while generating Telemetry from getContainerResourceRequestsAndLimits failed: #{errorStr} for metric #{metricNameToCollect}") - end + metricCollection = {} + metricCollection["CounterName"] = metricNametoReturn + metricCollection["Value"] = metricValue + + metricProps["json_Collections"] = [] + metricCollections = [] + metricCollections.push(metricCollection) + metricProps["json_Collections"] = metricCollections.to_json + metricItems.push(metricProps) #No container level limit for the given metric, so default to node level limit else nodeMetricsHashKey = clusterId + "/" + nodeName + "_" + "allocatable" + "_" + metricNameToCollect if (metricCategory == "limits" && @@NodeMetrics.has_key?(nodeMetricsHashKey)) metricValue = @@NodeMetrics[nodeMetricsHashKey] #@Log.info("Limits not set for container #{clusterId + "/" + podUid + "/" + containerName} using node level limits: #{nodeMetricsHashKey}=#{metricValue} ") - metricItem = {} - metricItem["DataItems"] = [] - + metricProps = {} metricProps["Timestamp"] = metricTime metricProps["Host"] = nodeName @@ -505,14 +471,14 @@ def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToColle metricProps["ObjectName"] = "K8SContainer" metricProps["InstanceName"] = clusterId + "/" + podUid + "/" + containerName - metricProps["Collections"] = [] - metricCollections = {} - metricCollections["CounterName"] = metricNametoReturn - metricCollections["Value"] = metricValue - - metricProps["Collections"].push(metricCollections) - metricItem["DataItems"].push(metricProps) - metricItems.push(metricItem) + metricCollection = {} + metricCollection["CounterName"] = metricNametoReturn + metricCollection["Value"] = metricValue + metricProps["json_Collections"] = [] + metricCollections = [] + 
metricCollections.push(metricCollection) + metricProps["json_Collections"] = metricCollections.to_json + metricItems.push(metricProps) end end end @@ -632,22 +598,22 @@ def parseNodeLimitsFromNodeItem(node, metricCategory, metricNameToCollect, metri # metricCategory can be "capacity" or "allocatable" and metricNameToCollect can be "cpu" or "memory" metricValue = getMetricNumericValue(metricNameToCollect, node["status"][metricCategory][metricNameToCollect]) - metricItem["DataItems"] = [] - metricProps = {} - metricProps["Timestamp"] = metricTime - metricProps["Host"] = node["metadata"]["name"] + metricItem["Timestamp"] = metricTime + metricItem["Host"] = node["metadata"]["name"] # Adding this so that it is not set by base omsagent since it was not set earlier and being set by base omsagent - metricProps["Computer"] = node["metadata"]["name"] - metricProps["ObjectName"] = "K8SNode" - metricProps["InstanceName"] = clusterId + "/" + node["metadata"]["name"] - metricProps["Collections"] = [] - metricCollections = {} - metricCollections["CounterName"] = metricNametoReturn - metricCollections["Value"] = metricValue - - metricProps["Collections"].push(metricCollections) - metricItem["DataItems"].push(metricProps) - + metricItem["Computer"] = node["metadata"]["name"] + metricItem["ObjectName"] = "K8SNode" + metricItem["InstanceName"] = clusterId + "/" + node["metadata"]["name"] + + metricCollection = {} + metricCollection["CounterName"] = metricNametoReturn + metricCollection["Value"] = metricValue + metricCollections = [] + metricCollections.push(metricCollection) + + metricItem["json_Collections"] = [] + metricItem["json_Collections"] = metricCollections.to_json + #push node level metrics to a inmem hash so that we can use it looking up at container level. #Currently if container level cpu & memory limits are not defined we default to node level limits @@NodeMetrics[clusterId + "/" + node["metadata"]["name"] + "_" + metricCategory + "_" + metricNameToCollect] = metricValue diff --git a/source/plugins/ruby/MdmMetricsGenerator.rb b/source/plugins/ruby/MdmMetricsGenerator.rb index 6641456af..a809087dc 100644 --- a/source/plugins/ruby/MdmMetricsGenerator.rb +++ b/source/plugins/ruby/MdmMetricsGenerator.rb @@ -525,11 +525,11 @@ def getNodeResourceMetricRecords(record, metric_name, metric_value, percentage_m records = [] begin custommetricrecord = MdmAlertTemplates::Node_resource_metrics_template % { - timestamp: record["DataItems"][0]["Timestamp"], + timestamp: record["Timestamp"], metricName: metric_name, - hostvalue: record["DataItems"][0]["Host"], - objectnamevalue: record["DataItems"][0]["ObjectName"], - instancenamevalue: record["DataItems"][0]["InstanceName"], + hostvalue: record["Host"], + objectnamevalue: record["ObjectName"], + instancenamevalue: record["InstanceName"], metricminvalue: metric_value, metricmaxvalue: metric_value, metricsumvalue: metric_value, @@ -538,11 +538,11 @@ def getNodeResourceMetricRecords(record, metric_name, metric_value, percentage_m if !percentage_metric_value.nil? 
additional_record = MdmAlertTemplates::Node_resource_metrics_template % { - timestamp: record["DataItems"][0]["Timestamp"], + timestamp: record["Timestamp"], metricName: @@node_metric_name_metric_percentage_name_hash[metric_name], - hostvalue: record["DataItems"][0]["Host"], - objectnamevalue: record["DataItems"][0]["ObjectName"], - instancenamevalue: record["DataItems"][0]["InstanceName"], + hostvalue: record["Host"], + objectnamevalue: record["ObjectName"], + instancenamevalue: record["InstanceName"], metricminvalue: percentage_metric_value, metricmaxvalue: percentage_metric_value, metricsumvalue: percentage_metric_value, diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index 906019b95..c037c99f6 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -103,5 +103,5 @@ class Constants #Pod Statuses POD_STATUS_TERMINATING = "Terminating" - AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2_FILENAME = "/opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2" + end diff --git a/source/plugins/ruby/filter_cadvisor2mdm.rb b/source/plugins/ruby/filter_cadvisor2mdm.rb index 659e3000c..62dcf31dc 100644 --- a/source/plugins/ruby/filter_cadvisor2mdm.rb +++ b/source/plugins/ruby/filter_cadvisor2mdm.rb @@ -2,7 +2,9 @@ # frozen_string_literal: true -module Fluent +require 'fluent/plugin/filter' + +module Fluent::Plugin require "logger" require "yajl/json_gem" require_relative "oms_common" @@ -12,7 +14,7 @@ module Fluent require_relative "in_kube_nodes" class CAdvisor2MdmFilter < Filter - Fluent::Plugin.register_filter("filter_cadvisor2mdm", self) + Fluent::Plugin.register_filter("cadvisor2mdm", self) config_param :enable_log, :integer, :default => 0 config_param :log_path, :string, :default => "/var/opt/microsoft/docker-cimprov/log/filter_cadvisor2mdm.log" @@ -65,7 +67,7 @@ def start @containerResourceDimensionHash = {} @pvUsageHash = {} @@metric_threshold_hash = MdmMetricsGenerator.getContainerResourceUtilizationThresholds - @NodeCache = Fluent::NodeStatsCache.new() + @NodeCache = Fluent::Plugin::NodeStatsCache.new() end rescue => e @log.info "Error initializing plugin #{e}" @@ -148,16 +150,16 @@ def filter(tag, time, record) begin if @process_incoming_stream - # Check if insights metrics for PV metrics - data_type = record["DataType"] - if data_type == "INSIGHTS_METRICS_BLOB" + # Check if insights metrics for PV metrics + if record["Name"] == Constants::PV_USED_BYTES return filterPVInsightsMetrics(record) end - object_name = record["DataItems"][0]["ObjectName"] - counter_name = record["DataItems"][0]["Collections"][0]["CounterName"] + object_name = record["ObjectName"] + counter_name = JSON.parse(record["json_Collections"])[0]["CounterName"] + percentage_metric_value = 0.0 - metric_value = record["DataItems"][0]["Collections"][0]["Value"] + metric_value = JSON.parse(record["json_Collections"])[0]["Value"] if object_name == Constants::OBJECT_NAME_K8S_NODE && @metrics_to_collect_hash.key?(counter_name.downcase) # Compute and send % CPU and Memory @@ -165,7 +167,7 @@ def filter(tag, time, record) metric_name = Constants::CPU_USAGE_MILLI_CORES metric_value /= 1000000 #cadvisor record is in nanocores. 
Convert to mc if @@controller_type.downcase == "replicaset" - target_node_cpu_capacity_mc = @NodeCache.cpu.get_capacity(record["DataItems"][0]["Host"]) / 1000000 + target_node_cpu_capacity_mc = @NodeCache.cpu.get_capacity(record["Host"]) / 1000000 else target_node_cpu_capacity_mc = @cpu_capacity end @@ -178,7 +180,7 @@ def filter(tag, time, record) if counter_name.start_with?("memory") metric_name = counter_name if @@controller_type.downcase == "replicaset" - target_node_mem_capacity = @NodeCache.mem.get_capacity(record["DataItems"][0]["Host"]) + target_node_mem_capacity = @NodeCache.mem.get_capacity(record["Host"]) else target_node_mem_capacity = @memory_capacity end @@ -187,12 +189,12 @@ def filter(tag, time, record) percentage_metric_value = metric_value * 100 / target_node_mem_capacity end end - @log.info "percentage_metric_value for metric: #{metric_name} for instance: #{record["DataItems"][0]["Host"]} percentage: #{percentage_metric_value}" + @log.info "percentage_metric_value for metric: #{metric_name} for instance: #{record["Host"]} percentage: #{percentage_metric_value}" # do some sanity checking. Do we want this? if percentage_metric_value > 100.0 or percentage_metric_value < 0.0 telemetryProperties = {} - telemetryProperties["Computer"] = record["DataItems"][0]["Host"] + telemetryProperties["Computer"] = record["Host"] telemetryProperties["MetricName"] = metric_name telemetryProperties["MetricPercentageValue"] = percentage_metric_value ApplicationInsightsUtility.sendCustomEvent("ErrorPercentageOutOfBounds", telemetryProperties) @@ -200,7 +202,7 @@ def filter(tag, time, record) return MdmMetricsGenerator.getNodeResourceMetricRecords(record, metric_name, metric_value, percentage_metric_value) elsif object_name == Constants::OBJECT_NAME_K8S_CONTAINER && @metrics_to_collect_hash.key?(counter_name.downcase) - instanceName = record["DataItems"][0]["InstanceName"] + instanceName = record["InstanceName"] metricName = counter_name # Using node cpu capacity in the absence of container cpu capacity since the container will end up using the # node's capacity in this case. 
Converting this to nanocores for computation purposes, since this is in millicores @@ -235,7 +237,7 @@ def filter(tag, time, record) flushMetricTelemetry if percentage_metric_value >= thresholdPercentage setThresholdExceededTelemetry(metricName) - return MdmMetricsGenerator.getContainerResourceUtilMetricRecords(record["DataItems"][0]["Timestamp"], + return MdmMetricsGenerator.getContainerResourceUtilMetricRecords(record["Timestamp"], metricName, percentage_metric_value, @containerResourceDimensionHash[instanceName], @@ -256,39 +258,36 @@ def filter(tag, time, record) end end - def filterPVInsightsMetrics(record) + def filterPVInsightsMetrics(record) begin mdmMetrics = [] - record["DataItems"].each do |dataItem| - - if dataItem["Name"] == Constants::PV_USED_BYTES && @metrics_to_collect_hash.key?(dataItem["Name"].downcase) - metricName = dataItem["Name"] - usage = dataItem["Value"] - capacity = dataItem["Tags"][Constants::INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES] - if capacity != 0 - percentage_metric_value = (usage * 100.0) / capacity - end - @log.info "percentage_metric_value for metric: #{metricName} percentage: #{percentage_metric_value}" - @log.info "@@metric_threshold_hash for #{metricName}: #{@@metric_threshold_hash[metricName]}" - - computer = dataItem["Computer"] - resourceDimensions = dataItem["Tags"] - thresholdPercentage = @@metric_threshold_hash[metricName] - - flushMetricTelemetry - if percentage_metric_value >= thresholdPercentage - setThresholdExceededTelemetry(metricName) - return MdmMetricsGenerator.getPVResourceUtilMetricRecords(dataItem["CollectionTime"], - metricName, - computer, - percentage_metric_value, - resourceDimensions, - thresholdPercentage) - else - return [] - end # end if block for percentage metric > configured threshold % check - end # end if block for dataItem name check - end # end for block of looping through data items + if record["Name"] == Constants::PV_USED_BYTES && @metrics_to_collect_hash.key?(record["Name"].downcase) + metricName = record["Name"] + usage = record["Value"] + capacity = record["Tags"][Constants::INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES] + if capacity != 0 + percentage_metric_value = (usage * 100.0) / capacity + end + @log.info "percentage_metric_value for metric: #{metricName} percentage: #{percentage_metric_value}" + @log.info "@@metric_threshold_hash for #{metricName}: #{@@metric_threshold_hash[metricName]}" + + computer = record["Computer"] + resourceDimensions = record["Tags"] + thresholdPercentage = @@metric_threshold_hash[metricName] + + flushMetricTelemetry + if percentage_metric_value >= thresholdPercentage + setThresholdExceededTelemetry(metricName) + return MdmMetricsGenerator.getPVResourceUtilMetricRecords(record["CollectionTime"], + metricName, + computer, + percentage_metric_value, + resourceDimensions, + thresholdPercentage) + else + return [] + end # end if block for percentage metric > configured threshold % check + end # end if block for dataItem name check return [] rescue Exception => e @log.info "Error processing cadvisor insights metrics record Exception: #{e.class} Message: #{e.message}" @@ -316,16 +315,22 @@ def ensure_cpu_memory_capacity_set end if !nodeInventory.nil? cpu_capacity_json = KubernetesApiClient.parseNodeLimits(nodeInventory, "capacity", "cpu", "cpuCapacityNanoCores") - if !cpu_capacity_json.nil? && !cpu_capacity_json[0]["DataItems"][0]["Collections"][0]["Value"].to_s.nil? 
- @cpu_capacity = cpu_capacity_json[0]["DataItems"][0]["Collections"][0]["Value"] - @log.info "CPU Limit #{@cpu_capacity}" + if !cpu_capacity_json.nil? + metricVal = JSON.parse(cpu_capacity_json[0]["json_Collections"])[0]["Value"] + if !metricVal.to_s.nil? + @cpu_capacity = metricVal + @log.info "CPU Limit #{@cpu_capacity}" + end else @log.info "Error getting cpu_capacity" end memory_capacity_json = KubernetesApiClient.parseNodeLimits(nodeInventory, "capacity", "memory", "memoryCapacityBytes") - if !memory_capacity_json.nil? && !memory_capacity_json[0]["DataItems"][0]["Collections"][0]["Value"].to_s.nil? - @memory_capacity = memory_capacity_json[0]["DataItems"][0]["Collections"][0]["Value"] - @log.info "Memory Limit #{@memory_capacity}" + if !memory_capacity_json.nil? + metricVal = JSON.parse(cpu_capacity_json[0]["json_Collections"])[0]["Value"] + if !metricVal.to_s.nil? + @memory_capacity = metricVal + @log.info "Memory Limit #{@memory_capacity}" + end else @log.info "Error getting memory_capacity" end @@ -346,7 +351,7 @@ def ensure_cpu_memory_capacity_set end def filter_stream(tag, es) - new_es = MultiEventStream.new + new_es = Fluent::MultiEventStream.new begin ensure_cpu_memory_capacity_set # Getting container limits hash diff --git a/source/plugins/ruby/filter_cadvisor_health_container.rb b/source/plugins/ruby/filter_cadvisor_health_container.rb index 870fcd6d6..ab64b6e61 100644 --- a/source/plugins/ruby/filter_cadvisor_health_container.rb +++ b/source/plugins/ruby/filter_cadvisor_health_container.rb @@ -1,7 +1,9 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -module Fluent +require 'fluent/plugin/filter' + +module Fluent::Plugin require 'logger' require 'yajl/json_gem' require_relative 'oms_common' @@ -11,7 +13,7 @@ module Fluent class CAdvisor2ContainerHealthFilter < Filter include HealthModel - Fluent::Plugin.register_filter('filter_cadvisor_health_container', self) + Fluent::Plugin.register_filter('cadvisor_health_container', self) config_param :log_path, :string, :default => '/var/opt/microsoft/docker-cimprov/log/health_monitors.log' config_param :metrics_to_collect, :string, :default => 'cpuUsageNanoCores,memoryRssBytes' @@ -58,9 +60,9 @@ def start def filter_stream(tag, es) if !@@cluster_health_model_enabled @log.info "Cluster Health Model disabled in filter_cadvisor_health_container" - return MultiEventStream.new + return Fluent::MultiEventStream.new end - new_es = MultiEventStream.new + new_es = Fluent::MultiEventStream.new records_count = 0 es.each { |time, record| begin @@ -83,8 +85,9 @@ def filter(tag, time, record) if record.key?("MonitorLabels") return record end - object_name = record['DataItems'][0]['ObjectName'] - counter_name = record['DataItems'][0]['Collections'][0]['CounterName'].downcase + + object_name = record['ObjectName'] + counter_name = JSON.parse(record['json_Collections'])[0]['CounterName'].downcase if @metrics_to_collect_hash.key?(counter_name) if object_name == @@object_name_k8s_container return @formatter.get_record_from_cadvisor_record(record) diff --git a/source/plugins/ruby/filter_cadvisor_health_node.rb b/source/plugins/ruby/filter_cadvisor_health_node.rb index 27e5bc255..ddbb871e8 100644 --- a/source/plugins/ruby/filter_cadvisor_health_node.rb +++ b/source/plugins/ruby/filter_cadvisor_health_node.rb @@ -1,7 +1,9 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -module Fluent +require 'fluent/plugin/filter' + +module Fluent::Plugin require 'logger' require 'yajl/json_gem' require_relative 'oms_common' @@ -11,8 +13,8 @@ module Fluent 
class CAdvisor2NodeHealthFilter < Filter include HealthModel - Fluent::Plugin.register_filter('filter_cadvisor_health_node', self) - + Fluent::Plugin.register_filter('cadvisor_health_node', self) + attr_accessor :provider, :resources config_param :metrics_to_collect, :string, :default => 'cpuUsageNanoCores,memoryRssBytes' @@ -75,13 +77,13 @@ def start def filter_stream(tag, es) if !@@cluster_health_model_enabled @log.info "Cluster Health Model disabled in filter_cadvisor_health_node" - return MultiEventStream.new + return Fluent::MultiEventStream.new end begin node_capacity = HealthMonitorUtils.ensure_cpu_memory_capacity_set(@@hm_log, @cpu_capacity, @memory_capacity, @@hostName) @cpu_capacity = node_capacity[0] @memory_capacity = node_capacity[1] - new_es = MultiEventStream.new + new_es = Fluent::MultiEventStream.new records_count = 0 es.each { |time, record| filtered_record = filter(tag, time, record) @@ -95,7 +97,7 @@ def filter_stream(tag, es) rescue => e @log.info "Error in filter_cadvisor_health_node filter_stream #{e.backtrace}" ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) - return MultiEventStream.new + return Fluent::MultiEventStream.new end end @@ -105,10 +107,10 @@ def filter(tag, time, record) return record end - object_name = record['DataItems'][0]['ObjectName'] - counter_name = record['DataItems'][0]['Collections'][0]['CounterName'].downcase + object_name = record['ObjectName'] + counter_name = JSON.parse(record['json_Collections'])[0]['CounterName'].downcase if @metrics_to_collect_hash.key?(counter_name.downcase) - metric_value = record['DataItems'][0]['Collections'][0]['Value'] + metric_value = JSON.parse(record['json_Collections'])[0]['Value'] case object_name when @@object_name_k8s_node case counter_name.downcase @@ -134,14 +136,14 @@ def process_node_cpu_record(record, metric_value) if record.nil? return nil else - instance_name = record['DataItems'][0]['InstanceName'] + instance_name = record['InstanceName'] #@log.info "CPU capacity #{@cpu_capacity}" metric_value /= 1000000 percent = (metric_value.to_f/@cpu_capacity*100).round(2) #@log.debug "Percentage of CPU limit: #{percent}" state = HealthMonitorUtils.compute_percentage_state(percent, @provider.get_config(MonitorId::NODE_CPU_MONITOR_ID)) #@log.debug "Computed State : #{state}" - timestamp = record['DataItems'][0]['Timestamp'] + timestamp = record['Timestamp'] health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"cpuUsageMillicores" => metric_value, "cpuUtilizationPercentage" => percent}} monitor_instance_id = HealthMonitorUtils.get_monitor_instance_id(monitor_id, [@@clusterId, @@hostName]) @@ -166,14 +168,14 @@ def process_node_memory_record(record, metric_value) if record.nil? 
return nil else - instance_name = record['DataItems'][0]['InstanceName'] + instance_name = record['InstanceName'] #@log.info "Memory capacity #{@memory_capacity}" percent = (metric_value.to_f/@memory_capacity*100).round(2) #@log.debug "Percentage of Memory limit: #{percent}" state = HealthMonitorUtils.compute_percentage_state(percent, @provider.get_config(MonitorId::NODE_MEMORY_MONITOR_ID)) #@log.debug "Computed State : #{state}" - timestamp = record['DataItems'][0]['Timestamp'] + timestamp = record['Timestamp'] health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"memoryRssBytes" => metric_value.to_f, "memoryUtilizationPercentage" => percent}} #@log.info health_monitor_record diff --git a/source/plugins/ruby/filter_container.rb b/source/plugins/ruby/filter_container.rb deleted file mode 100644 index b72e82dbc..000000000 --- a/source/plugins/ruby/filter_container.rb +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. - -# frozen_string_literal: true - -module Fluent - require 'logger' - - class ContainerFilter < Filter - Fluent::Plugin.register_filter('filter_container', self) - - config_param :enable_log, :integer, :default => 0 - config_param :log_path, :string, :default => '/var/opt/microsoft/omsagent/log/filter_container.log' - - def initialize - super - end - - def configure(conf) - super - @log = nil - - if @enable_log - @log = Logger.new(@log_path, 'weekly') - @log.debug {'Starting filter_container plugin'} - end - end - - def start - super - end - - def shutdown - super - end - - def filter(tag, time, record) - dataType = nil - - record.each do |r| - if dataType == nil - dataType = case r["ClassName"] - when "Container_ImageInventory" then "CONTAINER_IMAGE_INVENTORY_BLOB" - when "Container_ContainerInventory" then "CONTAINER_INVENTORY_BLOB" - when "Container_DaemonEvent" then "CONTAINER_SERVICE_LOG_BLOB" - when "Container_ContainerLog" then "CONTAINER_LOG_BLOB" - end - end - end - - wrapper = { - "DataType"=>dataType, - "IPName"=>"Containers", - "DataItems"=>record - } - - wrapper - end - end -end diff --git a/source/plugins/ruby/filter_docker_log.rb b/source/plugins/ruby/filter_docker_log.rb deleted file mode 100644 index b80f4c204..000000000 --- a/source/plugins/ruby/filter_docker_log.rb +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. - -# frozen_string_literal: true - -module Fluent - require 'logger' - require 'socket' - require 'yajl/json_gem' - - class DockerLogFilter < Filter - Plugin.register_filter('filter_docker_log', self) - - # Set to 1 in config file to enable logging - config_param :enable_log, :integer, :default => 0 - config_param :log_path, :string, :default => '/var/opt/microsoft/omsagent/log/filter_docker_log.txt' - - # This method is called before starting. - def configure(conf) - super - @hostname = Socket.gethostname - # in case get full name, extract up to '.' - dotpos = @hostname.index('.') - if dotpos != nil - @hostname = @hostname[0..dotpos-1] - end - - # Cache the image name and ID of each container so we don't have to inspect each time - @containerCache = Hash.new - - @log = nil - - if @enable_log - @log = Logger.new(@log_path, 'weekly') - @log.debug {'Starting filter_docker_log plugin on ' + @hostname} - end - end - - def filter(tag, time, record) - if @log != nil - @log.debug {'Accepted a log from container ' + record['container_id']} - end - - wrapper = Hash.new - - if record['log'].empty? 
- if @log != nil - @log.debug {'Log from container ' + record['container_id'] + ' had length 0 and will be discarded'} - end - else - # Need to query image information from ID - containerId = record['container_id'] - - unless @containerCache.has_key?(containerId) - if @log != nil - @log.debug {'Container ' + containerId + ' information is not in the cache, inspecting'} - end - - # Value not in cache, use inspect - @containerCache[containerId] = Hash.new - details = '' - - begin - details = JSON.parse(`sudo docker inspect #{containerId}`) - rescue => e - if @log != nil - @log.error {'sudo docker inspect ' + containerId + ' failed'} - end - end - - if details.empty? - # This should not occur - @containerCache[containerId]['Image'] = 'Unknown' - @containerCache[containerId]['ImageName'] = 'Unknown' - - if @log != nil - @log.warn {'The image ID of container ' + containerId + ' could not be determined'} - end - else - @containerCache[containerId]['Image'] = details[0]['Config']['Image'] - @containerCache[containerId]['ImageName'] = details[0]['Config']['Image'] - end - end - - newRecord = @containerCache[containerId] - - # No query is required - newRecord['Id'] = containerId - newRecord['Name'] = record['container_name'][0] == "/" ? record['container_name'][1..-1] : record['container_name'] - newRecord['LogEntrySource'] = record['source'] - newRecord['LogEntry'] = record['log'] - newRecord['Computer'] = @hostname - - wrapper = { - "DataType"=>"CONTAINER_LOG_BLOB", - "IPName"=>"Containers", - "DataItems"=>[newRecord] - } - end - - wrapper - end - end -end diff --git a/source/plugins/ruby/filter_health_model_builder.rb b/source/plugins/ruby/filter_health_model_builder.rb index 36e4801d7..d491f17c2 100644 --- a/source/plugins/ruby/filter_health_model_builder.rb +++ b/source/plugins/ruby/filter_health_model_builder.rb @@ -2,15 +2,17 @@ # frozen_string_literal: true -module Fluent +require 'fluent/plugin/filter' + +module Fluent::Plugin require 'logger' require 'yajl/json_gem' Dir[File.join(__dir__, './health', '*.rb')].each { |file| require file } - + class FilterHealthModelBuilder < Filter include HealthModel - Fluent::Plugin.register_filter('filter_health_model_builder', self) + Fluent::Plugin.register_filter('health_model_builder', self) config_param :enable_log, :integer, :default => 0 config_param :log_path, :string, :default => '/var/opt/microsoft/docker-cimprov/log/filter_health_model_builder.log' @@ -20,7 +22,7 @@ class FilterHealthModelBuilder < Filter attr_reader :buffer, :model_builder, :health_model_definition, :monitor_factory, :state_finalizers, :monitor_set, :model_builder, :hierarchy_builder, :resources, :kube_api_down_handler, :provider, :reducer, :state, :generator, :telemetry - @@rewrite_tag = 'kubehealth.Signals' + @@cluster_id = KubernetesApiClient.getClusterId @@token_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/token" @@cert_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" @@ -29,6 +31,7 @@ class FilterHealthModelBuilder < Filter def initialize begin super + @rewrite_tag = 'oneagent.containerInsights.KUBE_HEALTH_BLOB' @buffer = HealthModel::HealthModelBuffer.new @cluster_health_state = ClusterHealthState.new(@@token_file_path, @@cert_file_path) @health_model_definition = HealthModel::ParentMonitorProvider.new(HealthModel::HealthModelDefinitionParser.new(@model_definition_path).parse_file) @@ -53,6 +56,7 @@ def initialize deserialized_state_info = @cluster_health_state.get_state @state.initialize_state(deserialized_state_info) end + rescue => e 
ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) end @@ -82,11 +86,11 @@ def shutdown def filter_stream(tag, es) if !@@cluster_health_model_enabled @log.info "Cluster Health Model disabled in filter_health_model_builder" - return MultiEventStream.new + return Fluent::MultiEventStream.new end begin - new_es = MultiEventStream.new - time = Time.now + new_es = Fluent::MultiEventStream.new + time = Time.now if tag.start_with?("kubehealth.DaemonSet.Node") node_records = [] @@ -96,7 +100,7 @@ def filter_stream(tag, es) } @buffer.add_to_buffer(node_records) end - return MultiEventStream.new + return Fluent::MultiEventStream.new elsif tag.start_with?("kubehealth.DaemonSet.Container") container_records = [] if !es.nil? @@ -110,7 +114,7 @@ def filter_stream(tag, es) @container_cpu_memory_records = [] #in some clusters, this is null, so initialize it again. end @container_cpu_memory_records.push(*container_records) # push the records for aggregation later - return MultiEventStream.new + return Fluent::MultiEventStream.new elsif tag.start_with?("kubehealth.ReplicaSet") records = [] es.each{|time, record| @@ -218,11 +222,11 @@ def filter_stream(tag, es) @log.info "after optimizing health signals all_monitors.size #{all_monitors.size}" - current_time = Time.now - emit_time = current_time.to_f + # for each key in monitor.keys, # get the state from health_monitor_state # generate the record to send + emit_time = Fluent::Engine.now all_monitors.keys.each{|key| record = @provider.get_record(all_monitors[key], state) if record[HealthMonitorRecordFields::MONITOR_ID] == MonitorId::CLUSTER @@ -241,17 +245,12 @@ def filter_stream(tag, es) @cluster_new_state = new_state end end - end - record_wrapper = { - "DataType" => "KUBE_HEALTH_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [record.each { |k, v| record[k] = v }], - } - new_es.add(emit_time, record_wrapper) + end + new_es.add(emit_time, record) } #emit the stream - router.emit_stream(@@rewrite_tag, new_es) + router.emit_stream(@rewrite_tag, new_es) #initialize monitor_set and model_builder @monitor_set = HealthModel::MonitorSet.new @@ -261,8 +260,8 @@ def filter_stream(tag, es) @cluster_health_state.update_state(@state.to_h) @telemetry.send # return an empty event stream, else the match will throw a NoMethodError - return MultiEventStream.new - elsif tag.start_with?("kubehealth.Signals") + return Fluent::MultiEventStream.new + elsif tag.start_with?(@rewrite_tag) # this filter also acts as a pass through as we are rewriting the tag and emitting to the fluent stream es else @@ -274,6 +273,6 @@ def filter_stream(tag, es) @log.warn "Message: #{e.message} Backtrace: #{e.backtrace}" return nil end - end + end end end diff --git a/source/plugins/ruby/filter_inventory2mdm.rb b/source/plugins/ruby/filter_inventory2mdm.rb index 38ccab885..509ac608e 100644 --- a/source/plugins/ruby/filter_inventory2mdm.rb +++ b/source/plugins/ruby/filter_inventory2mdm.rb @@ -2,14 +2,16 @@ # frozen_string_literal: true -module Fluent +require 'fluent/plugin/filter' + +module Fluent::Plugin require 'logger' require 'yajl/json_gem' require_relative 'oms_common' require_relative 'CustomMetricsUtils' class Inventory2MdmFilter < Filter - Fluent::Plugin.register_filter('filter_inventory2mdm', self) + Fluent::Plugin.register_filter('inventory2mdm', self) config_param :enable_log, :integer, :default => 0 config_param :log_path, :string, :default => '/var/opt/microsoft/docker-cimprov/log/filter_inventory2mdm.log' @@ -115,8 +117,8 @@ def 
process_node_inventory_records(es) es.each{|time,record| begin - timestamp = record['DataItems'][0]['CollectionTime'] - node_status = record['DataItems'][0]['Status'] + timestamp = record['CollectionTime'] + node_status = record['Status'] if node_status.downcase.split(",").include? @@node_status_ready.downcase node_ready_count = node_ready_count+1 else @@ -161,8 +163,8 @@ def process_pod_inventory_records(es) records = [] es.each{|time,record| record_count += 1 - timestamp = record['DataItems'][0]['CollectionTime'] - podUid = record['DataItems'][0]['PodUid'] + timestamp = record['CollectionTime'] + podUid = record['PodUid'] if podUids.key?(podUid) #@log.info "pod with #{podUid} already counted" @@ -170,10 +172,10 @@ def process_pod_inventory_records(es) end podUids[podUid] = true - podPhaseDimValue = record['DataItems'][0]['PodStatus'] - podNamespaceDimValue = record['DataItems'][0]['Namespace'] - podControllerNameDimValue = record['DataItems'][0]['ControllerName'] - podNodeDimValue = record['DataItems'][0]['Computer'] + podPhaseDimValue = record['PodStatus'] + podNamespaceDimValue = record['Namespace'] + podControllerNameDimValue = record['ControllerName'] + podNodeDimValue = record['Computer'] if podControllerNameDimValue.nil? || podControllerNameDimValue.empty? podControllerNameDimValue = 'No Controller' @@ -263,7 +265,7 @@ def process_pod_inventory_records(es) end def filter_stream(tag, es) - new_es = MultiEventStream.new + new_es = Fluent::MultiEventStream.new filtered_records = [] time = DateTime.now begin diff --git a/source/plugins/ruby/filter_telegraf2mdm.rb b/source/plugins/ruby/filter_telegraf2mdm.rb index 88ae428d1..fd71f1682 100644 --- a/source/plugins/ruby/filter_telegraf2mdm.rb +++ b/source/plugins/ruby/filter_telegraf2mdm.rb @@ -2,7 +2,9 @@ # frozen_string_literal: true -module Fluent +require 'fluent/plugin/filter' + +module Fluent::Plugin require "logger" require "yajl/json_gem" require_relative "oms_common" @@ -11,7 +13,7 @@ module Fluent require_relative "constants" class Telegraf2MdmFilter < Filter - Fluent::Plugin.register_filter("filter_telegraf2mdm", self) + Fluent::Plugin.register_filter("telegraf2mdm", self) config_param :enable_log, :integer, :default => 0 config_param :log_path, :string, :default => "/var/opt/microsoft/docker-cimprov/log/filter_telegraf2mdm.log" @@ -64,7 +66,7 @@ def filter(tag, time, record) end def filter_stream(tag, es) - new_es = MultiEventStream.new + new_es = Fluent::MultiEventStream.new begin es.each { |time, record| filtered_records = filter(tag, time, record) diff --git a/source/plugins/ruby/health/health_container_cpu_memory_record_formatter.rb b/source/plugins/ruby/health/health_container_cpu_memory_record_formatter.rb index 12c72a120..ebf3abd7e 100644 --- a/source/plugins/ruby/health/health_container_cpu_memory_record_formatter.rb +++ b/source/plugins/ruby/health/health_container_cpu_memory_record_formatter.rb @@ -17,10 +17,10 @@ def initialize def get_record_from_cadvisor_record(cadvisor_record) begin - instance_name = cadvisor_record['DataItems'][0]['InstanceName'] - counter_name = cadvisor_record['DataItems'][0]['Collections'][0]['CounterName'] - metric_value = cadvisor_record['DataItems'][0]['Collections'][0]['Value'] - timestamp = cadvisor_record['DataItems'][0]['Timestamp'] + instance_name = cadvisor_record['InstanceName'] + counter_name = JSON.parse(cadvisor_record['json_Collections'])[0]['CounterName'] + metric_value = JSON.parse(cadvisor_record['json_Collections'])[0]['Value'] + timestamp = cadvisor_record['Timestamp'] 
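On the consumer side (filter_cadvisor2mdm, the cadvisor health filters, and the record formatter above), scalar fields are now read straight off the flat record and the counter array is recovered with JSON.parse(record["json_Collections"]). A small illustrative helper, assuming the record shape sketched earlier, that parses the string once instead of once per field; the helper name is hypothetical and not part of the patch:

    require "json"

    # Returns [counter_name, value] from a flattened perf record, or nil
    # when the record carries no parsable "json_Collections" payload.
    def first_collection(record)
      collections = JSON.parse(record["json_Collections"])
      return nil if collections.nil? || collections.empty?
      [collections[0]["CounterName"], collections[0]["Value"]]
    rescue JSON::ParserError, TypeError
      nil
    end

    counter_name, metric_value = first_collection(record)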
health_container_cpu_memory_record = @@health_container_cpu_memory_record_template % { instance_name: instance_name, diff --git a/source/plugins/ruby/health/health_monitor_utils.rb b/source/plugins/ruby/health/health_monitor_utils.rb index c23d8824a..58f2ecc36 100644 --- a/source/plugins/ruby/health/health_monitor_utils.rb +++ b/source/plugins/ruby/health/health_monitor_utils.rb @@ -171,8 +171,9 @@ def get_cluster_cpu_memory_capacity(log, node_inventory: nil) cpu_capacity_json = KubernetesApiClient.parseNodeLimits(node_inventory, "capacity", "cpu", "cpuCapacityNanoCores") if !cpu_capacity_json.nil? cpu_capacity_json.each do |cpu_capacity_node| - if !cpu_capacity_node['DataItems'][0]['Collections'][0]['Value'].to_s.nil? - cluster_cpu_capacity += cpu_capacity_node['DataItems'][0]['Collections'][0]['Value'] + metricVal = JSON.parse(cpu_capacity_node['json_Collections'])[0]['Value'] + if !metricVal.to_s.nil? + cluster_cpu_capacity += metricVal end end else @@ -181,8 +182,9 @@ def get_cluster_cpu_memory_capacity(log, node_inventory: nil) memory_capacity_json = KubernetesApiClient.parseNodeLimits(node_inventory, "capacity", "memory", "memoryCapacityBytes") if !memory_capacity_json.nil? memory_capacity_json.each do |memory_capacity_node| - if !memory_capacity_node['DataItems'][0]['Collections'][0]['Value'].to_s.nil? - cluster_memory_capacity += memory_capacity_node['DataItems'][0]['Collections'][0]['Value'] + metricVal = JSON.parse(memory_capacity_node['json_Collections'])[0]['Value'] + if !metricVal.to_s.nil? + cluster_memory_capacity += metricVal end end else @@ -284,7 +286,7 @@ def build_metrics_hash(metrics_to_collect) def get_health_monitor_config health_monitor_config = {} begin - file = File.open('/opt/microsoft/omsagent/plugin/healthmonitorconfig.json', "r") + file = File.open('/etc/opt/microsoft/docker-cimprov/health/healthmonitorconfig.json', "r") if !file.nil? 
fileContents = file.read health_monitor_config = JSON.parse(fileContents) diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index b706ff00a..781042cea 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -1,10 +1,11 @@ #!/usr/local/bin/ruby # frozen_string_literal: true +require 'fluent/plugin/input' -module Fluent +module Fluent::Plugin class CAdvisor_Perf_Input < Input - Plugin.register_input("cadvisorperf", self) + Fluent::Plugin.register_input("cadvisor_perf", self) def initialize super @@ -15,14 +16,15 @@ def initialize require_relative "CAdvisorMetricsAPIClient" require_relative "oms_common" require_relative "omslog" - require_relative "constants" + require_relative "constants" end config_param :run_interval, :time, :default => 60 - config_param :tag, :string, :default => "oms.api.cadvisorperf" + config_param :tag, :string, :default => "oneagent.containerInsights.LINUX_PERF_BLOB" config_param :mdmtag, :string, :default => "mdm.cadvisorperf" config_param :nodehealthtag, :string, :default => "kubehealth.DaemonSet.Node" config_param :containerhealthtag, :string, :default => "kubehealth.DaemonSet.Container" + config_param :insightsmetricstag, :string, :default => "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" def configure(conf) super @@ -30,6 +32,7 @@ def configure(conf) def start if @run_interval + super @finished = false @condition = ConditionVariable.new @mutex = Mutex.new @@ -44,24 +47,23 @@ def shutdown @condition.signal } @thread.join + super # This super must be at the end of shutdown method end end def enumerate() currentTime = Time.now - time = currentTime.to_f + time = Fluent::Engine.now batchTime = currentTime.utc.iso8601 @@istestvar = ENV["ISTEST"] begin - eventStream = MultiEventStream.new - insightsMetricsEventStream = MultiEventStream.new + eventStream = Fluent::MultiEventStream.new + insightsMetricsEventStream = Fluent::MultiEventStream.new metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, metricTime: batchTime ) - metricData.each do |record| - record["DataType"] = "LINUX_PERF_BLOB" - record["IPName"] = "LogManagement" - eventStream.add(time, record) if record - end - + metricData.each do |record| + eventStream.add(time, record) if record + end + router.emit_stream(@tag, eventStream) if eventStream router.emit_stream(@mdmtag, eventStream) if eventStream router.emit_stream(@containerhealthtag, eventStream) if eventStream @@ -75,19 +77,13 @@ def enumerate() #start GPU InsightsMetrics items begin containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, metricTime: batchTime)) - + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, metricTime: batchTime)) containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| - wrapper = { - "DataType" => "INSIGHTS_METRICS_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], - } - insightsMetricsEventStream.add(time, wrapper) if wrapper + insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord end - router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + router.emit_stream(@insightsmetricstag, insightsMetricsEventStream) if insightsMetricsEventStream router.emit_stream(@mdmtag, 
insightsMetricsEventStream) if insightsMetricsEventStream if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) @@ -135,6 +131,6 @@ def run_periodic @mutex.lock end @mutex.unlock - end + end end # CAdvisor_Perf_Input end # module diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index c1126aa4e..eebf422d6 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -1,9 +1,11 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -module Fluent +require 'fluent/plugin/input' + +module Fluent::Plugin class Container_Inventory_Input < Input - Plugin.register_input("containerinventory", self) + Fluent::Plugin.register_input("containerinventory", self) @@PluginName = "ContainerInventory" @@ -19,7 +21,7 @@ def initialize end config_param :run_interval, :time, :default => 60 - config_param :tag, :string, :default => "oms.containerinsights.containerinventory" + config_param :tag, :string, :default => "oneagent.containerInsights.CONTAINER_INVENTORY_BLOB" def configure(conf) super @@ -27,6 +29,7 @@ def configure(conf) def start if @run_interval + super @finished = false @condition = ConditionVariable.new @mutex = Mutex.new @@ -42,17 +45,18 @@ def shutdown @condition.signal } @thread.join + super # This super must be at the end of shutdown method end end def enumerate - currentTime = Time.now - emitTime = currentTime.to_f + currentTime = Time.now batchTime = currentTime.utc.iso8601 + emitTime = Fluent::Engine.now containerInventory = Array.new - eventStream = MultiEventStream.new + eventStream = Fluent::MultiEventStream.new hostName = "" - $log.info("in_container_inventory::enumerate : Begin processing @ #{Time.now.utc.iso8601}") + $log.info("in_container_inventory::enumerate : Begin processing @ #{Time.now.utc.iso8601}") begin containerRuntimeEnv = ENV["CONTAINER_RUNTIME"] $log.info("in_container_inventory::enumerate : container runtime : #{containerRuntimeEnv}") @@ -89,13 +93,8 @@ def enumerate end end end - containerInventory.each do |record| - wrapper = { - "DataType" => "CONTAINER_INVENTORY_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [record.each { |k, v| record[k] = v }], - } - eventStream.add(emitTime, wrapper) if wrapper + containerInventory.each do |record| + eventStream.add(emitTime, record) if record end router.emit_stream(@tag, eventStream) if eventStream @@istestvar = ENV["ISTEST"] @@ -149,6 +148,6 @@ def run_periodic @mutex.lock end @mutex.unlock - end + end end # Container_Inventory_Input end # module diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index f50019a01..6f65dab92 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -1,9 +1,11 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -module Fluent +require 'fluent/plugin/input' + +module Fluent::Plugin class Kube_Event_Input < Input - Plugin.register_input("kubeevents", self) + Fluent::Plugin.register_input("kube_events", self) @@KubeEventsStateFile = "/var/opt/microsoft/docker-cimprov/state/KubeEventQueryState.yaml" def initialize @@ -29,14 +31,15 @@ def initialize end config_param :run_interval, :time, :default => 60 - config_param :tag, :string, :default => "oms.containerinsights.KubeEvents" + config_param :tag, :string, :default => "oneagent.containerInsights.KUBE_EVENTS_BLOB" def configure(conf) super end - def start + def start if @run_interval + 
super if !ENV["EVENTS_CHUNK_SIZE"].nil? && !ENV["EVENTS_CHUNK_SIZE"].empty? && ENV["EVENTS_CHUNK_SIZE"].to_i > 0 @EVENTS_CHUNK_SIZE = ENV["EVENTS_CHUNK_SIZE"].to_i else @@ -70,6 +73,7 @@ def shutdown @condition.signal } @thread.join + super end end @@ -80,8 +84,8 @@ def enumerate batchTime = currentTime.utc.iso8601 eventQueryState = getEventQueryState newEventQueryState = [] - @eventsCount = 0 - + @eventsCount = 0 + # Initializing continuation token to nil continuationToken = nil $log.info("in_kube_events::enumerate : Getting events from Kube API @ #{Time.now.utc.iso8601}") @@ -127,11 +131,11 @@ def enumerate end # end enumerate def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTime = Time.utc.iso8601) - currentTime = Time.now - emitTime = currentTime.to_f + currentTime = Time.now + emitTime = Fluent::Engine.now @@istestvar = ENV["ISTEST"] begin - eventStream = MultiEventStream.new + eventStream = Fluent::MultiEventStream.new events["items"].each do |items| record = {} # - Not sure if ingestion has the below mapping for this custom type. Fix it as part of fixed type conversion @@ -162,13 +166,8 @@ def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTim record["Count"] = items["count"] record["Computer"] = nodeName record["ClusterName"] = KubernetesApiClient.getClusterName - record["ClusterId"] = KubernetesApiClient.getClusterId - wrapper = { - "DataType" => "KUBE_EVENTS_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [record.each { |k, v| record[k] = v }], - } - eventStream.add(emitTime, wrapper) if wrapper + record["ClusterId"] = KubernetesApiClient.getClusterId + eventStream.add(emitTime, record) if record @eventsCount += 1 end router.emit_stream(@tag, eventStream) if eventStream diff --git a/source/plugins/ruby/in_kube_health.rb b/source/plugins/ruby/in_kube_health.rb index 874be26f6..db981c53e 100644 --- a/source/plugins/ruby/in_kube_health.rb +++ b/source/plugins/ruby/in_kube_health.rb @@ -1,17 +1,19 @@ #!/usr/local/bin/ruby # frozen_string_literal: true +require 'fluent/plugin/input' + require_relative "KubernetesApiClient" require_relative "oms_common" require_relative "omslog" require_relative "ApplicationInsightsUtility" -module Fluent +module Fluent::Plugin Dir[File.join(__dir__, "./health", "*.rb")].each { |file| require file } class KubeHealthInput < Input include HealthModel - Plugin.register_input("kubehealth", self) + Fluent::Plugin.register_input("kube_health", self) config_param :health_monitor_config_path, :default => "/etc/opt/microsoft/docker-cimprov/health/healthmonitorconfig.json" @@ -46,6 +48,7 @@ def configure(conf) def start begin + super if @run_interval @finished = false @condition = ConditionVariable.new @@ -76,20 +79,21 @@ def shutdown @condition.signal } @thread.join + super # This super must be at the end of shutdown method end end def enumerate if !@@cluster_health_model_enabled @@hmlog.info "Cluster Health Model disabled in in_kube_health" - return MultiEventStream.new + return Fluent::MultiEventStream.new end begin - currentTime = Time.now - emitTime = currentTime.to_f + currentTime = Time.now + emitTime = Fluent::Engine.now batchTime = currentTime.utc.iso8601 health_monitor_records = [] - eventStream = MultiEventStream.new + eventStream = Fluent::MultiEventStream.new #HealthMonitorUtils.refresh_kubernetes_api_data(@@hmlog, nil) # we do this so that if the call fails, we get a response code/header etc. 
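The input plugins touched in this patch (in_cadvisor_perf, in_containerinventory, in_kube_events, in_kube_health, and the ones that follow) all move to the fluentd v1 plugin API the same way: require fluent/plugin/input, define the class under Fluent::Plugin, register it under its new name, call super in start and at the end of shutdown, build Fluent::MultiEventStream instances, and stamp events with Fluent::Engine.now instead of Time.now.to_f. A condensed sketch of that pattern with a hypothetical plugin name (example_perf) and a single hard-coded record, for illustration only:

    require "fluent/plugin/input"

    module Fluent::Plugin
      class ExamplePerfInput < Input
        # Hypothetical plugin; the real inputs in this patch follow the same outline.
        Fluent::Plugin.register_input("example_perf", self)

        config_param :run_interval, :time, :default => 60
        config_param :tag, :string, :default => "oneagent.containerInsights.LINUX_PERF_BLOB"

        def start
          super                 # v1 API: let the base class set up before starting work
          @thread = Thread.new { enumerate }
        end

        def shutdown
          @thread.join if @thread
          super                 # v1 API: super last, once the worker has stopped
        end

        def enumerate
          eventStream = Fluent::MultiEventStream.new        # fully qualified under Fluent::Plugin
          eventStream.add(Fluent::Engine.now, { "Computer" => "node-1" })
          router.emit_stream(@tag, eventStream) if eventStream.count > 0
        end
      end
    end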
diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index 99e804302..ffc11de55 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -1,17 +1,17 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -module Fluent - class Kube_nodeInventory_Input < Input - Plugin.register_input("kubenodeinventory", self) +require 'fluent/plugin/input' - @@ContainerNodeInventoryTag = "oms.containerinsights.ContainerNodeInventory" - @@MDMKubeNodeInventoryTag = "mdm.kubenodeinventory" +module Fluent::Plugin + class Kube_nodeInventory_Input < Input + Fluent::Plugin.register_input("kube_nodes", self) + @@configMapMountPath = "/etc/config/settings/log-data-collection-settings" @@promConfigMountPath = "/etc/config/settings/prometheus-data-collection-settings" @@osmConfigMountPath = "/etc/config/osm-settings/osm-metric-collection-configuration" @@AzStackCloudFileName = "/etc/kubernetes/host/azurestackcloud.json" - @@kubeperfTag = "oms.api.KubePerf" + @@rsPromInterval = ENV["TELEMETRY_RS_PROM_INTERVAL"] @@rsPromFieldPassCount = ENV["TELEMETRY_RS_PROM_FIELDPASS_LENGTH"] @@ -35,7 +35,13 @@ def initialize require_relative "KubernetesApiClient" require_relative "ApplicationInsightsUtility" require_relative "oms_common" - require_relative "omslog" + require_relative "omslog" + + @ContainerNodeInventoryTag = "oneagent.containerInsights.CONTAINER_NODE_INVENTORY_BLOB" + @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" + @MDMKubeNodeInventoryTag = "mdm.kubenodeinventory" + @kubeperfTag = "oneagent.containerInsights.LINUX_PERF_BLOB" + # refer tomlparser-agent-config for the defaults @NODES_CHUNK_SIZE = 0 @NODES_EMIT_STREAM_BATCH_SIZE = 0 @@ -48,14 +54,15 @@ def initialize end config_param :run_interval, :time, :default => 60 - config_param :tag, :string, :default => "oms.containerinsights.KubeNodeInventory" + config_param :tag, :string, :default => "oneagent.containerInsights.KUBE_NODE_INVENTORY_BLOB" def configure(conf) super end - def start + def start if @run_interval + super if !ENV["NODES_CHUNK_SIZE"].nil? && !ENV["NODES_CHUNK_SIZE"].empty? 
&& ENV["NODES_CHUNK_SIZE"].to_i > 0 @NODES_CHUNK_SIZE = ENV["NODES_CHUNK_SIZE"].to_i else @@ -90,6 +97,7 @@ def shutdown @condition.signal } @thread.join + super # This super must be at the end of shutdown method end end @@ -101,8 +109,10 @@ def enumerate @nodesAPIE2ELatencyMs = 0 @nodeInventoryE2EProcessingLatencyMs = 0 - nodeInventoryStartTime = (Time.now.to_f * 1000).to_i + nodeInventoryStartTime = (Time.now.to_f * 1000).to_i + nodesAPIChunkStartTime = (Time.now.to_f * 1000).to_i + # Initializing continuation token to nil continuationToken = nil $log.info("in_kube_nodes::enumerate : Getting nodes from Kube API @ #{Time.now.utc.iso8601}") @@ -151,49 +161,38 @@ def enumerate def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) begin - currentTime = Time.now - emitTime = currentTime.to_f + currentTime = Time.now + emitTime = Fluent::Engine.now telemetrySent = false - eventStream = MultiEventStream.new - containerNodeInventoryEventStream = MultiEventStream.new - insightsMetricsEventStream = MultiEventStream.new - kubePerfEventStream = MultiEventStream.new + eventStream = Fluent::MultiEventStream.new + containerNodeInventoryEventStream = Fluent::MultiEventStream.new + insightsMetricsEventStream = Fluent::MultiEventStream.new + kubePerfEventStream = Fluent::MultiEventStream.new @@istestvar = ENV["ISTEST"] #get node inventory nodeInventory["items"].each do |item| # node inventory nodeInventoryRecord = getNodeInventoryRecord(item, batchTime) - wrapper = { - "DataType" => "KUBE_NODE_INVENTORY_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [nodeInventoryRecord.each { |k, v| nodeInventoryRecord[k] = v }], - } - eventStream.add(emitTime, wrapper) if wrapper + eventStream.add(emitTime, nodeInventoryRecord) if nodeInventoryRecord if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && eventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE $log.info("in_kube_node::parse_and_emit_records: number of node inventory records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") router.emit_stream(@tag, eventStream) if eventStream $log.info("in_kube_node::parse_and_emit_records: number of mdm node inventory records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") - router.emit_stream(@@MDMKubeNodeInventoryTag, eventStream) if eventStream - + router.emit_stream(@MDMKubeNodeInventoryTag, eventStream) if eventStream if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) $log.info("kubeNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") end - eventStream = MultiEventStream.new + eventStream = Fluent::MultiEventStream.new end # container node inventory - containerNodeInventoryRecord = getContainerNodeInventoryRecord(item, batchTime) - containerNodeInventoryWrapper = { - "DataType" => "CONTAINER_NODE_INVENTORY_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [containerNodeInventoryRecord.each { |k, v| containerNodeInventoryRecord[k] = v }], - } - containerNodeInventoryEventStream.add(emitTime, containerNodeInventoryWrapper) if containerNodeInventoryWrapper + containerNodeInventoryRecord = getContainerNodeInventoryRecord(item, batchTime) + containerNodeInventoryEventStream.add(emitTime, containerNodeInventoryRecord) if containerNodeInventoryRecord if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && containerNodeInventoryEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE $log.info("in_kube_node::parse_and_emit_records: number of container node inventory records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") - router.emit_stream(@@ContainerNodeInventoryTag, containerNodeInventoryEventStream) if containerNodeInventoryEventStream - containerNodeInventoryEventStream = MultiEventStream.new + router.emit_stream(@ContainerNodeInventoryTag, containerNodeInventoryEventStream) if containerNodeInventoryEventStream + containerNodeInventoryEventStream = Fluent::MultiEventStream.new if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) $log.info("containerNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") end @@ -223,7 +222,8 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) nodeMetricRecords.push(nodeMetricRecord) # add data to the cache so filter_cadvisor2mdm.rb can use it if is_windows_node - @NodeCache.cpu.set_capacity(nodeMetricRecord["DataItems"][0]["Host"], nodeMetricRecord["DataItems"][0]["Collections"][0]["Value"]) + metricVal = JSON.parse(nodeMetricRecord["json_Collections"])[0]["Value"] + @NodeCache.cpu.set_capacity(nodeMetricRecord["Host"], metricVal) end end nodeMetricRecord = KubernetesApiClient.parseNodeLimitsFromNodeItem(item, "capacity", "memory", "memoryCapacityBytes", batchTime) @@ -231,18 +231,17 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) nodeMetricRecords.push(nodeMetricRecord) # add data to the cache so filter_cadvisor2mdm.rb can use it if is_windows_node - @NodeCache.mem.set_capacity(nodeMetricRecord["DataItems"][0]["Host"], nodeMetricRecord["DataItems"][0]["Collections"][0]["Value"]) + metricVal = JSON.parse(nodeMetricRecord["json_Collections"])[0]["Value"] + @NodeCache.mem.set_capacity(nodeMetricRecord["Host"], metricVal) end end - nodeMetricRecords.each do |metricRecord| - metricRecord["DataType"] = "LINUX_PERF_BLOB" - metricRecord["IPName"] = "LogManagement" + nodeMetricRecords.each do |metricRecord| kubePerfEventStream.add(emitTime, metricRecord) if metricRecord end if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && kubePerfEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE $log.info("in_kube_nodes::parse_and_emit_records: number of node perf metric records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") - router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream - kubePerfEventStream = MultiEventStream.new + router.emit_stream(@kubeperfTag, kubePerfEventStream) if kubePerfEventStream + kubePerfEventStream = Fluent::MultiEventStream.new if (!@@istestvar.nil? 
&& !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) $log.info("kubeNodePerfEmitStreamSuccess @ #{Time.now.utc.iso8601}") end @@ -266,18 +265,13 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) if !insightsMetricsRecord.nil? && !insightsMetricsRecord.empty? nodeGPUInsightsMetricsRecords.push(insightsMetricsRecord) end - nodeGPUInsightsMetricsRecords.each do |insightsMetricsRecord| - wrapper = { - "DataType" => "INSIGHTS_METRICS_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], - } - insightsMetricsEventStream.add(emitTime, wrapper) if wrapper + nodeGPUInsightsMetricsRecords.each do |insightsMetricsRecord| + insightsMetricsEventStream.add(emitTime, insightsMetricsRecord) if insightsMetricsRecord end if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && insightsMetricsEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE $log.info("in_kube_nodes::parse_and_emit_records: number of GPU node perf metric records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") - router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream - insightsMetricsEventStream = MultiEventStream.new + router.emit_stream(@insightsMetricsTag, insightsMetricsEventStream) if insightsMetricsEventStream + insightsMetricsEventStream = Fluent::MultiEventStream.new if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) $log.info("kubeNodeInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") end @@ -337,15 +331,15 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) $log.info("in_kube_node::parse_and_emit_records: number of node inventory records emitted #{eventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@tag, eventStream) if eventStream $log.info("in_kube_node::parse_and_emit_records: number of mdm node inventory records emitted #{eventStream.count} @ #{Time.now.utc.iso8601}") - router.emit_stream(@@MDMKubeNodeInventoryTag, eventStream) if eventStream + router.emit_stream(@MDMKubeNodeInventoryTag, eventStream) if eventStream if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) $log.info("kubeNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") end - eventStream = nil + eventStream = nil end if containerNodeInventoryEventStream.count > 0 $log.info("in_kube_node::parse_and_emit_records: number of container node inventory records emitted #{containerNodeInventoryEventStream.count} @ #{Time.now.utc.iso8601}") - router.emit_stream(@@ContainerNodeInventoryTag, containerNodeInventoryEventStream) if containerNodeInventoryEventStream + router.emit_stream(@ContainerNodeInventoryTag, containerNodeInventoryEventStream) if containerNodeInventoryEventStream containerNodeInventoryEventStream = nil if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) $log.info("containerNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") @@ -354,7 +348,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) if kubePerfEventStream.count > 0 $log.info("in_kube_nodes::parse_and_emit_records: number of node perf metric records emitted #{kubePerfEventStream.count} @ #{Time.now.utc.iso8601}") - router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream + router.emit_stream(@kubeperfTag, kubePerfEventStream) if kubePerfEventStream kubePerfEventStream = nil if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) $log.info("kubeNodePerfInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") @@ -362,7 +356,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) end if insightsMetricsEventStream.count > 0 $log.info("in_kube_nodes::parse_and_emit_records: number of GPU node perf metric records emitted #{insightsMetricsEventStream.count} @ #{Time.now.utc.iso8601}") - router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + router.emit_stream(@insightsMetricsTag, insightsMetricsEventStream) if insightsMetricsEventStream insightsMetricsEventStream = nil if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) $log.info("kubeNodeInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") @@ -513,10 +507,8 @@ def getNodeTelemetryProps(item) $log.warn "in_kube_nodes::getContainerNodeIngetNodeTelemetryPropsventoryRecord:Failed: #{errorStr}" end return properties - end + end end # Kube_Node_Input - - class NodeStatsCache # inner class for caching implementation (CPU and memory caching is handled the exact same way, so logic to do so is moved to a private inner class) # (to reduce code duplication) @@ -586,6 +578,5 @@ def cpu() def mem() return @@memCache end - end - + end end # module diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 5256eb159..5598602cd 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -1,16 +1,17 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -module Fluent +require 'fluent/plugin/input' + +module Fluent::Plugin require_relative "podinventory_to_mdm" class Kube_PodInventory_Input < Input - Plugin.register_input("kubepodinventory", self) + Fluent::Plugin.register_input("kube_podinventory", self) @@MDMKubePodInventoryTag = "mdm.kubepodinventory" @@hostName = (OMS::Common.get_hostname) - @@kubeperfTag = "oms.api.KubePerf" - @@kubeservicesTag = "oms.containerinsights.KubeServices" + def initialize super @@ -38,19 +39,25 @@ def initialize @winContainerCount = 0 @controllerData = {} @podInventoryE2EProcessingLatencyMs = 0 - @podsAPIE2ELatencyMs = 0 + @podsAPIE2ELatencyMs = 0 + + @kubeperfTag = "oneagent.containerInsights.LINUX_PERF_BLOB" + @kubeservicesTag = "oneagent.containerInsights.KUBE_SERVICES_BLOB" + @containerInventoryTag = "oneagent.containerInsights.CONTAINER_INVENTORY_BLOB" + @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" end config_param :run_interval, :time, :default => 60 - config_param :tag, :string, :default => "oms.containerinsights.KubePodInventory" + config_param :tag, :string, :default => "oneagent.containerInsights.KUBE_POD_INVENTORY_BLOB" def configure(conf) super @inventoryToMdmConvertor = Inventory2MdmConvertor.new() end - def start + def start if @run_interval + super if !ENV["PODS_CHUNK_SIZE"].nil? && !ENV["PODS_CHUNK_SIZE"].empty? && ENV["PODS_CHUNK_SIZE"].to_i > 0 @PODS_CHUNK_SIZE = ENV["PODS_CHUNK_SIZE"].to_i else @@ -58,7 +65,7 @@ def start $log.warn("in_kube_podinventory::start: setting to default value since got PODS_CHUNK_SIZE nil or empty") @PODS_CHUNK_SIZE = 1000 end - $log.info("in_kube_podinventory::start : PODS_CHUNK_SIZE @ #{@PODS_CHUNK_SIZE}") + $log.info("in_kube_podinventory::start: PODS_CHUNK_SIZE @ #{@PODS_CHUNK_SIZE}") if !ENV["PODS_EMIT_STREAM_BATCH_SIZE"].nil? && !ENV["PODS_EMIT_STREAM_BATCH_SIZE"].empty? 
&& ENV["PODS_EMIT_STREAM_BATCH_SIZE"].to_i > 0 @PODS_EMIT_STREAM_BATCH_SIZE = ENV["PODS_EMIT_STREAM_BATCH_SIZE"].to_i @@ -67,8 +74,7 @@ def start $log.warn("in_kube_podinventory::start: setting to default value since got PODS_EMIT_STREAM_BATCH_SIZE nil or empty") @PODS_EMIT_STREAM_BATCH_SIZE = 200 end - $log.info("in_kube_podinventory::start : PODS_EMIT_STREAM_BATCH_SIZE @ #{@PODS_EMIT_STREAM_BATCH_SIZE}") - + $log.info("in_kube_podinventory::start: PODS_EMIT_STREAM_BATCH_SIZE @ #{@PODS_EMIT_STREAM_BATCH_SIZE}") @finished = false @condition = ConditionVariable.new @mutex = Mutex.new @@ -84,6 +90,7 @@ def shutdown @condition.signal } @thread.join + super # This super must be at the end of shutdown method end end @@ -100,7 +107,8 @@ def enumerate(podList = nil) batchTime = currentTime.utc.iso8601 serviceRecords = [] @podInventoryE2EProcessingLatencyMs = 0 - podInventoryStartTime = (Time.now.to_f * 1000).to_i + podInventoryStartTime = (Time.now.to_f * 1000).to_i + # Get services first so that we dont need to make a call for very chunk $log.info("in_kube_podinventory::enumerate : Getting services from Kube API @ #{Time.now.utc.iso8601}") serviceInfo = KubernetesApiClient.getKubeResourceInfo("services") @@ -189,12 +197,13 @@ def enumerate(podList = nil) end def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batchTime = Time.utc.iso8601) - currentTime = Time.now - emitTime = currentTime.to_f + currentTime = Time.now + emitTime = Fluent::Engine.now #batchTime = currentTime.utc.iso8601 - eventStream = MultiEventStream.new - kubePerfEventStream = MultiEventStream.new - insightsMetricsEventStream = MultiEventStream.new + eventStream = Fluent::MultiEventStream.new + containerInventoryStream = Fluent::MultiEventStream.new + kubePerfEventStream = Fluent::MultiEventStream.new + insightsMetricsEventStream = Fluent::MultiEventStream.new @@istestvar = ENV["ISTEST"] begin #begin block start @@ -205,13 +214,8 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc podInventoryRecords = getPodInventoryRecords(item, serviceRecords, batchTime) podInventoryRecords.each do |record| if !record.nil? - wrapper = { - "DataType" => "KUBE_POD_INVENTORY_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [record.each { |k, v| record[k] = v }], - } - eventStream.add(emitTime, wrapper) if wrapper - @inventoryToMdmConvertor.process_pod_inventory_record(wrapper) + eventStream.add(emitTime, record) if record + @inventoryToMdmConvertor.process_pod_inventory_record(record) end end # Setting this flag to true so that we can send ContainerInventory records for containers @@ -228,13 +232,8 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc # Send container inventory records for containers on windows nodes @winContainerCount += containerInventoryRecords.length containerInventoryRecords.each do |cirecord| - if !cirecord.nil? - ciwrapper = { - "DataType" => "CONTAINER_INVENTORY_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [cirecord.each { |k, v| cirecord[k] = v }], - } - eventStream.add(emitTime, ciwrapper) if ciwrapper + if !cirecord.nil? 
+ containerInventoryStream.add(emitTime, cirecord) if cirecord end end end @@ -246,7 +245,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc $log.info("kubePodInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") end router.emit_stream(@tag, eventStream) if eventStream - eventStream = MultiEventStream.new + eventStream = Fluent::MultiEventStream.new end #container perf records @@ -256,19 +255,17 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "limits", "cpu", "cpuLimitNanoCores", batchTime)) containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "limits", "memory", "memoryLimitBytes", batchTime)) - containerMetricDataItems.each do |record| - record["DataType"] = "LINUX_PERF_BLOB" - record["IPName"] = "LogManagement" + containerMetricDataItems.each do |record| kubePerfEventStream.add(emitTime, record) if record end if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && kubePerfEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE $log.info("in_kube_podinventory::parse_and_emit_records: number of container perf records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") - router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream + router.emit_stream(@kubeperfTag, kubePerfEventStream) if kubePerfEventStream if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) $log.info("kubeContainerPerfEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") end - kubePerfEventStream = MultiEventStream.new + kubePerfEventStream = Fluent::MultiEventStream.new end # container GPU records @@ -277,13 +274,8 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "limits", "nvidia.com/gpu", "containerGpuLimits", batchTime)) containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "requests", "amd.com/gpu", "containerGpuRequests", batchTime)) containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "limits", "amd.com/gpu", "containerGpuLimits", batchTime)) - containerGPUInsightsMetricsDataItems.each do |insightsMetricsRecord| - wrapper = { - "DataType" => "INSIGHTS_METRICS_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], - } - insightsMetricsEventStream.add(emitTime, wrapper) if wrapper + containerGPUInsightsMetricsDataItems.each do |insightsMetricsRecord| + insightsMetricsEventStream.add(emitTime, insightsMetricsRecord) if insightsMetricsRecord end if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && insightsMetricsEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE @@ -291,8 +283,8 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) $log.info("kubePodInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") end - router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream - insightsMetricsEventStream = MultiEventStream.new + router.emit_stream(@insightsMetricsTag, insightsMetricsEventStream) if insightsMetricsEventStream + insightsMetricsEventStream = Fluent::MultiEventStream.new end end #podInventory block end @@ -305,9 +297,18 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc eventStream = nil end + if containerInventoryStream.count > 0 + $log.info("in_kube_podinventory::parse_and_emit_records: number of windows container inventory records emitted #{containerInventoryStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(@containerInventoryTag, containerInventoryStream) if containerInventoryStream + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeWindowsContainerInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end + containerInventoryStream = nil + end + if kubePerfEventStream.count > 0 $log.info("in_kube_podinventory::parse_and_emit_records: number of perf records emitted #{kubePerfEventStream.count} @ #{Time.now.utc.iso8601}") - router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream + router.emit_stream(@kubeperfTag, kubePerfEventStream) if kubePerfEventStream kubePerfEventStream = nil if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) $log.info("kubeContainerPerfEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") @@ -316,7 +317,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc if insightsMetricsEventStream.count > 0 $log.info("in_kube_podinventory::parse_and_emit_records: number of insights metrics records emitted #{insightsMetricsEventStream.count} @ #{Time.now.utc.iso8601}") - router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + router.emit_stream(@insightsMetricsTag, insightsMetricsEventStream) if insightsMetricsEventStream if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) $log.info("kubePodInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") end @@ -327,7 +328,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc @log.info "Sending pod inventory mdm records to out_mdm" pod_inventory_mdm_records = @inventoryToMdmConvertor.get_pod_inventory_mdm_records(batchTime) @log.info "pod_inventory_mdm_records.size #{pod_inventory_mdm_records.size}" - mdm_pod_inventory_es = MultiEventStream.new + mdm_pod_inventory_es = Fluent::MultiEventStream.new pod_inventory_mdm_records.each { |pod_inventory_mdm_record| mdm_pod_inventory_es.add(batchTime, pod_inventory_mdm_record) if pod_inventory_mdm_record } if pod_inventory_mdm_records @@ -335,22 +336,17 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc end if continuationToken.nil? # sending kube services inventory records - kubeServicesEventStream = MultiEventStream.new + kubeServicesEventStream = Fluent::MultiEventStream.new serviceRecords.each do |kubeServiceRecord| if !kubeServiceRecord.nil? 
# adding before emit to reduce memory foot print kubeServiceRecord["ClusterId"] = KubernetesApiClient.getClusterId - kubeServiceRecord["ClusterName"] = KubernetesApiClient.getClusterName - kubeServicewrapper = { - "DataType" => "KUBE_SERVICES_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [kubeServiceRecord.each { |k, v| kubeServiceRecord[k] = v }], - } - kubeServicesEventStream.add(emitTime, kubeServicewrapper) if kubeServicewrapper + kubeServiceRecord["ClusterName"] = KubernetesApiClient.getClusterName + kubeServicesEventStream.add(emitTime, kubeServiceRecord) if kubeServiceRecord if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && kubeServicesEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE $log.info("in_kube_podinventory::parse_and_emit_records: number of service records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") - router.emit_stream(@@kubeservicesTag, kubeServicesEventStream) if kubeServicesEventStream - kubeServicesEventStream = MultiEventStream.new + router.emit_stream(@kubeservicesTag, kubeServicesEventStream) if kubeServicesEventStream + kubeServicesEventStream = Fluent::MultiEventStream.new if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) $log.info("kubeServicesEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") end @@ -360,7 +356,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc if kubeServicesEventStream.count > 0 $log.info("in_kube_podinventory::parse_and_emit_records : number of service records emitted #{kubeServicesEventStream.count} @ #{Time.now.utc.iso8601}") - router.emit_stream(@@kubeservicesTag, kubeServicesEventStream) if kubeServicesEventStream + router.emit_stream(@kubeservicesTag, kubeServicesEventStream) if kubeServicesEventStream if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) $log.info("kubeServicesEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") end @@ -652,6 +648,6 @@ def getServiceNameFromLabels(namespace, labels, serviceRecords) ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) end return serviceName - end + end end # Kube_Pod_Input end # module diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 4efe86f61..40eebac8a 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -1,6 +1,11 @@ -module Fluent +#!/usr/local/bin/ruby +# frozen_string_literal: true + +require 'fluent/plugin/input' + +module Fluent::Plugin class Kube_PVInventory_Input < Input - Plugin.register_input("kubepvinventory", self) + Fluent::Plugin.register_input("kube_pvinventory", self) @@hostName = (OMS::Common.get_hostname) @@ -22,14 +27,15 @@ def initialize end config_param :run_interval, :time, :default => 60 - config_param :tag, :string, :default => "oms.containerinsights.KubePVInventory" + config_param :tag, :string, :default => "oneagent.containerInsights.KUBE_PV_INVENTORY_BLOB" def configure(conf) super end - def start + def start if @run_interval + super @finished = false @condition = ConditionVariable.new @mutex = Mutex.new @@ -45,6 +51,7 @@ def shutdown @condition.signal } @thread.join + super end end @@ -54,7 +61,7 @@ def enumerate telemetryFlush = false @pvTypeToCountHash = {} currentTime = Time.now - batchTime = currentTime.utc.iso8601 + batchTime = currentTime.utc.iso8601 continuationToken = nil $log.info("in_kube_pvinventory::enumerate : Getting PVs from Kube API @ #{Time.now.utc.iso8601}") @@ -103,9 +110,9 @@ def enumerate end # end enumerate def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) - currentTime = Time.now - emitTime = currentTime.to_f - eventStream = MultiEventStream.new + currentTime = Time.now + emitTime = Fluent::Engine.now + eventStream = Fluent::MultiEventStream.new @@istestvar = ENV["ISTEST"] begin records = [] @@ -145,13 +152,8 @@ def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) end records.each do |record| - if !record.nil? - wrapper = { - "DataType" => "KUBE_PV_INVENTORY_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [record.each { |k, v| record[k] = v }], - } - eventStream.add(emitTime, wrapper) if wrapper + if !record.nil? 
+ eventStream.add(emitTime, record) end end @@ -250,7 +252,6 @@ def run_periodic @mutex.lock end @mutex.unlock - end - + end end # Kube_PVInventory_Input -end # module \ No newline at end of file +end # module diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index 27e4709a2..182c3ffc1 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -1,9 +1,11 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -module Fluent +require 'fluent/plugin/input' + +module Fluent::Plugin class Kube_Kubestate_Deployments_Input < Input - Plugin.register_input("kubestatedeployments", self) + Fluent::Plugin.register_input("kubestate_deployments", self) @@istestvar = ENV["ISTEST"] # telemetry - To keep telemetry cost reasonable, we keep track of the max deployments over a period of 15m @@deploymentsCount = 0 @@ -36,14 +38,15 @@ def initialize end config_param :run_interval, :time, :default => 60 - config_param :tag, :string, :default => Constants::INSIGHTSMETRICS_FLUENT_TAG + config_param :tag, :string, :default => "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" def configure(conf) super end - def start + def start if @run_interval + super if !ENV["DEPLOYMENTS_CHUNK_SIZE"].nil? && !ENV["DEPLOYMENTS_CHUNK_SIZE"].empty? && ENV["DEPLOYMENTS_CHUNK_SIZE"].to_i > 0 @DEPLOYMENTS_CHUNK_SIZE = ENV["DEPLOYMENTS_CHUNK_SIZE"].to_i else @@ -52,11 +55,11 @@ def start @DEPLOYMENTS_CHUNK_SIZE = 500 end $log.info("in_kubestate_deployments::start : DEPLOYMENTS_CHUNK_SIZE @ #{@DEPLOYMENTS_CHUNK_SIZE}") - + @finished = false @condition = ConditionVariable.new @mutex = Mutex.new - @thread = Thread.new(&method(:run_periodic)) + @thread = Thread.new(&method(:run_periodic)) end end @@ -67,6 +70,7 @@ def shutdown @condition.signal } @thread.join + super # This super must be at the end of shutdown method end end @@ -77,8 +81,8 @@ def enumerate batchTime = currentTime.utc.iso8601 #set the running total for this batch to 0 - @deploymentsRunningTotal = 0 - + @deploymentsRunningTotal = 0 + # Initializing continuation token to nil continuationToken = nil $log.info("in_kubestate_deployments::enumerate : Getting deployments from Kube API @ #{Time.now.utc.iso8601}") @@ -126,7 +130,7 @@ def enumerate def parse_and_emit_records(deployments, batchTime = Time.utc.iso8601) metricItems = [] - insightsMetricsEventStream = MultiEventStream.new + insightsMetricsEventStream = Fluent::MultiEventStream.new begin metricInfo = deployments metricInfo["items"].each do |deployment| @@ -181,17 +185,12 @@ def parse_and_emit_records(deployments, batchTime = Time.utc.iso8601) metricItems.push(metricItem) end - time = Time.now.to_f - metricItems.each do |insightsMetricsRecord| - wrapper = { - "DataType" => "INSIGHTS_METRICS_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], - } - insightsMetricsEventStream.add(time, wrapper) if wrapper + time = Fluent::Engine.now + metricItems.each do |insightsMetricsRecord| + insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord end - router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + router.emit_stream(@tag, insightsMetricsEventStream) if insightsMetricsEventStream $log.info("successfully emitted #{metricItems.length()} kube_state_deployment metrics") @deploymentsRunningTotal = @deploymentsRunningTotal + metricItems.length() @@ 
-234,6 +233,6 @@ def run_periodic @mutex.lock end @mutex.unlock - end + end end end diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index afecf8e3b..8f60bfb72 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -1,9 +1,11 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -module Fluent +require 'fluent/plugin/input' + +module Fluent::Plugin class Kube_Kubestate_HPA_Input < Input - Plugin.register_input("kubestatehpa", self) + Fluent::Plugin.register_input("kubestate_hpa", self) @@istestvar = ENV["ISTEST"] def initialize @@ -16,7 +18,7 @@ def initialize require_relative "oms_common" require_relative "omslog" require_relative "ApplicationInsightsUtility" - require_relative "constants" + require_relative "constants" # refer tomlparser-agent-config for defaults # this configurable via configmap @@ -33,14 +35,15 @@ def initialize end config_param :run_interval, :time, :default => 60 - config_param :tag, :string, :default => Constants::INSIGHTSMETRICS_FLUENT_TAG + config_param :tag, :string, :default => "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" def configure(conf) super end - def start + def start if @run_interval + super if !ENV["HPA_CHUNK_SIZE"].nil? && !ENV["HPA_CHUNK_SIZE"].empty? && ENV["HPA_CHUNK_SIZE"].to_i > 0 @HPA_CHUNK_SIZE = ENV["HPA_CHUNK_SIZE"].to_i else @@ -64,6 +67,7 @@ def shutdown @condition.signal } @thread.join + super end end @@ -74,7 +78,7 @@ def enumerate batchTime = currentTime.utc.iso8601 @hpaCount = 0 - + # Initializing continuation token to nil continuationToken = nil $log.info("in_kubestate_hpa::enumerate : Getting HPAs from Kube API @ #{Time.now.utc.iso8601}") @@ -113,7 +117,7 @@ def enumerate def parse_and_emit_records(hpas, batchTime = Time.utc.iso8601) metricItems = [] - insightsMetricsEventStream = MultiEventStream.new + insightsMetricsEventStream = Fluent::MultiEventStream.new begin metricInfo = hpas metricInfo["items"].each do |hpa| @@ -181,17 +185,12 @@ def parse_and_emit_records(hpas, batchTime = Time.utc.iso8601) metricItems.push(metricItem) end - time = Time.now.to_f - metricItems.each do |insightsMetricsRecord| - wrapper = { - "DataType" => "INSIGHTS_METRICS_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], - } - insightsMetricsEventStream.add(time, wrapper) if wrapper + time = Fluent::Engine.now + metricItems.each do |insightsMetricsRecord| + insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord end - router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + router.emit_stream(@tag, insightsMetricsEventStream) if insightsMetricsEventStream $log.info("successfully emitted #{metricItems.length()} kube_state_hpa metrics") if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) $log.info("kubestatehpaInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") @@ -232,6 +231,6 @@ def run_periodic @mutex.lock end @mutex.unlock - end + end end end diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 9c267cf4f..61e823ea6 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -1,9 +1,11 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -module Fluent +require 'fluent/plugin/input' + +module Fluent::Plugin class Win_CAdvisor_Perf_Input < Input - Plugin.register_input("wincadvisorperf", self) + Fluent::Plugin.register_input("win_cadvisor_perf", self) @@winNodes = [] @@ -18,10 +20,11 @@ def initialize require_relative "oms_common" require_relative "omslog" require_relative "constants" + @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" end config_param :run_interval, :time, :default => 60 - config_param :tag, :string, :default => "oms.api.wincadvisorperf" + config_param :tag, :string, :default => "oneagent.containerInsights.LINUX_PERF_BLOB" config_param :mdmtag, :string, :default => "mdm.cadvisorperf" def configure(conf) @@ -50,11 +53,11 @@ def shutdown end def enumerate() - time = Time.now.to_f + time = Fluent::Engine.now begin timeDifference = (DateTime.now.to_time.to_i - @@winNodeQueryTimeTracker).abs timeDifferenceInMinutes = timeDifference / 60 - @@istestvar = ENV["ISTEST"] + @@istestvar = ENV["ISTEST"] #Resetting this cache so that it is populated with the current set of containers with every call CAdvisorMetricsAPIClient.resetWinContainerIdCache() @@ -68,12 +71,10 @@ def enumerate() @@winNodeQueryTimeTracker = DateTime.now.to_time.to_i end @@winNodes.each do |winNode| - eventStream = MultiEventStream.new + eventStream = Fluent::MultiEventStream.new metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, metricTime: Time.now.utc.iso8601) metricData.each do |record| if !record.empty? - record["DataType"] = "LINUX_PERF_BLOB" - record["IPName"] = "LogManagement" eventStream.add(time, record) if record end end @@ -88,18 +89,13 @@ def enumerate() begin containerGPUusageInsightsMetricsDataItems = [] containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, metricTime: Time.now.utc.iso8601)) - insightsMetricsEventStream = MultiEventStream.new + insightsMetricsEventStream = Fluent::MultiEventStream.new containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| - wrapper = { - "DataType" => "INSIGHTS_METRICS_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], - } - insightsMetricsEventStream.add(time, wrapper) if wrapper + insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord end - router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + router.emit_stream(@insightsMetricsTag, insightsMetricsEventStream) if insightsMetricsEventStream router.emit_stream(@mdmtag, insightsMetricsEventStream) if insightsMetricsEventStream if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) $log.info("winCAdvisorInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") diff --git a/source/plugins/ruby/out_health_forward.rb b/source/plugins/ruby/out_health_forward.rb index 6fcfe368b..59eed97da 100644 --- a/source/plugins/ruby/out_health_forward.rb +++ b/source/plugins/ruby/out_health_forward.rb @@ -15,469 +15,593 @@ # limitations under the License. # -require 'base64' -require 'socket' -require 'fileutils' - -require 'cool.io' - require 'fluent/output' require 'fluent/config/error' +require 'fluent/clock' +require 'fluent/tls' +require 'base64' +require 'forwardable' -module Fluent - class ForwardOutputError < StandardError - end - - class ForwardOutputResponseError < ForwardOutputError - end +require 'fluent/compat/socket_util' +require 'fluent/plugin/out_forward/handshake_protocol' +require 'fluent/plugin/out_forward/load_balancer' +require 'fluent/plugin/out_forward/socket_cache' +require 'fluent/plugin/out_forward/failure_detector' +require 'fluent/plugin/out_forward/error' +require 'fluent/plugin/out_forward/connection_manager' +require 'fluent/plugin/out_forward/ack_handler' - class ForwardOutputConnectionClosedError < ForwardOutputError - end +module Fluent::Plugin + class HealthForwardOutput < Output + Fluent::Plugin.register_output('health_forward', self) - class ForwardOutputACKTimeoutError < ForwardOutputResponseError - end + helpers :socket, :server, :timer, :thread, :compat_parameters, :service_discovery - class HealthForwardOutput < ObjectBufferedOutput - Plugin.register_output('health_forward', self) + LISTEN_PORT = 25227 - def initialize - super - require 'fluent/plugin/socket_util' - @nodes = [] #=> [Node] - end + desc 'The transport protocol.' + config_param :transport, :enum, list: [:tcp, :tls], default: :tcp + # TODO: TLS session cache/tickets desc 'The timeout time when sending event logs.' config_param :send_timeout, :time, default: 60 - desc 'The transport protocol to use for heartbeats.(udp,tcp,none)' - config_param :heartbeat_type, default: :udp do |val| - case val.downcase - when 'tcp' - :tcp - when 'udp' - :udp - when 'none' - :none - else - raise ConfigError, "forward output heartbeat type should be 'tcp', 'udp', or 'none'" - end - end + desc 'The timeout time for socket connect' + config_param :connect_timeout, :time, default: nil + # TODO: add linger_timeout, recv_timeout + + desc 'The protocol to use for heartbeats (default is the same with "transport").' + config_param :heartbeat_type, :enum, list: [:transport, :tcp, :udp, :none], default: :transport desc 'The interval of the heartbeat packer.' config_param :heartbeat_interval, :time, default: 1 desc 'The wait time before accepting a server fault recovery.' config_param :recover_wait, :time, default: 10 desc 'The hard timeout used to detect server failure.' config_param :hard_timeout, :time, default: 60 - desc 'Set TTL to expire DNS cache in seconds.' - config_param :expire_dns_cache, :time, default: nil # 0 means disable cache desc 'The threshold parameter used to detect server faults.' config_param :phi_threshold, :integer, default: 16 desc 'Use the "Phi accrual failure detector" to detect server failure.' config_param :phi_failure_detector, :bool, default: true - # if any options added that requires extended forward api, fix @extend_internal_protocol - desc 'Change the protocol to at-least-once.' 
config_param :require_ack_response, :bool, default: false # require in_forward to respond with ack - desc 'This option is used when require_ack_response is true.' - config_param :ack_response_timeout, :time, default: 190 # 0 means do not wait for ack responses + + ## The reason of default value of :ack_response_timeout: # Linux default tcp_syn_retries is 5 (in many environment) # 3 + 6 + 12 + 24 + 48 + 96 -> 189 (sec) + desc 'This option is used when require_ack_response is true.' + config_param :ack_response_timeout, :time, default: 190 + + desc 'The interval while reading data from server' + config_param :read_interval_msec, :integer, default: 50 # 50ms + desc 'Reading data size from server' + config_param :read_length, :size, default: 512 # 512bytes + + desc 'Set TTL to expire DNS cache in seconds.' + config_param :expire_dns_cache, :time, default: nil # 0 means disable cache desc 'Enable client-side DNS round robin.' config_param :dns_round_robin, :bool, default: false # heartbeat_type 'udp' is not available for this + desc 'Ignore DNS resolution and errors at startup time.' + config_param :ignore_network_errors_at_startup, :bool, default: false + + desc 'Verify that a connection can be made with one of out_forward nodes at the time of startup.' + config_param :verify_connection_at_startup, :bool, default: false + + desc 'Compress buffered data.' + config_param :compress, :enum, list: [:text, :gzip], default: :text + + desc 'The default version of TLS transport.' + config_param :tls_version, :enum, list: Fluent::TLS::SUPPORTED_VERSIONS, default: Fluent::TLS::DEFAULT_VERSION + desc 'The cipher configuration of TLS transport.' + config_param :tls_ciphers, :string, default: Fluent::TLS::CIPHERS_DEFAULT + desc 'Skip all verification of certificates or not.' + config_param :tls_insecure_mode, :bool, default: false + desc 'Allow self signed certificates or not.' + config_param :tls_allow_self_signed_cert, :bool, default: false + desc 'Verify hostname of servers and certificates or not in TLS transport.' + config_param :tls_verify_hostname, :bool, default: true + desc 'The additional CA certificate path for TLS.' + config_param :tls_ca_cert_path, :array, value_type: :string, default: nil + desc 'The additional certificate path for TLS.' + config_param :tls_cert_path, :array, value_type: :string, default: nil + desc 'The client certificate path for TLS.' + config_param :tls_client_cert_path, :string, default: nil + desc 'The client private key path for TLS.' + config_param :tls_client_private_key_path, :string, default: nil + desc 'The client private key passphrase for TLS.' + config_param :tls_client_private_key_passphrase, :string, default: nil, secret: true + desc 'The certificate thumbprint for searching from Windows system certstore.' + config_param :tls_cert_thumbprint, :string, default: nil, secret: true + desc 'The certificate logical store name on Windows system certstore.' + config_param :tls_cert_logical_store_name, :string, default: nil + desc 'Enable to use certificate enterprise store on Windows system certstore.' + config_param :tls_cert_use_enterprise_store, :bool, default: true + desc "Enable keepalive connection." + config_param :keepalive, :bool, default: false + desc "Expired time of keepalive. 
Default value is nil, which means to keep connection as long as possible" + config_param :keepalive_timeout, :time, default: nil + + config_section :security, required: false, multi: false do + desc 'The hostname' + config_param :self_hostname, :string + desc 'Shared key for authentication' + config_param :shared_key, :string, secret: true + end + + config_section :server, param_name: :servers do + desc "The IP address or host name of the server." + config_param :host, :string + desc "The name of the server. Used for logging and certificate verification in TLS transport (when host is address)." + config_param :name, :string, default: nil + desc "The port number of the host." + config_param :port, :integer, default: LISTEN_PORT + desc "The shared key per server." + config_param :shared_key, :string, default: nil, secret: true + desc "The username for authentication." + config_param :username, :string, default: '' + desc "The password for authentication." + config_param :password, :string, default: '', secret: true + desc "Marks a node as the standby node for an Active-Standby model between Fluentd nodes." + config_param :standby, :bool, default: false + desc "The load balancing weight." + config_param :weight, :integer, default: 60 + end + attr_reader :nodes - config_param :port, :integer, default: DEFAULT_LISTEN_PORT, deprecated: "User host xxx instead." - config_param :host, :string, default: nil, deprecated: "Use port xxx instead." - desc 'Skip network related error, e.g. DNS error, during plugin setup' - config_param :skip_network_error_at_init, :bool, :default => false + config_param :port, :integer, default: LISTEN_PORT, obsoleted: "User section instead." + config_param :host, :string, default: nil, obsoleted: "Use section instead." + config_section :buffer do + config_set_default :chunk_keys, ["tag"] + end - attr_accessor :extend_internal_protocol + attr_reader :read_interval, :recover_sample_size - def configure(conf) + def initialize super - # backward compatibility - if host = conf['host'] - port = conf['port'] - port = port ? port.to_i : DEFAULT_LISTEN_PORT - e = conf.add_element('server') - e['host'] = host - e['port'] = port.to_s - end + @nodes = [] #=> [Node] + @loop = nil + @thread = nil - recover_sample_size = @recover_wait / @heartbeat_interval + @usock = nil + @keep_alive_watcher_interval = 5 # TODO + @suspend_flush = false + end - # add options here if any options addes which uses extended protocol - @extend_internal_protocol = if @require_ack_response - true - else - false - end + def configure(conf) + compat_parameters_convert(conf, :buffer, default_chunk_key: 'tag') - if @dns_round_robin - if @heartbeat_type == :udp - raise ConfigError, "forward output heartbeat type must be 'tcp' or 'none' to use dns_round_robin option" - end - end + super - conf.elements.each {|e| - next if e.name != "server" + unless @chunk_key_tag + raise Fluent::ConfigError, "buffer chunk key must include 'tag' for forward output" + end - host = e['host'] - port = e['port'] - port = port ? port.to_i : DEFAULT_LISTEN_PORT + @read_interval = @read_interval_msec / 1000.0 + @recover_sample_size = @recover_wait / @heartbeat_interval - weight = e['weight'] - weight = weight ? weight.to_i : 60 + if @heartbeat_type == :tcp + log.warn "'heartbeat_type tcp' is deprecated. use 'transport' instead." 
+ @heartbeat_type = :transport + end - standby = !!e['standby'] + if @dns_round_robin && @heartbeat_type == :udp + raise Fluent::ConfigError, "forward output heartbeat type must be 'transport' or 'none' to use dns_round_robin option" + end - name = e['name'] - unless name - name = "#{host}:#{port}" + if @transport == :tls + # socket helper adds CA cert or signed certificate to same cert store internally so unify it in this place. + if @tls_cert_path && !@tls_cert_path.empty? + @tls_ca_cert_path = @tls_cert_path + end + if @tls_ca_cert_path && !@tls_ca_cert_path.empty? + @tls_ca_cert_path.each do |path| + raise Fluent::ConfigError, "specified cert path does not exist:#{path}" unless File.exist?(path) + raise Fluent::ConfigError, "specified cert path is not readable:#{path}" unless File.readable?(path) + end end - failure = FailureDetector.new(@heartbeat_interval, @hard_timeout, Time.now.to_i.to_f) - - node_conf = NodeConfig2.new(name, host, port, weight, standby, failure, - @phi_threshold, recover_sample_size, @expire_dns_cache, @phi_failure_detector, @dns_round_robin, @skip_network_error_at_init) + if @tls_insecure_mode + log.warn "TLS transport is configured in insecure way" + @tls_verify_hostname = false + @tls_allow_self_signed_cert = true + end - if @heartbeat_type == :none - @nodes << NoneHeartbeatNode.new(log, node_conf) + if Fluent.windows? + if (@tls_cert_path || @tls_ca_cert_path) && @tls_cert_logical_store_name + raise Fluent::ConfigError, "specified both cert path and tls_cert_logical_store_name is not permitted" + end else - @nodes << Node.new(log, node_conf) + raise Fluent::ConfigError, "This parameter is for only Windows" if @tls_cert_logical_store_name + raise Fluent::ConfigError, "This parameter is for only Windows" if @tls_cert_thumbprint end - log.info "adding forwarding server '#{name}'", host: host, port: port, weight: weight, plugin_id: plugin_id - } + end + + @ack_handler = @require_ack_response ? AckHandler.new(timeout: @ack_response_timeout, log: @log, read_length: @read_length) : nil + socket_cache = @keepalive ? SocketCache.new(@keepalive_timeout, @log) : nil + @connection_manager = Fluent::Plugin::ForwardOutput::ConnectionManager.new( + log: @log, + secure: !!@security, + connection_factory: method(:create_transfer_socket), + socket_cache: socket_cache, + ) - if @nodes.empty? - raise ConfigError, "forward output plugin requires at least one is required" + configs = [] + + # rewrite for using server as sd_static + conf.elements(name: 'server').each do |s| + s.name = 'service' end - end - def start - super + unless conf.elements(name: 'service').empty? 
+ # To copy `services` element only + new_elem = Fluent::Config::Element.new('static_service_discovery', {}, {}, conf.elements(name: 'service')) + configs << { type: :static, conf: new_elem } + end - @rand_seed = Random.new.seed - rebuild_weight_array - @rr = 0 + conf.elements(name: 'service_discovery').each_with_index do |c, i| + configs << { type: @service_discovery[i][:@type], conf: c } + end - unless @heartbeat_type == :none - @loop = Coolio::Loop.new + service_discovery_create_manager( + :out_forward_service_discovery_watcher, + configurations: configs, + load_balancer: Fluent::Plugin::ForwardOutput::LoadBalancer.new(log), + custom_build_method: method(:build_node), + ) - if @heartbeat_type == :udp - # assuming all hosts use udp - @usock = SocketUtil.create_udp_socket(@nodes.first.host) - @usock.fcntl(Fcntl::F_SETFL, Fcntl::O_NONBLOCK) - @hb = HeartbeatHandler.new(@usock, method(:on_heartbeat)) - @loop.attach(@hb) + discovery_manager.services.each do |server| + # it's only for test + @nodes << server + unless @heartbeat_type == :none + begin + server.validate_host_resolution! + rescue => e + raise unless @ignore_network_errors_at_startup + log.warn "failed to resolve node name when configured", server: (server.name || server.host), error: e + server.disable! + end end + end - @timer = HeartbeatRequestTimer.new(@heartbeat_interval, method(:on_timer)) - @loop.attach(@timer) + unless @as_secondary + if @compress == :gzip && @buffer.compress == :text + @buffer.compress = :gzip + elsif @compress == :text && @buffer.compress == :gzip + log.info "buffer is compressed. If you also want to save the bandwidth of a network, Add `compress` configuration in " + end + end - @thread = Thread.new(&method(:run)) + if discovery_manager.services.empty? + raise Fluent::ConfigError, "forward output plugin requires at least one node is required. Add or " end - end - def shutdown - @finished = true - if @loop - @loop.watchers.each {|w| w.detach } - @loop.stop + if !@keepalive && @keepalive_timeout + log.warn('The value of keepalive_timeout is ignored. if you want to use keepalive, please add `keepalive true` to your conf.') end - @thread.join if @thread - @usock.close if @usock + + raise Fluent::ConfigError, "ack_response_timeout must be a positive integer" if @ack_response_timeout < 1 end - def run - @loop.run if @loop - rescue - log.error "unexpected error", error: $!.to_s - log.error_backtrace + def multi_workers_ready? + true end - def write_objects(tag, chunk) - return if chunk.empty? + def prefer_delayed_commit + @require_ack_response + end - error = nil + def overwrite_delayed_commit_timeout + # Output#start sets @delayed_commit_timeout by @buffer_config.delayed_commit_timeout + # But it should be overwritten by ack_response_timeout to rollback chunks after timeout + if @delayed_commit_timeout != @ack_response_timeout + log.info "delayed_commit_timeout is overwritten by ack_response_timeout" + @delayed_commit_timeout = @ack_response_timeout + 2 # minimum ack_reader IO.select interval is 1s + end + end - wlen = @weight_array.length - wlen.times do - @rr = (@rr + 1) % wlen - node = @weight_array[@rr] + def start + super - if node.available? 
+ unless @heartbeat_type == :none + if @heartbeat_type == :udp + @usock = socket_create_udp(discovery_manager.services.first.host, discovery_manager.services.first.port, nonblock: true) + server_create_udp(:out_forward_heartbeat_receiver, 0, socket: @usock, max_bytes: @read_length, &method(:on_udp_heatbeat_response_recv)) + end + timer_execute(:out_forward_heartbeat_request, @heartbeat_interval, &method(:on_heartbeat_timer)) + end + + if @require_ack_response + overwrite_delayed_commit_timeout + thread_create(:out_forward_receiving_ack, &method(:ack_reader)) + end + + if @verify_connection_at_startup + discovery_manager.services.each do |node| begin - send_data(node, tag, chunk) - return - rescue - # for load balancing during detecting crashed servers - error = $! # use the latest error + node.verify_connection + rescue StandardError => e + log.fatal "forward's connection setting error: #{e.message}" + raise Fluent::UnrecoverableError, e.message end end end - if error - raise error - else - raise "no nodes are available" # TODO message + if @keepalive + timer_execute(:out_forward_keep_alived_socket_watcher, @keep_alive_watcher_interval, &method(:on_purge_obsolete_socks)) end end - private + def close + if @usock + # close socket and ignore errors: this socket will not be used anyway. + @usock.close rescue nil + end - def rebuild_weight_array - standby_nodes, regular_nodes = @nodes.partition {|n| - n.standby? - } + super + end - lost_weight = 0 - regular_nodes.each {|n| - unless n.available? - lost_weight += n.weight - end - } - log.debug "rebuilding weight array", lost_weight: lost_weight - - if lost_weight > 0 - standby_nodes.each {|n| - if n.available? - regular_nodes << n - log.warn "using standby node #{n.host}:#{n.port}", weight: n.weight - lost_weight -= n.weight - break if lost_weight <= 0 - end - } + def stop + super + + if @keepalive + @connection_manager.stop end + end + + def before_shutdown + super + @suspend_flush = true + end + + def after_shutdown + last_ack if @require_ack_response + super + end - weight_array = [] - gcd = regular_nodes.map {|n| n.weight }.inject(0) {|r,w| r.gcd(w) } - regular_nodes.each {|n| - (n.weight / gcd).times { - weight_array << n - } - } + def try_flush + return if @require_ack_response && @suspend_flush + super + end - # for load balancing during detecting crashed servers - coe = (regular_nodes.size * 6) / weight_array.size - weight_array *= coe if coe > 1 + def last_ack + overwrite_delayed_commit_timeout + ack_check(ack_select_interval) + end - r = Random.new(@rand_seed) - weight_array.sort_by! { r.rand } + def write(chunk) + return if chunk.empty? + tag = chunk.metadata.tag - @weight_array = weight_array + discovery_manager.select_service { |node| node.send_data(tag, chunk) } end - # MessagePack FixArray length = 3 (if @extend_internal_protocol) - # = 2 (else) - FORWARD_HEADER = [0x92].pack('C').freeze - FORWARD_HEADER_EXT = [0x93].pack('C').freeze - def forward_header - if @extend_internal_protocol - FORWARD_HEADER_EXT - else - FORWARD_HEADER + def try_write(chunk) + log.trace "writing a chunk to destination", chunk_id: dump_unique_id_hex(chunk.unique_id) + if chunk.empty? 
+ commit_write(chunk.unique_id) + return end + tag = chunk.metadata.tag + discovery_manager.select_service { |node| node.send_data(tag, chunk) } + last_ack if @require_ack_response && @suspend_flush end - #FORWARD_TCP_HEARTBEAT_DATA = FORWARD_HEADER + ''.to_msgpack + [].to_msgpack - def send_heartbeat_tcp(node) - sock = connect(node) - begin - opt = [1, @send_timeout.to_i].pack('I!I!') # { int l_onoff; int l_linger; } - sock.setsockopt(Socket::SOL_SOCKET, Socket::SO_LINGER, opt) - opt = [@send_timeout.to_i, 0].pack('L!L!') # struct timeval - # don't send any data to not cause a compatibility problem - #sock.setsockopt(Socket::SOL_SOCKET, Socket::SO_SNDTIMEO, opt) - #sock.write FORWARD_TCP_HEARTBEAT_DATA - node.heartbeat(true) - ensure - sock.close + def create_transfer_socket(host, port, hostname, &block) + case @transport + when :tls + socket_create_tls( + host, port, + version: @tls_version, + ciphers: @tls_ciphers, + insecure: @tls_insecure_mode, + verify_fqdn: @tls_verify_hostname, + fqdn: hostname, + allow_self_signed_cert: @tls_allow_self_signed_cert, + cert_paths: @tls_ca_cert_path, + cert_path: @tls_client_cert_path, + private_key_path: @tls_client_private_key_path, + private_key_passphrase: @tls_client_private_key_passphrase, + cert_thumbprint: @tls_cert_thumbprint, + cert_logical_store_name: @tls_cert_logical_store_name, + cert_use_enterprise_store: @tls_cert_use_enterprise_store, + + # Enabling SO_LINGER causes tcp port exhaustion on Windows. + # This is because dynamic ports are only 16384 (from 49152 to 65535) and + # expiring SO_LINGER enabled ports should wait 4 minutes + # where set by TcpTimeDelay. Its default value is 4 minutes. + # So, we should disable SO_LINGER on Windows to prevent flood of waiting ports. + linger_timeout: Fluent.windows? ? nil : @send_timeout, + send_timeout: @send_timeout, + recv_timeout: @ack_response_timeout, + connect_timeout: @connect_timeout, + &block + ) + when :tcp + socket_create_tcp( + host, port, + linger_timeout: @send_timeout, + send_timeout: @send_timeout, + recv_timeout: @ack_response_timeout, + connect_timeout: @connect_timeout, + &block + ) + else + raise "BUG: unknown transport protocol #{@transport}" end end - def send_data(node, tag, chunk) - sock = connect(node) - begin - opt = [1, @send_timeout.to_i].pack('I!I!') # { int l_onoff; int l_linger; } - sock.setsockopt(Socket::SOL_SOCKET, Socket::SO_LINGER, opt) - - opt = [@send_timeout.to_i, 0].pack('L!L!') # struct timeval - sock.setsockopt(Socket::SOL_SOCKET, Socket::SO_SNDTIMEO, opt) - - # beginArray(2) - sock.write forward_header - - # writeRaw(tag) - sock.write tag.to_msgpack # tag - - # beginRaw(size) - sz = chunk.size - #if sz < 32 - # # FixRaw - # sock.write [0xa0 | sz].pack('C') - #elsif sz < 65536 - # # raw 16 - # sock.write [0xda, sz].pack('Cn') - #else - # raw 32 - sock.write [0xdb, sz].pack('CN') - #end - - # writeRawBody(packed_es) - chunk.write_to(sock) - - if @extend_internal_protocol - option = {} - option['chunk'] = Base64.encode64(chunk.unique_id) if @require_ack_response - sock.write option.to_msgpack - - if @require_ack_response && @ack_response_timeout > 0 - # Waiting for a response here results in a decrease of throughput because a chunk queue is locked. - # To avoid a decrease of troughput, it is necessary to prepare a list of chunks that wait for responses - # and process them asynchronously. 
- if IO.select([sock], nil, nil, @ack_response_timeout) - raw_data = sock.recv(1024) - - # When connection is closed by remote host, socket is ready to read and #recv returns an empty string that means EOF. - # If this happens we assume the data wasn't delivered and retry it. - if raw_data.empty? - @log.warn "node #{node.host}:#{node.port} closed the connection. regard it as unavailable." - node.disable! - raise ForwardOutputConnectionClosedError, "node #{node.host}:#{node.port} closed connection" - else - # Serialization type of the response is same as sent data. - res = MessagePack.unpack(raw_data) - - if res['ack'] != option['chunk'] - # Some errors may have occured when ack and chunk id is different, so send the chunk again. - raise ForwardOutputResponseError, "ack in response and chunk id in sent data are different" - end - end - - else - # IO.select returns nil on timeout. - # There are 2 types of cases when no response has been received: - # (1) the node does not support sending responses - # (2) the node does support sending response but responses have not arrived for some reasons. - @log.warn "no response from #{node.host}:#{node.port}. regard it as unavailable." - node.disable! - raise ForwardOutputACKTimeoutError, "node #{node.host}:#{node.port} does not return ACK" - end - end + def statistics + stats = super + services = discovery_manager.services + healthy_nodes_count = 0 + registed_nodes_count = services.size + services.each do |s| + if s.available? + healthy_nodes_count += 1 end - - node.heartbeat(false) - return res # for test - ensure - sock.close end + + stats.merge( + 'healthy_nodes_count' => healthy_nodes_count, + 'registered_nodes_count' => registed_nodes_count, + ) end - def connect(node) - # TODO unix socket? - TCPSocket.new(node.resolved_host, node.port) + # MessagePack FixArray length is 3 + FORWARD_HEADER = [0x93].pack('C').freeze + def forward_header + FORWARD_HEADER end - class HeartbeatRequestTimer < Coolio::TimerWatcher - def initialize(interval, callback) - super(interval, true) - @callback = callback - end + private - def on_timer - @callback.call - rescue - # TODO log? 
+ def build_node(server) + name = server.name || "#{server.host}:#{server.port}" + log.info "adding forwarding server '#{name}'", host: server.host, port: server.port, weight: server.weight, plugin_id: plugin_id + + failure = Fluent::Plugin::ForwardOutput::FailureDetector.new(@heartbeat_interval, @hard_timeout, Time.now.to_i.to_f) + if @heartbeat_type == :none + NoneHeartbeatNode.new(self, server, failure: failure, connection_manager: @connection_manager, ack_handler: @ack_handler) + else + Node.new(self, server, failure: failure, connection_manager: @connection_manager, ack_handler: @ack_handler) end end - def on_timer - return if @finished - @nodes.each {|n| - if n.tick - rebuild_weight_array - end + def on_heartbeat_timer + need_rebuild = false + discovery_manager.services.each do |n| begin - #log.trace "sending heartbeat #{n.host}:#{n.port} on #{@heartbeat_type}" - if @heartbeat_type == :tcp - send_heartbeat_tcp(n) - else - @usock.send "\0", 0, Socket.pack_sockaddr_in(n.port, n.resolved_host) - end - rescue Errno::EAGAIN, Errno::EWOULDBLOCK, Errno::EINTR, Errno::ECONNREFUSED - # TODO log - log.debug "failed to send heartbeat packet to #{n.host}:#{n.port}", error: $!.to_s + log.trace "sending heartbeat", host: n.host, port: n.port, heartbeat_type: @heartbeat_type + n.usock = @usock if @usock + need_rebuild = n.send_heartbeat || need_rebuild + rescue Errno::EAGAIN, Errno::EWOULDBLOCK, Errno::EINTR, Errno::ECONNREFUSED, Errno::ETIMEDOUT => e + log.debug "failed to send heartbeat packet", host: n.host, port: n.port, heartbeat_type: @heartbeat_type, error: e + rescue => e + log.debug "unexpected error happen during heartbeat", host: n.host, port: n.port, heartbeat_type: @heartbeat_type, error: e end - } - end - class HeartbeatHandler < Coolio::IO - def initialize(io, callback) - super(io) - @io = io - @callback = callback + need_rebuild = n.tick || need_rebuild end - def on_readable - begin - msg, addr = @io.recvfrom(1024) - rescue Errno::EAGAIN, Errno::EWOULDBLOCK, Errno::EINTR - return - end - host = addr[3] - port = addr[1] - sockaddr = Socket.pack_sockaddr_in(port, host) - @callback.call(sockaddr, msg) - rescue - # TODO log? + if need_rebuild + discovery_manager.rebalance end end - def on_heartbeat(sockaddr, msg) - port, host = Socket.unpack_sockaddr_in(sockaddr) - if node = @nodes.find {|n| n.sockaddr == sockaddr } - #log.trace "heartbeat from '#{node.name}'", :host=>node.host, :port=>node.port + def on_udp_heatbeat_response_recv(data, sock) + sockaddr = Socket.pack_sockaddr_in(sock.remote_port, sock.remote_host) + if node = discovery_manager.services.find { |n| n.sockaddr == sockaddr } + # log.trace "heartbeat arrived", name: node.name, host: node.host, port: node.port if node.heartbeat - rebuild_weight_array + discovery_manager.rebalance end + else + log.warn("Unknown heartbeat response received from #{sock.remote_host}:#{sock.remote_port}. It may service out") end end - NodeConfig2 = Struct.new("NodeConfig2", :name, :host, :port, :weight, :standby, :failure, - :phi_threshold, :recover_sample_size, :expire_dns_cache, :phi_failure_detector, :dns_round_robin, :skip_network_error) + def on_purge_obsolete_socks + @connection_manager.purge_obsolete_socks + end + + def ack_select_interval + if @delayed_commit_timeout > 3 + 1 + else + @delayed_commit_timeout / 3.0 + end + end + + def ack_reader + select_interval = ack_select_interval + + while thread_current_running? 
+ ack_check(select_interval) + end + end + + def ack_check(select_interval) + @ack_handler.collect_response(select_interval) do |chunk_id, node, sock, result| + @connection_manager.close(sock) + + case result + when AckHandler::Result::SUCCESS + commit_write(chunk_id) + when AckHandler::Result::FAILED + node.disable! + rollback_write(chunk_id, update_retry: false) + when AckHandler::Result::CHUNKID_UNMATCHED + rollback_write(chunk_id, update_retry: false) + else + log.warn("BUG: invalid status #{result} #{chunk_id}") + + if chunk_id + rollback_write(chunk_id, update_retry: false) + end + end + end + end class Node - def initialize(log, conf) - @log = log - @conf = conf - @name = @conf.name - @host = @conf.host - @port = @conf.port - @weight = @conf.weight - @failure = @conf.failure + extend Forwardable + def_delegators :@server, :discovery_id, :host, :port, :name, :weight, :standby + + # @param connection_manager [Fluent::Plugin::ForwardOutput::ConnectionManager] + # @param ack_handler [Fluent::Plugin::ForwardOutput::AckHandler] + def initialize(sender, server, failure:, connection_manager:, ack_handler:) + @sender = sender + @log = sender.log + @compress = sender.compress + @server = server + + @name = server.name + @host = server.host + @port = server.port + @weight = server.weight + @standby = server.standby + @failure = failure @available = true + # @hostname is used for certificate verification & TLS SNI + host_is_hostname = !(IPAddr.new(@host) rescue false) + @hostname = case + when host_is_hostname then @host + when @name then @name + else nil + end + + @usock = nil + + @handshake = Fluent::Plugin::ForwardOutput::HandshakeProtocol.new( + log: @log, + hostname: sender.security && sender.security.self_hostname, + shared_key: server.shared_key || (sender.security && sender.security.shared_key) || '', + password: server.password || '', + username: server.username || '', + ) + + @unpacker = Fluent::MessagePackFactory.msgpack_unpacker + @resolved_host = nil @resolved_time = 0 - begin - resolved_host # check dns - rescue => e - if @conf.skip_network_error - log.warn "#{@name} got network error during setup. Resolve host later", :error => e, :error_class => e.class - else - raise - end - end - end + @resolved_once = false + + @connection_manager = connection_manager + @ack_handler = ack_handler + end + + attr_accessor :usock - attr_reader :conf - attr_reader :name, :host, :port, :weight - attr_reader :sockaddr # used by on_heartbeat - attr_reader :failure, :available # for test + attr_reader :state + attr_reader :sockaddr # used by on_udp_heatbeat_response_recv + attr_reader :failure # for test + + def validate_host_resolution! + resolved_host + end def available? @available @@ -488,41 +612,158 @@ def disable! end def standby? - @conf.standby + @standby + end + + def verify_connection + connect do |sock, ri| + ensure_established_connection(sock, ri) + end + end + + def establish_connection(sock, ri) + while ri.state != :established + begin + # TODO: On Ruby 2.2 or earlier, read_nonblock doesn't work expectedly. + # We need rewrite around here using new socket/server plugin helper. + buf = sock.read_nonblock(@sender.read_length) + if buf.empty? + sleep @sender.read_interval + next + end + @unpacker.feed_each(buf) do |data| + if @handshake.invoke(sock, ri, data) == :established + @log.debug "connection established", host: @host, port: @port + end + end + rescue IO::WaitReadable + # If the exception is Errno::EWOULDBLOCK or Errno::EAGAIN, it is extended by IO::WaitReadable. 
+ # So IO::WaitReadable can be used to rescue the exceptions for retrying read_nonblock. + # https//docs.ruby-lang.org/en/2.3.0/IO.html#method-i-read_nonblock + sleep @sender.read_interval unless ri.state == :established + rescue SystemCallError => e + @log.warn "disconnected by error", host: @host, port: @port, error: e + disable! + break + rescue EOFError + @log.warn "disconnected", host: @host, port: @port + disable! + break + rescue HeloError => e + @log.warn "received invalid helo message from #{@name}" + disable! + break + rescue PingpongError => e + @log.warn "connection refused to #{@name || @host}: #{e.message}" + disable! + break + end + end + end + + def send_data_actual(sock, tag, chunk) + option = { 'size' => chunk.size, 'compressed' => @compress } + option['chunk'] = Base64.encode64(chunk.unique_id) if @ack_handler + + # https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1#packedforward-mode + # out_forward always uses str32 type for entries. + # str16 can store only 64kbytes, and it should be much smaller than buffer chunk size. + + tag = tag.dup.force_encoding(Encoding::UTF_8) + + sock.write @sender.forward_header # array, size=3 + sock.write tag.to_msgpack # 1. tag: String (str) + chunk.open(compressed: @compress) do |chunk_io| + entries = [0xdb, chunk_io.size].pack('CN') + sock.write entries.force_encoding(Encoding::UTF_8) # 2. entries: String (str32) + IO.copy_stream(chunk_io, sock) # writeRawBody(packed_es) + end + sock.write option.to_msgpack # 3. option: Hash(map) + + # TODO: use bin32 for non-utf8 content(entries) when old msgpack-ruby (0.5.x or earlier) not supported + end + + def send_data(tag, chunk) + ack = @ack_handler && @ack_handler.create_ack(chunk.unique_id, self) + connect(nil, ack: ack) do |sock, ri| + ensure_established_connection(sock, ri) + send_data_actual(sock, tag, chunk) + end + + heartbeat(false) + nil + end + + # FORWARD_TCP_HEARTBEAT_DATA = FORWARD_HEADER + ''.to_msgpack + [].to_msgpack + # + # @return [Boolean] return true if it needs to rebuild nodes + def send_heartbeat + begin + dest_addr = resolved_host + @resolved_once = true + rescue ::SocketError => e + if !@resolved_once && @sender.ignore_network_errors_at_startup + @log.warn "failed to resolve node name in heartbeating", server: @name || @host, error: e + return false + end + raise + end + + case @sender.heartbeat_type + when :transport + connect(dest_addr) do |sock, ri| + ensure_established_connection(sock, ri) + + ## don't send any data to not cause a compatibility problem + # sock.write FORWARD_TCP_HEARTBEAT_DATA + + # successful tcp connection establishment is considered as valid heartbeat. + # When heartbeat is succeeded after detached, return true. It rebuilds weight array. + heartbeat(true) + end + when :udp + @usock.send "\0", 0, Socket.pack_sockaddr_in(@port, dest_addr) + # response is going to receive at on_udp_heatbeat_response_recv + false + when :none # :none doesn't use this class + raise "BUG: heartbeat_type none must not use Node" + else + raise "BUG: unknown heartbeat_type '#{@sender.heartbeat_type}'" + end end def resolved_host - case @conf.expire_dns_cache + case @sender.expire_dns_cache when 0 # cache is disabled - return resolve_dns! + resolve_dns! when nil # persistent cache - return @resolved_host ||= resolve_dns! + @resolved_host ||= resolve_dns! 
else - now = Engine.now + now = Fluent::EventTime.now rh = @resolved_host - if !rh || now - @resolved_time >= @conf.expire_dns_cache + if !rh || now - @resolved_time >= @sender.expire_dns_cache rh = @resolved_host = resolve_dns! @resolved_time = now end - return rh + rh end end def resolve_dns! addrinfo_list = Socket.getaddrinfo(@host, @port, nil, Socket::SOCK_STREAM) - addrinfo = @conf.dns_round_robin ? addrinfo_list.sample : addrinfo_list.first - @sockaddr = Socket.pack_sockaddr_in(addrinfo[1], addrinfo[3]) # used by on_heartbeat + addrinfo = @sender.dns_round_robin ? addrinfo_list.sample : addrinfo_list.first + @sockaddr = Socket.pack_sockaddr_in(addrinfo[1], addrinfo[3]) # used by on_udp_heatbeat_response_recv addrinfo[3] end private :resolve_dns! def tick now = Time.now.to_f - if !@available + unless available? if @failure.hard_timeout?(now) @failure.clear end @@ -531,41 +772,51 @@ def tick if @failure.hard_timeout?(now) @log.warn "detached forwarding server '#{@name}'", host: @host, port: @port, hard_timeout: true - @available = false + disable! @resolved_host = nil # expire cached host @failure.clear return true end - if @conf.phi_failure_detector + if @sender.phi_failure_detector phi = @failure.phi(now) - #$log.trace "phi '#{@name}'", :host=>@host, :port=>@port, :phi=>phi - if phi > @conf.phi_threshold - @log.warn "detached forwarding server '#{@name}'", host: @host, port: @port, phi: phi - @available = false + if phi > @sender.phi_threshold + @log.warn "detached forwarding server '#{@name}'", host: @host, port: @port, phi: phi, phi_threshold: @sender.phi_threshold + disable! @resolved_host = nil # expire cached host @failure.clear return true end end - return false + false end def heartbeat(detect=true) now = Time.now.to_f @failure.add(now) - #@log.trace "heartbeat from '#{@name}'", :host=>@host, :port=>@port, :available=>@available, :sample_size=>@failure.sample_size - if detect && !@available && @failure.sample_size > @conf.recover_sample_size + if detect && !available? && @failure.sample_size > @sender.recover_sample_size @available = true @log.warn "recovered forwarding server '#{@name}'", host: @host, port: @port - return true + true else - return nil + nil end end - def to_msgpack(out = '') - [@host, @port, @weight, @available].to_msgpack(out) + private + + def ensure_established_connection(sock, request_info) + if request_info.state != :established + establish_connection(sock, request_info) + + if request_info.state != :established + raise ConnectionClosedError, "failed to establish connection with node #{@name}" + end + end + end + + def connect(host = nil, ack: false, &block) + @connection_manager.connect(host: host || resolved_host, port: port, hostname: @hostname, ack: ack, &block) end end @@ -583,96 +834,5 @@ def heartbeat(detect=true) true end end - - class FailureDetector - PHI_FACTOR = 1.0 / Math.log(10.0) - SAMPLE_SIZE = 1000 - - def initialize(heartbeat_interval, hard_timeout, init_last) - @heartbeat_interval = heartbeat_interval - @last = init_last - @hard_timeout = hard_timeout - - # microsec - @init_gap = (heartbeat_interval * 1e6).to_i - @window = [@init_gap] - end - - def hard_timeout?(now) - now - @last > @hard_timeout - end - - def add(now) - if @window.empty? 
- @window << @init_gap - @last = now - else - gap = now - @last - @window << (gap * 1e6).to_i - @window.shift if @window.length > SAMPLE_SIZE - @last = now - end - end - - def phi(now) - size = @window.size - return 0.0 if size == 0 - - # Calculate weighted moving average - mean_usec = 0 - fact = 0 - @window.each_with_index {|gap,i| - mean_usec += gap * (1+i) - fact += (1+i) - } - mean_usec = mean_usec / fact - - # Normalize arrive intervals into 1sec - mean = (mean_usec.to_f / 1e6) - @heartbeat_interval + 1 - - # Calculate phi of the phi accrual failure detector - t = now - @last - @heartbeat_interval + 1 - phi = PHI_FACTOR * t / mean - - return phi - end - - def sample_size - @window.size - end - - def clear - @window.clear - @last = 0 - end - end - - ## TODO - #class RPC - # def initialize(this) - # @this = this - # end - # - # def list_nodes - # @this.nodes - # end - # - # def list_fault_nodes - # list_nodes.select {|n| !n.available? } - # end - # - # def list_available_nodes - # list_nodes.select {|n| n.available? } - # end - # - # def add_node(name, host, port, weight) - # end - # - # def recover_node(host, port) - # end - # - # def remove_node(host, port) - # end - #end end end diff --git a/source/plugins/ruby/out_mdm.rb b/source/plugins/ruby/out_mdm.rb index 6238eb51a..8e80fb753 100644 --- a/source/plugins/ruby/out_mdm.rb +++ b/source/plugins/ruby/out_mdm.rb @@ -1,11 +1,12 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -module Fluent - class OutputMDM < BufferedOutput - config_param :retry_mdm_post_wait_minutes, :integer +require 'fluent/plugin/output' - Plugin.register_output("out_mdm", self) +module Fluent::Plugin + class OutputMDM < Output + config_param :retry_mdm_post_wait_minutes, :integer + Fluent::Plugin.register_output("mdm", self) def initialize super @@ -57,8 +58,6 @@ def initialize end def configure(conf) - s = conf.add_element("secondary") - s["type"] = ChunkErrorHandler::SecondaryName super end @@ -204,7 +203,7 @@ def get_access_token end def write_status_file(success, message) - fn = "/var/opt/microsoft/omsagent/log/MDMIngestion.status" + fn = "/var/opt/microsoft/docker-cimprov/log/MDMIngestion.status" status = '{ "operation": "MDMIngestion", "success": "%s", "message": "%s" }' % [success, message] begin File.open(fn, "w") { |file| file.write(status) } @@ -270,6 +269,7 @@ def write(chunk) flush_mdm_exception_telemetry if (!@first_post_attempt_made || (Time.now > @last_post_attempt_time + retry_mdm_post_wait_minutes * 60)) && @can_send_data_to_mdm post_body = [] + chunk.extend Fluent::ChunkMessagePackEventStreamer chunk.msgpack_each { |(tag, record)| post_body.push(record.to_json) } @@ -320,7 +320,7 @@ def send_to_mdm(post_body) ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMSendSuccessful", {}) @last_telemetry_sent_time = Time.now end - rescue Net::HTTPServerException => e + rescue Net::HTTPClientException => e # see https://docs.ruby-lang.org/en/2.6.0/NEWS.html about deprecating HTTPServerException and adding HTTPClientException if !response.nil? && !response.body.nil? #body will have actual error @log.info "Failed to Post Metrics to MDM : #{e} Response.body: #{response.body}" else @@ -334,7 +334,7 @@ def send_to_mdm(post_body) # Not raising exception, as that will cause retries to happen elsif !response.code.empty? 
&& response.code.start_with?("4") # Log 400 errors and continue - @log.info "Non-retryable HTTPServerException when POSTing Metrics to MDM #{e} Response: #{response}" + @log.info "Non-retryable HTTPClientException when POSTing Metrics to MDM #{e} Response: #{response}" else # raise if the response code is non-400 @log.info "HTTPServerException when POSTing Metrics to MDM #{e} Response: #{response}" @@ -352,72 +352,5 @@ def send_to_mdm(post_body) raise e end end - - private - - class ChunkErrorHandler - include Configurable - include PluginId - include PluginLoggerMixin - - SecondaryName = "__ChunkErrorHandler__" - - Plugin.register_output(SecondaryName, self) - - def initialize - @router = nil - end - - def secondary_init(primary) - @error_handlers = create_error_handlers @router - end - - def start - # NOP - end - - def shutdown - # NOP - end - - def router=(r) - @router = r - end - - def write(chunk) - chunk.msgpack_each { |(tag, record)| - @error_handlers[tag].emit(record) - } - end - - private - - def create_error_handlers(router) - nop_handler = NopErrorHandler.new - Hash.new() { |hash, tag| - etag = OMS::Common.create_error_tag tag - hash[tag] = router.match?(etag) ? - ErrorHandler.new(router, etag) : - nop_handler - } - end - - class ErrorHandler - def initialize(router, etag) - @router = router - @etag = etag - end - - def emit(record) - @router.emit(@etag, Fluent::Engine.now, record) - end - end - - class NopErrorHandler - def emit(record) - # NOP - end - end - end end # class OutputMDM end # module Fluent diff --git a/source/plugins/ruby/podinventory_to_mdm.rb b/source/plugins/ruby/podinventory_to_mdm.rb index d9cb71bd4..c24a91a87 100644 --- a/source/plugins/ruby/podinventory_to_mdm.rb +++ b/source/plugins/ruby/podinventory_to_mdm.rb @@ -279,16 +279,16 @@ def process_pod_inventory_record(record) begin records = [] - podUid = record["DataItems"][0]["PodUid"] + podUid = record["PodUid"] if @pod_uids.key?(podUid) return end @pod_uids[podUid] = true - podPhaseDimValue = record["DataItems"][0]["PodStatus"] - podNamespaceDimValue = record["DataItems"][0]["Namespace"] - podControllerNameDimValue = record["DataItems"][0]["ControllerName"] - podNodeDimValue = record["DataItems"][0]["Computer"] + podPhaseDimValue = record["PodStatus"] + podNamespaceDimValue = record["Namespace"] + podControllerNameDimValue = record["ControllerName"] + podNodeDimValue = record["Computer"] if podControllerNameDimValue.nil? || podControllerNameDimValue.empty? podControllerNameDimValue = "No Controller" diff --git a/source/plugins/utils/oms_common.rb b/source/plugins/utils/oms_common.rb new file mode 100644 index 000000000..c10cb8638 --- /dev/null +++ b/source/plugins/utils/oms_common.rb @@ -0,0 +1,143 @@ +module OMS + + MSDockerCImprovHostnameFilePath = '/var/opt/microsoft/docker-cimprov/state/containerhostname' + IPV6_REGEX = '\h{4}:\h{4}:\h{4}:\h{4}:\h{4}:\h{4}:\h{4}:\h{4}' + IPV4_Approximate_REGEX = '\d+\.\d+\.\d+\.\d+' + + class RetryRequestException < Exception + # Throw this exception to tell the fluentd engine to retry and + # inform the output plugin that it is indeed retryable + end + + class Common + require 'socket' + require_relative 'omslog' + + @@Hostname = nil + @@HostnameFilePath = MSDockerCImprovHostnameFilePath + + + class << self + + # Internal methods + # (left public for easy testing, though protected may be better later) + + def clean_hostname_string(hnBuffer) + return "" if hnBuffer.nil? # So give the rest of the program a string to deal with. 
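[Editor's note] The podinventory_to_mdm.rb hunk above reflects the pod inventory record shape changing from a wrapped "DataItems" array to flat top-level fields. A hypothetical before/after pair, only to illustrate the shapes being assumed (field values are made up):

    # Old shape: dimensions nested under DataItems[0]
    old_record = {
      "DataItems" => [
        { "PodUid" => "00000000-0000-0000-0000-000000000000",
          "PodStatus" => "Running", "Namespace" => "default",
          "ControllerName" => "my-deployment", "Computer" => "aks-nodepool1-0" }
      ]
    }

    # New shape: the same dimensions at the top level of the record
    new_record = {
      "PodUid" => "00000000-0000-0000-0000-000000000000",
      "PodStatus" => "Running", "Namespace" => "default",
      "ControllerName" => "my-deployment", "Computer" => "aks-nodepool1-0"
    }

    new_record["PodUid"]   # was old_record["DataItems"][0]["PodUid"]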
+ hostname_buffer = hnBuffer.strip + return hostname_buffer + end + + def has_designated_hostnamefile? + return false if @@HostnameFilePath.nil? + return false unless @@HostnameFilePath =~ /\w/ + return false unless File.exist?(@@HostnameFilePath) + return true + end + + def is_dot_separated_string?(hnBuffer) + return true if /[^.]+\.[^.]+/ =~ hnBuffer + return false + end + + def is_hostname_compliant?(hnBuffer) + # RFC 2181: + # Size limit is 1 to 63 octets, so probably bytesize is appropriate method. + return false if hnBuffer.nil? + return false if /\./ =~ hnBuffer # Hostname by definition may not contain a dot. + return false if /:/ =~ hnBuffer # Hostname by definition may not contain a colon. + return false unless 1 <= hnBuffer.bytesize && hnBuffer.bytesize <= 63 + return true + end + + def is_like_ipv4_string?(hnBuffer) + return false unless /\A#{IPV4_Approximate_REGEX}\z/ =~ hnBuffer + qwa = hnBuffer.split('.') + return false unless qwa.length == 4 + return false if qwa[0].to_i == 0 + qwa.each do |quadwordstring| + bi = quadwordstring.to_i + # This may need more detail if 255 octets are sometimes allowed, but I don't think so. + return false unless 0 <= bi and bi < 255 + end + return true + end + + def is_like_ipv6_string?(hnBuffer) + return true if /\A#{IPV6_REGEX}\z/ =~ hnBuffer + return false + end + + def look_for_socket_class_host_address + hostname_buffer = nil + + begin + hostname_buffer = Socket.gethostname + rescue => error + OMS::Log.error_once("Unable to get the Host Name using socket facility: #{error}") + return + end + @@Hostname = clean_hostname_string(hostname_buffer) + + return # Thwart accidental return to force correct use. + end + + def look_in_designated_hostnamefile + # Issue: + # When omsagent runs inside a container, gethostname returns the hostname of the container (random name) + # not the actual machine hostname. + # One way to solve this problem is to set the container hostname same as machine name, but this is not + # possible when host-machine is a private VM inside a cluster. + # Solution: + # Share/mount ‘/etc/hostname’ as '/var/opt/microsoft/omsagent/state/containername' with container and + # omsagent will read hostname from shared file. + hostname_buffer = nil + + unless File.readable?(@@HostnameFilePath) + OMS::Log.warn_once("File '#{@@HostnameFilePath}' exists but is not readable.") + return + end + + begin + hostname_buffer = File.read(@@HostnameFilePath) + rescue => error + OMS::Log.warn_once("Unable to read the hostname from #{@@HostnameFilePath}: #{error}") + end + @@Hostname = clean_hostname_string(hostname_buffer) + return # Thwart accidental return to force correct use. + end + + def validate_hostname_equivalent(hnBuffer) + # RFC 1123 and 2181 + # Note that for now we are limiting the earlier maximum of 63 for fqdn labels and thus + # hostnames UNTIL we are assured azure will allow 255, as specified in RFC 1123, or + # we are otherwise instructed. + rfcl = "RFCs 1123, 2181 with hostname range of {1,63} octets for non-root item." + return if is_hostname_compliant?(hnBuffer) + return if is_like_ipv4_string?(hnBuffer) + return if is_like_ipv6_string?(hnBuffer) + msg = "Hostname '#{hnBuffer}' not compliant (#{rfcl}). Not IP Address Either." + OMS::Log.warn_once(msg) + raise NameError, msg + end + + # End of Internal methods + + def get_hostname(ignoreOldValue = false) + if not is_hostname_compliant?(@@Hostname) or ignoreOldValue then + + look_in_designated_hostnamefile if has_designated_hostnamefile? 
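[Editor's note] To make the hostname helpers above concrete, here is how they behave on a few made-up inputs (illustrative only; is_hostname_compliant? rejects dots and colons and enforces the 1-63 byte limit, and is_like_ipv4_string? rejects a leading zero quadword):

    OMS::Common.is_hostname_compliant?("aks-nodepool1-12345678-0")   # => true
    OMS::Common.is_hostname_compliant?("node1.contoso.internal")     # => false (contains a dot)
    OMS::Common.is_like_ipv4_string?("10.240.0.4")                   # => true
    OMS::Common.is_like_ipv4_string?("0.240.0.4")                    # => false (leading 0 quadword)
    OMS::Common.is_like_ipv6_string?("fe80:0000:0000:0000:0202:b3ff:fe1e:8329")  # => true

    # Typical call site: resolve once and reuse the cached class-level value.
    hostname = OMS::Common.get_hostname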
+ + look_for_socket_class_host_address unless is_hostname_compliant?(@@Hostname) + end + + begin + validate_hostname_equivalent(@@Hostname) + rescue => error + OMS::Log.warn_once("Hostname '#{@@Hostname}' found, but did NOT validate as compliant. #{error}. Using anyway.") + end + return @@Hostname + end + end # Class methods + end # class Common +end # module OMS diff --git a/source/plugins/utils/omslog.rb b/source/plugins/utils/omslog.rb new file mode 100644 index 000000000..b65bf947c --- /dev/null +++ b/source/plugins/utils/omslog.rb @@ -0,0 +1,50 @@ +module OMS + class Log + require 'set' + require 'digest' + + @@error_proc = Proc.new {|message| $log.error message } + @@warn_proc = Proc.new {|message| $log.warn message } + @@info_proc = Proc.new {|message| $log.info message } + @@debug_proc = Proc.new {|message| $log.debug message } + + @@logged_hashes = Set.new + + class << self + def error_once(message, tag=nil) + log_once(@@error_proc, @@debug_proc, message, tag) + end + + def warn_once(message, tag=nil) + log_once(@@warn_proc, @@debug_proc, message, tag) + end + + def info_once(message, tag=nil) + log_once(@@info_proc, @@debug_proc, message, tag) + end + + def log_once(first_loglevel_proc, next_loglevel_proc, message, tag=nil) + # Will log a message once with the first procedure and subsequently with the second + # This allows repeated messages to be ignored by having the second logging function at a lower log level + # An optional tag can be used as the message key + + if tag == nil + tag = message + end + + md5_digest = Digest::MD5.new + tag_hash = md5_digest.update(tag).base64digest + res = @@logged_hashes.add?(tag_hash) + + if res == nil + # The hash was already in the set + next_loglevel_proc.call(message) + else + # First time we see this hash + first_loglevel_proc.call(message) + end + end + end # Class methods + + end # Class Log +end # Module OMS From 959b455d5ab873b6fa5ed7445bd61dc847ec2c08 Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Wed, 26 May 2021 17:41:00 -0700 Subject: [PATCH 106/301] Send perf metrics to MDM from windows daemonset (#568) --- .../scripts/tomlparser-mdm-metrics-config.rb | 59 ++++++++++---- .../installer/datafiles/base_container.data | 2 +- build/windows/Makefile.ps1 | 10 +++ build/windows/installer/conf/fluent.conf | 32 ++++++++ kubernetes/omsagent.yaml | 9 +++ kubernetes/windows/Dockerfile | 6 +- kubernetes/windows/main.ps1 | 66 +++++++++++++++- .../ruby/ApplicationInsightsUtility.rb | 36 ++++++--- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 77 ++++++++++++------- source/plugins/ruby/KubernetesApiClient.rb | 7 +- source/plugins/ruby/MdmMetricsGenerator.rb | 7 +- .../plugins/ruby/arc_k8s_cluster_identity.rb | 29 ++++--- source/plugins/ruby/filter_cadvisor2mdm.rb | 29 ++++--- source/plugins/ruby/in_cadvisor_perf.rb | 29 ++++--- source/plugins/ruby/in_win_cadvisor_perf.rb | 11 ++- source/plugins/ruby/kubelet_utils.rb | 7 +- 16 files changed, 311 insertions(+), 105 deletions(-) rename build/{linux => common}/installer/scripts/tomlparser-mdm-metrics-config.rb (75%) diff --git a/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb b/build/common/installer/scripts/tomlparser-mdm-metrics-config.rb similarity index 75% rename from build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb rename to build/common/installer/scripts/tomlparser-mdm-metrics-config.rb index dcf179bf2..b6a4419cf 100644 --- a/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb +++ 
b/build/common/installer/scripts/tomlparser-mdm-metrics-config.rb @@ -1,9 +1,16 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -require_relative "tomlrb" -require_relative "ConfigParseErrorLogger" +#this should be require relative in Linux and require in windows, since it is a gem install on windows +@os_type = ENV["OS_TYPE"] +if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 + require "tomlrb" +else + require_relative "tomlrb" +end + require_relative "/etc/fluent/plugin/constants" +require_relative "ConfigParseErrorLogger" @configMapMountPath = "/etc/config/settings/alertable-metrics-configuration-settings" @configVersion = "" @@ -124,6 +131,10 @@ def populateSettingValuesFromConfigMap(parsedConfig) end end +def get_command_windows(env_variable_name, env_variable_value) + return "[System.Environment]::SetEnvironmentVariable(\"#{env_variable_name}\", \"#{env_variable_value}\", \"Process\")" + "\n" + "[System.Environment]::SetEnvironmentVariable(\"#{env_variable_name}\", \"#{env_variable_value}\", \"Machine\")" + "\n" +end + @configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] puts "****************Start MDM Metrics Config Processing********************" if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version, so hardcoding it @@ -137,19 +148,37 @@ def populateSettingValuesFromConfigMap(parsedConfig) end end -# Write the settings to file, so that they can be set as environment variables -file = File.open("config_mdm_metrics_env_var", "w") +if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 + # Write the settings to file, so that they can be set as environment variables in windows container + file = File.open("setmdmenv.ps1", "w") -if !file.nil? - file.write("export AZMON_ALERT_CONTAINER_CPU_THRESHOLD=#{@percentageCpuUsageThreshold}\n") - file.write("export AZMON_ALERT_CONTAINER_MEMORY_RSS_THRESHOLD=#{@percentageMemoryRssThreshold}\n") - file.write("export AZMON_ALERT_CONTAINER_MEMORY_WORKING_SET_THRESHOLD=\"#{@percentageMemoryWorkingSetThreshold}\"\n") - file.write("export AZMON_ALERT_PV_USAGE_THRESHOLD=#{@percentagePVUsageThreshold}\n") - file.write("export AZMON_ALERT_JOB_COMPLETION_TIME_THRESHOLD=#{@jobCompletionThresholdMinutes}\n") - # Close file after writing all MDM setting environment variables - file.close - puts "****************End MDM Metrics Config Processing********************" + if !file.nil? + commands = get_command_windows("AZMON_ALERT_CONTAINER_CPU_THRESHOLD", @percentageCpuUsageThreshold) + file.write(commands) + commands = get_command_windows("AZMON_ALERT_CONTAINER_MEMORY_WORKING_SET_THRESHOLD", @percentageMemoryWorkingSetThreshold) + file.write(commands) + # Close file after writing all environment variables + file.close + puts "****************End MDM Metrics Config Processing********************" + else + puts "Exception while opening file for writing MDM metric config environment variables" + puts "****************End MDM Metrics Config Processing********************" + end else - puts "Exception while opening file for writing MDM metric config environment variables" - puts "****************End MDM Metrics Config Processing********************" + # Write the settings to file, so that they can be set as environment variables in linux container + file = File.open("config_mdm_metrics_env_var", "w") + + if !file.nil? 
+ file.write("export AZMON_ALERT_CONTAINER_CPU_THRESHOLD=#{@percentageCpuUsageThreshold}\n") + file.write("export AZMON_ALERT_CONTAINER_MEMORY_RSS_THRESHOLD=#{@percentageMemoryRssThreshold}\n") + file.write("export AZMON_ALERT_CONTAINER_MEMORY_WORKING_SET_THRESHOLD=\"#{@percentageMemoryWorkingSetThreshold}\"\n") + file.write("export AZMON_ALERT_PV_USAGE_THRESHOLD=#{@percentagePVUsageThreshold}\n") + file.write("export AZMON_ALERT_JOB_COMPLETION_TIME_THRESHOLD=#{@jobCompletionThresholdMinutes}\n") + # Close file after writing all MDM setting environment variables + file.close + puts "****************End MDM Metrics Config Processing********************" + else + puts "Exception while opening file for writing MDM metric config environment variables" + puts "****************End MDM Metrics Config Processing********************" + end end diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index b9f889dba..de8ccbba0 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -42,7 +42,7 @@ MAINTAINER: 'Microsoft Corporation' /opt/microsoft/docker-cimprov/bin/TelegrafTCPErrorTelemetry.sh; build/linux/installer/scripts/TelegrafTCPErrorTelemetry.sh; 755; root; root /opt/livenessprobe.sh; build/linux/installer/scripts/livenessprobe.sh; 755; root; root /opt/tomlparser-prom-customconfig.rb; build/common/installer/scripts/tomlparser-prom-customconfig.rb; 755; root; root -/opt/tomlparser-mdm-metrics-config.rb; build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb; 755; root; root +/opt/tomlparser-mdm-metrics-config.rb; build/common/installer/scripts/tomlparser-mdm-metrics-config.rb; 755; root; root /opt/tomlparser-metric-collection-config.rb; build/linux/installer/scripts/tomlparser-metric-collection-config.rb; 755; root; root diff --git a/build/windows/Makefile.ps1 b/build/windows/Makefile.ps1 index 2d49330ea..737abc92a 100644 --- a/build/windows/Makefile.ps1 +++ b/build/windows/Makefile.ps1 @@ -180,4 +180,14 @@ $exclude = @('*.cs','*.csproj') Copy-Item -Path $installerdir -Destination $publishdir -Recurse -Force -Exclude $exclude Write-Host("successfully copied installer files conf and scripts from :" + $installerdir + " to :" + $publishdir + " ") -ForegroundColor Green +$rubyplugindir = Join-Path -Path $rootdir -ChildPath "source\plugins\ruby" +Write-Host("copying ruby source files from :" + $rubyplugindir + " to :" + $publishdir + " ...") +Copy-Item -Path $rubyplugindir -Destination $publishdir -Recurse -Force +Write-Host("successfully copied ruby source files from :" + $rubyplugindir + " to :" + $publishdir + " ") -ForegroundColor Green + +$utilsplugindir = Join-Path -Path $rootdir -ChildPath "source\plugins\utils" +Write-Host("copying ruby util files from :" + $utilsplugindir + " to :" + $publishdir + " ...") +Copy-Item -Path $utilsplugindir -Destination $publishdir -Recurse -Force +Write-Host("successfully copied ruby util files from :" + $utilsplugindir + " to :" + $publishdir + " ") -ForegroundColor Green + Set-Location $currentdir \ No newline at end of file diff --git a/build/windows/installer/conf/fluent.conf b/build/windows/installer/conf/fluent.conf index d5eb475ca..741e5ce19 100644 --- a/build/windows/installer/conf/fluent.conf +++ b/build/windows/installer/conf/fluent.conf @@ -4,6 +4,13 @@ @log_level info + + @type cadvisor_perf + tag oms.api.cadvisorperf + run_interval 60 + @log_level debug + + @type tail path "#{ENV['AZMON_LOG_TAIL_PATH']}" @@ 
-29,6 +36,14 @@ @include fluent-docker-parser.conf +#custom_metrics_mdm filter plugin + + @type cadvisor2mdm + metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes + log_path /etc/omsagentwindows/filter_cadvisor2mdm.log + @log_level info + + @type grep @@ -46,6 +61,23 @@ + + @type mdm + @log_level debug + + @type file + path /etc/omsagentwindows/out_mdm_cdvisorperf.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + flush_interval 20s + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + + retry_mdm_post_wait_minutes 30 + + @type forward send_timeout 60s diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index ab6bbea9c..4290e1d59 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -790,6 +790,9 @@ spec: fieldPath: status.hostIP - name: SIDECAR_SCRAPING_ENABLED value: "true" + # Update this with the user assigned msi client id for omsagent + - name: USER_ASSIGNED_IDENTITY_CLIENT_ID + value: "" # Add this only for clouds that require cert bootstrapping - name: REQUIRES_CERT_BOOTSTRAP value: "true" @@ -812,6 +815,9 @@ spec: # - mountPath: C:\ca # name: ca-certs # readOnly: true + - mountPath: C:\etc\kubernetes\host + name: azure-json-path + readOnly: true livenessProbe: exec: command: @@ -843,6 +849,9 @@ spec: - name: docker-windows-kuberenetes-container-logs hostPath: path: C:\var + - name: azure-json-path + hostPath: + path: C:\k # Need to mount this only for airgapped clouds - Commenting this since it wont exist in non airgapped clouds #- name: ca-certs # hostPath: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index fefd089a8..5a5298d0b 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -20,7 +20,7 @@ RUN refreshenv \ && gem install cool.io -v 1.5.4 --platform ruby \ && gem install oj -v 3.3.10 \ && gem install json -v 2.2.0 \ -&& gem install fluentd -v 1.10.2 \ +&& gem install fluentd -v 1.12.2 \ && gem install win32-service -v 1.0.1 \ && gem install win32-ipc -v 0.7.0 \ && gem install win32-event -v 0.6.3 \ @@ -69,6 +69,10 @@ COPY ./omsagentwindows/installer/conf/telegraf.conf /etc/telegraf/ # copy keepcert alive ruby scripts COPY ./omsagentwindows/installer/scripts/rubyKeepCertificateAlive/*.rb /etc/fluent/plugin/ +#Copy fluentd ruby plugins +COPY ./omsagentwindows/ruby/ /etc/fluent/plugin/ +COPY ./omsagentwindows/utils/*.rb /etc/fluent/plugin/ + ENV AGENT_VERSION ${IMAGE_TAG} ENV OS_TYPE "windows" ENV APPLICATIONINSIGHTS_AUTH "NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi" diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index baf95fca4..bc053b0d6 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -134,9 +134,6 @@ function Set-EnvironmentVariables { [System.Environment]::SetEnvironmentVariable("APPLICATIONINSIGHTS_ENDPOINT", $appInsightsEndpoint, "machine") Write-Host "Successfully set environment variable APPLICATIONINSIGHTS_ENDPOINT - $($appInsightsEndpoint) for target 'machine'..." 
} - else { - Write-Host "Failed to set environment variable APPLICATIONINSIGHTS_ENDPOINT for target 'machine' since it is either null or empty" - } # Check if the instrumentation key needs to be fetched from a storage account (as in airgapped clouds) $aiKeyURl = [System.Environment]::GetEnvironmentVariable('APPLICATIONINSIGHTS_AUTH_URL') @@ -180,14 +177,71 @@ function Set-EnvironmentVariables { [System.Environment]::SetEnvironmentVariable("TELEMETRY_APPLICATIONINSIGHTS_KEY", $aiKeyDecoded, "Process") [System.Environment]::SetEnvironmentVariable("TELEMETRY_APPLICATIONINSIGHTS_KEY", $aiKeyDecoded, "Machine") + # Setting environment variables required by the fluentd plugins + $aksResourceId = [System.Environment]::GetEnvironmentVariable("AKS_RESOURCE_ID", "process") + if (![string]::IsNullOrEmpty($aksResourceId)) { + [System.Environment]::SetEnvironmentVariable("AKS_RESOURCE_ID", $aksResourceId, "machine") + Write-Host "Successfully set environment variable AKS_RESOURCE_ID - $($aksResourceId) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable AKS_RESOURCE_ID for target 'machine' since it is either null or empty" + } + + $aksRegion = [System.Environment]::GetEnvironmentVariable("AKS_REGION", "process") + if (![string]::IsNullOrEmpty($aksRegion)) { + [System.Environment]::SetEnvironmentVariable("AKS_REGION", $aksRegion, "machine") + Write-Host "Successfully set environment variable AKS_REGION - $($aksRegion) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable AKS_REGION for target 'machine' since it is either null or empty" + } + + $controllerType = [System.Environment]::GetEnvironmentVariable("CONTROLLER_TYPE", "process") + if (![string]::IsNullOrEmpty($controllerType)) { + [System.Environment]::SetEnvironmentVariable("CONTROLLER_TYPE", $controllerType, "machine") + Write-Host "Successfully set environment variable CONTROLLER_TYPE - $($controllerType) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable CONTROLLER_TYPE for target 'machine' since it is either null or empty" + } + + $osType = [System.Environment]::GetEnvironmentVariable("OS_TYPE", "process") + if (![string]::IsNullOrEmpty($osType)) { + [System.Environment]::SetEnvironmentVariable("OS_TYPE", $osType, "machine") + Write-Host "Successfully set environment variable OS_TYPE - $($osType) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable OS_TYPE for target 'machine' since it is either null or empty" + } + + $userMsi = [System.Environment]::GetEnvironmentVariable("USER_ASSIGNED_IDENTITY_CLIENT_ID", "process") + if (![string]::IsNullOrEmpty($userMsi)) { + [System.Environment]::SetEnvironmentVariable("USER_ASSIGNED_IDENTITY_CLIENT_ID", $userMsi, "machine") + Write-Host "Successfully set environment variable USER_ASSIGNED_IDENTITY_CLIENT_ID - $($userMsi) for target 'machine'..." + } + + $hostName = [System.Environment]::GetEnvironmentVariable("HOSTNAME", "process") + if (![string]::IsNullOrEmpty($hostName)) { + [System.Environment]::SetEnvironmentVariable("HOSTNAME", $hostName, "machine") + Write-Host "Successfully set environment variable HOSTNAME - $($hostName) for target 'machine'..." 
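[Editor's note] The promote-to-machine-scope blocks added above all repeat the same pattern. A small hypothetical helper (not part of the actual main.ps1, shown only to capture the pattern) would look like this:

    # Hypothetical helper: copy a process-scope environment variable to machine scope
    # so that the fluentd plugins running as a separate process can read it.
    function Copy-EnvVarToMachineScope {
        param([string]$Name)
        $value = [System.Environment]::GetEnvironmentVariable($Name, "Process")
        if (![string]::IsNullOrEmpty($value)) {
            [System.Environment]::SetEnvironmentVariable($Name, $value, "Machine")
            Write-Host "Successfully set environment variable $Name - $($value) for target 'machine'..."
        }
        else {
            Write-Host "Failed to set environment variable $Name for target 'machine' since it is either null or empty"
        }
    }

    # e.g. Copy-EnvVarToMachineScope -Name "AKS_RESOURCE_ID"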
+ } + else { + Write-Host "Failed to set environment variable HOSTNAME for target 'machine' since it is either null or empty" + } + # run config parser ruby /opt/omsagentwindows/scripts/ruby/tomlparser.rb .\setenv.ps1 + + # run mdm config parser + ruby /opt/omsagentwindows/scripts/ruby/tomlparser-mdm-metrics-config.rb + .\setmdmenv.ps1 } function Get-ContainerRuntime { # default container runtime and make default as containerd when containerd becomes default in AKS $containerRuntime = "docker" + $cAdvisorIsSecure = "false" $response = "" $NODE_IP = "" try { @@ -227,6 +281,7 @@ function Get-ContainerRuntime { if (![string]::IsNullOrEmpty($response) -and $response.StatusCode -eq 200) { Write-Host "API call to https://$($NODE_IP):10250/pods succeeded" $isPodsAPISuccess = $true + $cAdvisorIsSecure = "true" } } catch { @@ -234,6 +289,11 @@ function Get-ContainerRuntime { } } + # set IS_SECURE_CADVISOR_PORT env for debug and telemetry purpose + Write-Host "Setting IS_SECURE_CADVISOR_PORT environment variable as $($cAdvisorIsSecure)" + [System.Environment]::SetEnvironmentVariable("IS_SECURE_CADVISOR_PORT", $cAdvisorIsSecure, "Process") + [System.Environment]::SetEnvironmentVariable("IS_SECURE_CADVISOR_PORT", $cAdvisorIsSecure, "Machine") + if ($isPodsAPISuccess) { if (![string]::IsNullOrEmpty($response.Content)) { $podList = $response.Content | ConvertFrom-Json diff --git a/source/plugins/ruby/ApplicationInsightsUtility.rb b/source/plugins/ruby/ApplicationInsightsUtility.rb index 6ae567337..74d08c1e6 100644 --- a/source/plugins/ruby/ApplicationInsightsUtility.rb +++ b/source/plugins/ruby/ApplicationInsightsUtility.rb @@ -21,10 +21,15 @@ class ApplicationInsightsUtility @@EnvApplicationInsightsEndpoint = "APPLICATIONINSIGHTS_ENDPOINT" @@EnvControllerType = "CONTROLLER_TYPE" @@EnvContainerRuntime = "CONTAINER_RUNTIME" - + @@isWindows = false + @@hostName = (OMS::Common.get_hostname) + @@os_type = ENV["OS_TYPE"] + if !@@os_type.nil? && !@@os_type.empty? && @@os_type.strip.casecmp("windows") == 0 + @@isWindows = true + @@hostName = ENV["HOSTNAME"] + end @@CustomProperties = {} @@Tc = nil - @@hostName = (OMS::Common.get_hostname) @@proxy = (ProxyUtils.getProxyConfiguration) def initialize @@ -133,16 +138,23 @@ def initializeUtility() end def getContainerRuntimeInfo() - containerRuntime = ENV[@@EnvContainerRuntime] - if !containerRuntime.nil? && !containerRuntime.empty? - # DockerVersion field holds either containerRuntime for non-docker or Dockerversion if its docker - @@CustomProperties["DockerVersion"] = containerRuntime - if containerRuntime.casecmp("docker") == 0 - dockerInfo = DockerApiClient.dockerInfo - if (!dockerInfo.nil? && !dockerInfo.empty?) - @@CustomProperties["DockerVersion"] = dockerInfo["Version"] + begin + # Not doing this for windows since docker is being deprecated soon and we dont want to bring in the socket dependency. + if !@@isWindows.nil? && @@isWindows == false + containerRuntime = ENV[@@EnvContainerRuntime] + if !containerRuntime.nil? && !containerRuntime.empty? + # DockerVersion field holds either containerRuntime for non-docker or Dockerversion if its docker + @@CustomProperties["DockerVersion"] = containerRuntime + if containerRuntime.casecmp("docker") == 0 + dockerInfo = DockerApiClient.dockerInfo + if (!dockerInfo.nil? && !dockerInfo.empty?) 
+ @@CustomProperties["DockerVersion"] = dockerInfo["Version"] + end + end end end + rescue => errorStr + $log.warn("Exception in AppInsightsUtility: getContainerRuntimeInfo - error: #{errorStr}") end end @@ -262,7 +274,7 @@ def sendMetricTelemetry(metricName, metricValue, properties) end def getWorkspaceId() - begin + begin workspaceId = ENV["WSID"] if workspaceId.nil? || workspaceId.empty? $log.warn("Exception in AppInsightsUtility: getWorkspaceId - WorkspaceID either nil or empty") @@ -274,7 +286,7 @@ def getWorkspaceId() end def getWorkspaceCloud() - begin + begin workspaceDomain = ENV["DOMAIN"] workspaceCloud = "AzureCloud" if workspaceDomain.casecmp("opinsights.azure.com") == 0 diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index f02459aef..10720752d 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -38,7 +38,12 @@ class CAdvisorMetricsAPIClient @npmIntegrationBasic = ENV["TELEMETRY_NPM_INTEGRATION_METRICS_BASIC"] @npmIntegrationAdvanced = ENV["TELEMETRY_NPM_INTEGRATION_METRICS_ADVANCED"] - @LogPath = "/var/opt/microsoft/docker-cimprov/log/kubernetes_perf_log.txt" + @os_type = ENV["OS_TYPE"] + if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 + @LogPath = "/etc/omsagentwindows/kubernetes_perf_log.txt" + else + @LogPath = "/var/opt/microsoft/docker-cimprov/log/kubernetes_perf_log.txt" + end @Log = Logger.new(@LogPath, 2, 10 * 1048576) #keep last 2 files, max log file size = 10M # @@rxBytesLast = nil # @@rxBytesTimeLast = nil @@ -142,39 +147,54 @@ def getMetrics(winNode: nil, metricTime: Time.now.utc.iso8601) operatingSystem = "Linux" end if !metricInfo.nil? - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem)) - metricDataItems.concat(getContainerStartTimeMetricItems(metricInfo, hostName, "restartTimeEpoch", metricTime)) - - if operatingSystem == "Linux" - metricDataItems.concat(getContainerCpuMetricItems(metricInfo, hostName, "usageNanoCores", Constants::CPU_USAGE_NANO_CORES, metricTime)) - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime, operatingSystem)) - metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime)) - elsif operatingSystem == "Windows" + # Checking if we are in windows daemonset and sending only few metrics that are needed for MDM + if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 + # Container metrics + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem)) containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime) if containerCpuUsageNanoSecondsRate && !containerCpuUsageNanoSecondsRate.empty? && !containerCpuUsageNanoSecondsRate.nil? metricDataItems.concat(containerCpuUsageNanoSecondsRate) end - end + # Node metrics + cpuUsageNanoSecondsRate = getNodeMetricItemRate(metricInfo, hostName, "cpu", "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, operatingSystem, metricTime) + if cpuUsageNanoSecondsRate && !cpuUsageNanoSecondsRate.empty? && !cpuUsageNanoSecondsRate.nil? 
+ metricDataItems.push(cpuUsageNanoSecondsRate) + end + metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime)) + else + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem)) + metricDataItems.concat(getContainerStartTimeMetricItems(metricInfo, hostName, "restartTimeEpoch", metricTime)) + + if operatingSystem == "Linux" + metricDataItems.concat(getContainerCpuMetricItems(metricInfo, hostName, "usageNanoCores", Constants::CPU_USAGE_NANO_CORES, metricTime)) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime, operatingSystem)) + metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime)) + elsif operatingSystem == "Windows" + containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime) + if containerCpuUsageNanoSecondsRate && !containerCpuUsageNanoSecondsRate.empty? && !containerCpuUsageNanoSecondsRate.nil? + metricDataItems.concat(containerCpuUsageNanoSecondsRate) + end + end - cpuUsageNanoSecondsRate = getNodeMetricItemRate(metricInfo, hostName, "cpu", "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, operatingSystem, metricTime) - if cpuUsageNanoSecondsRate && !cpuUsageNanoSecondsRate.empty? && !cpuUsageNanoSecondsRate.nil? - metricDataItems.push(cpuUsageNanoSecondsRate) + cpuUsageNanoSecondsRate = getNodeMetricItemRate(metricInfo, hostName, "cpu", "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, operatingSystem, metricTime) + if cpuUsageNanoSecondsRate && !cpuUsageNanoSecondsRate.empty? && !cpuUsageNanoSecondsRate.nil? + metricDataItems.push(cpuUsageNanoSecondsRate) + end + metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime)) + + metricDataItems.push(getNodeLastRebootTimeMetric(metricInfo, hostName, "restartTimeEpoch", metricTime)) + # Disabling networkRxRate and networkTxRate since we dont use it as of now. + #metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "network", "rxBytes", "networkRxBytes")) + #metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "network", "txBytes", "networkTxBytes")) + # networkRxRate = getNodeMetricItemRate(metricInfo, hostName, "network", "rxBytes", "networkRxBytesPerSec") + # if networkRxRate && !networkRxRate.empty? && !networkRxRate.nil? + # metricDataItems.push(networkRxRate) + # end + # networkTxRate = getNodeMetricItemRate(metricInfo, hostName, "network", "txBytes", "networkTxBytesPerSec") + # if networkTxRate && !networkTxRate.empty? && !networkTxRate.nil? + # metricDataItems.push(networkTxRate) + # end end - metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime)) - - metricDataItems.push(getNodeLastRebootTimeMetric(metricInfo, hostName, "restartTimeEpoch", metricTime)) - - # Disabling networkRxRate and networkTxRate since we dont use it as of now. 
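[Editor's note] The getContainerCpuMetricItemRate/getNodeMetricItemRate helpers referenced above are not shown in this diff; conceptually, usageCoreNanoSeconds is a cumulative counter, so a usage rate has to be derived from two successive polls. A minimal sketch of that conversion, under that assumption:

    # Conceptual sketch only - not the actual helper implementation.
    # delta nanoseconds of CPU consumed per elapsed second equals usage in nanocores
    # (1 fully used core = 1e9 ns of CPU time per second = 1e9 nanocores).
    def nano_cores_rate(prev_value, prev_time, curr_value, curr_time)
      elapsed_seconds = curr_time - prev_time
      return nil if elapsed_seconds <= 0 || curr_value < prev_value  # clock skew or counter reset
      (curr_value - prev_value) / elapsed_seconds
    end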
- #metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "network", "rxBytes", "networkRxBytes")) - #metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "network", "txBytes", "networkTxBytes")) - # networkRxRate = getNodeMetricItemRate(metricInfo, hostName, "network", "rxBytes", "networkRxBytesPerSec") - # if networkRxRate && !networkRxRate.empty? && !networkRxRate.nil? - # metricDataItems.push(networkRxRate) - # end - # networkTxRate = getNodeMetricItemRate(metricInfo, hostName, "network", "txBytes", "networkTxBytesPerSec") - # if networkTxRate && !networkTxRate.empty? && !networkTxRate.nil? - # metricDataItems.push(networkTxRate) - # end else @Log.warn("Couldn't get metric information for host: #{hostName}") end @@ -203,7 +223,6 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met containerName = container["name"] metricValue = container["cpu"][cpuMetricNameToCollect] metricTime = metricPollTime #container["cpu"]["time"] - metricItem = {} metricItem["Timestamp"] = metricTime diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 3720bf6dc..4b50e20d8 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -25,7 +25,12 @@ class KubernetesApiClient #@@IsValidRunningNode = nil #@@IsLinuxCluster = nil @@KubeSystemNamespace = "kube-system" - @LogPath = "/var/opt/microsoft/docker-cimprov/log/kubernetes_client_log.txt" + @os_type = ENV["OS_TYPE"] + if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 + @LogPath = "/etc/omsagentwindows/kubernetes_client_log.txt" + else + @LogPath = "/var/opt/microsoft/docker-cimprov/log/kubernetes_client_log.txt" + end @Log = Logger.new(@LogPath, 2, 10 * 1048576) #keep last 2 files, max log file size = 10M @@TokenFileName = "/var/run/secrets/kubernetes.io/serviceaccount/token" @@TokenStr = nil diff --git a/source/plugins/ruby/MdmMetricsGenerator.rb b/source/plugins/ruby/MdmMetricsGenerator.rb index a809087dc..73cf19fac 100644 --- a/source/plugins/ruby/MdmMetricsGenerator.rb +++ b/source/plugins/ruby/MdmMetricsGenerator.rb @@ -10,7 +10,12 @@ class MdmMetricsGenerator require_relative "constants" require_relative "oms_common" - @log_path = "/var/opt/microsoft/docker-cimprov/log/mdm_metrics_generator.log" + @os_type = ENV["OS_TYPE"] + if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 + @log_path = "/etc/omsagentwindows/mdm_metrics_generator.log" + else + @log_path = "/var/opt/microsoft/docker-cimprov/log/mdm_metrics_generator.log" + end @log = Logger.new(@log_path, 1, 5000000) @@hostName = (OMS::Common.get_hostname) diff --git a/source/plugins/ruby/arc_k8s_cluster_identity.rb b/source/plugins/ruby/arc_k8s_cluster_identity.rb index 552dafb1f..39b8c1c96 100644 --- a/source/plugins/ruby/arc_k8s_cluster_identity.rb +++ b/source/plugins/ruby/arc_k8s_cluster_identity.rb @@ -18,15 +18,20 @@ class ArcK8sClusterIdentity @@crd_resource_uri_template = "%{kube_api_server_url}/apis/%{cluster_config_crd_api_version}/namespaces/%{cluster_identity_resource_namespace}/azureclusteridentityrequests/%{cluster_identity_resource_name}" @@secret_resource_uri_template = "%{kube_api_server_url}/api/v1/namespaces/%{cluster_identity_token_secret_namespace}/secrets/%{token_secret_name}" @@azure_monitor_custom_metrics_audience = "https://monitoring.azure.com/" - @@cluster_identity_request_kind = "AzureClusterIdentityRequest" + @@cluster_identity_request_kind = "AzureClusterIdentityRequest" def initialize - @LogPath = "/var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log" + @os_type = ENV["OS_TYPE"] + if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 + @LogPath = "/etc/omsagentwindows/arc_k8s_cluster_identity.log" + else + @LogPath = "/var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log" + end @log = Logger.new(@LogPath, 1, 5000000) @log.info "initialize start @ #{Time.now.utc.iso8601}" @token_expiry_time = Time.now @cached_access_token = String.new - @isLastTokenRenewalUpdatePending = false + @isLastTokenRenewalUpdatePending = false @token_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/token" @cert_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" @kube_api_server_url = KubernetesApiClient.getKubeAPIServerUrl @@ -34,8 +39,8 @@ def initialize @log.warn "got api server url nil from KubernetesApiClient.getKubeAPIServerUrl @ #{Time.now.utc.iso8601}" end @http_client = get_http_client - @service_account_token = get_service_account_token - @extensionName = ENV["ARC_K8S_EXTENSION_NAME"] + @service_account_token = get_service_account_token + @extensionName = ENV["ARC_K8S_EXTENSION_NAME"] @log.info "extension name:#{@extensionName} @ #{Time.now.utc.iso8601}" @log.info "initialize complete @ #{Time.now.utc.iso8601}" end @@ -55,7 +60,7 @@ def get_cluster_identity_token() @isLastTokenRenewalUpdatePending = true else @log.warn "last token renewal update still pending @ #{Time.now.utc.iso8601}" - end + end end @log.info "get token reference from crd @ #{Time.now.utc.iso8601}" tokenReference = get_token_reference_from_crd @@ -68,7 +73,7 @@ def get_cluster_identity_token() token = get_token_from_secret(token_secret_name, token_secret_data_name) if !token.nil? 
@cached_access_token = token - @isLastTokenRenewalUpdatePending = false + @isLastTokenRenewalUpdatePending = false else @log.warn "got token nil from secret: #{@token_secret_name}" end @@ -141,7 +146,7 @@ def get_token_reference_from_crd() create_request.body = crd_request_body_json create_response = @http_client.request(create_request) @log.info "Got response of #{create_response.code} for POST #{crd_request_uri} @ #{Time.now.utc.iso8601}" - end + end rescue => err @log.warn "get_token_reference_from_crd call failed: #{err}" ApplicationInsightsUtility.sendExceptionTelemetry(err, { "FeatureArea" => "MDM" }) @@ -159,7 +164,7 @@ def renew_near_expiry_token() cluster_identity_resource_namespace: @@cluster_identity_resource_namespace, cluster_identity_resource_name: @@cluster_identity_resource_name, } - update_crd_request_body = { 'status': {'expirationTime': ''} } + update_crd_request_body = { 'status': { 'expirationTime': "" } } update_crd_request_body_json = update_crd_request_body.to_json update_crd_request_uri = crd_request_uri + "/status" update_request = Net::HTTP::Patch.new(update_crd_request_uri) @@ -234,9 +239,9 @@ def get_crd_request_body body["metadata"]["namespace"] = @@cluster_identity_resource_namespace body["spec"] = {} body["spec"]["audience"] = @@azure_monitor_custom_metrics_audience - if !@extensionName.nil? && !@extensionName.empty? - body["spec"]["resourceId"] = @extensionName - end + if !@extensionName.nil? && !@extensionName.empty? + body["spec"]["resourceId"] = @extensionName + end return body end end diff --git a/source/plugins/ruby/filter_cadvisor2mdm.rb b/source/plugins/ruby/filter_cadvisor2mdm.rb index 62dcf31dc..9c6b661b0 100644 --- a/source/plugins/ruby/filter_cadvisor2mdm.rb +++ b/source/plugins/ruby/filter_cadvisor2mdm.rb @@ -2,7 +2,7 @@ # frozen_string_literal: true -require 'fluent/plugin/filter' +require "fluent/plugin/filter" module Fluent::Plugin require "logger" @@ -28,6 +28,12 @@ class CAdvisor2MdmFilter < Filter @@metric_threshold_hash = {} @@controller_type = "" + @@isWindows = false + @@os_type = ENV["OS_TYPE"] + if !@@os_type.nil? && !@@os_type.empty? && @@os_type.strip.casecmp("windows") == 0 + @@isWindows = true + end + def initialize super end @@ -130,15 +136,17 @@ def flushMetricTelemetry # Also send for PV usage metrics begin - pvTimeDifference = (DateTime.now.to_time.to_i - @@pvUsageTelemetryTimeTracker).abs - pvTimeDifferenceInMinutes = pvTimeDifference / 60 - if (pvTimeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) - pvProperties = {} - pvProperties["PVUsageThresholdPercentage"] = @@metric_threshold_hash[Constants::PV_USED_BYTES] - pvProperties["PVUsageThresholdExceededInLastFlushInterval"] = @pvExceededUsageThreshold - ApplicationInsightsUtility.sendCustomEvent(Constants::PV_USAGE_HEART_BEAT_EVENT, pvProperties) - @pvExceededUsageThreshold = false - @@pvUsageTelemetryTimeTracker = DateTime.now.to_time.to_i + if !@@isWindows.nil? 
&& @@isWindows == false + pvTimeDifference = (DateTime.now.to_time.to_i - @@pvUsageTelemetryTimeTracker).abs + pvTimeDifferenceInMinutes = pvTimeDifference / 60 + if (pvTimeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + pvProperties = {} + pvProperties["PVUsageThresholdPercentage"] = @@metric_threshold_hash[Constants::PV_USED_BYTES] + pvProperties["PVUsageThresholdExceededInLastFlushInterval"] = @pvExceededUsageThreshold + ApplicationInsightsUtility.sendCustomEvent(Constants::PV_USAGE_HEART_BEAT_EVENT, pvProperties) + @pvExceededUsageThreshold = false + @@pvUsageTelemetryTimeTracker = DateTime.now.to_time.to_i + end end rescue => errorStr @log.info "Error in flushMetricTelemetry: #{errorStr} for PV usage telemetry" @@ -346,7 +354,6 @@ def ensure_cpu_memory_capacity_set # cpu_capacity and memory_capacity keep initialized value of 0.0 @log.error "Error getting capacity_from_kubelet: cpu_capacity and memory_capacity" end - end end diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index 781042cea..b3f9bd08b 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -1,16 +1,20 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -require 'fluent/plugin/input' +require "fluent/plugin/input" module Fluent::Plugin - class CAdvisor_Perf_Input < Input Fluent::Plugin.register_input("cadvisor_perf", self) + @@isWindows = false + @@os_type = ENV["OS_TYPE"] + if !@@os_type.nil? && !@@os_type.empty? && @@os_type.strip.casecmp("windows") == 0 + @@isWindows = true + end def initialize super require "yaml" - require 'yajl/json_gem' + require "yajl/json_gem" require "time" require_relative "CAdvisorMetricsAPIClient" @@ -69,31 +73,32 @@ def enumerate() router.emit_stream(@containerhealthtag, eventStream) if eventStream router.emit_stream(@nodehealthtag, eventStream) if eventStream - if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && eventStream.count > 0) $log.info("cAdvisorPerfEmitStreamSuccess @ #{Time.now.utc.iso8601}") end #start GPU InsightsMetrics items begin - containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, metricTime: batchTime)) + if !@@isWindows.nil? && @@isWindows == false + containerGPUusageInsightsMetricsDataItems = [] + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, metricTime: batchTime)) containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord end - router.emit_stream(@insightsmetricstag, insightsMetricsEventStream) if insightsMetricsEventStream - router.emit_stream(@mdmtag, insightsMetricsEventStream) if insightsMetricsEventStream - - if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) - $log.info("cAdvisorInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") + router.emit_stream(@insightsmetricstag, insightsMetricsEventStream) if insightsMetricsEventStream + router.emit_stream(@mdmtag, insightsMetricsEventStream) if insightsMetricsEventStream + + if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) + $log.info("cAdvisorInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end rescue => errorStr $log.warn "Failed when processing GPU Usage metrics in_cadvisor_perf : #{errorStr}" $log.debug_backtrace(errorStr.backtrace) ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) - end + end #end GPU InsightsMetrics items rescue => errorStr diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 61e823ea6..9ab2474b1 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -1,7 +1,7 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -require 'fluent/plugin/input' +require "fluent/plugin/input" module Fluent::Plugin class Win_CAdvisor_Perf_Input < Input @@ -20,7 +20,7 @@ def initialize require_relative "oms_common" require_relative "omslog" require_relative "constants" - @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" + @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" end config_param :run_interval, :time, :default => 60 @@ -57,7 +57,7 @@ def enumerate() begin timeDifference = (DateTime.now.to_time.to_i - @@winNodeQueryTimeTracker).abs timeDifferenceInMinutes = timeDifference / 60 - @@istestvar = ENV["ISTEST"] + @@istestvar = ENV["ISTEST"] #Resetting this cache so that it is populated with the current set of containers with every call CAdvisorMetricsAPIClient.resetWinContainerIdCache() @@ -79,7 +79,6 @@ def enumerate() end end router.emit_stream(@tag, eventStream) if eventStream - router.emit_stream(@mdmtag, eventStream) if eventStream if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && eventStream.count > 0) $log.info("winCAdvisorPerfEmitStreamSuccess @ #{Time.now.utc.iso8601}") @@ -89,10 +88,10 @@ def enumerate() begin containerGPUusageInsightsMetricsDataItems = [] containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, metricTime: Time.now.utc.iso8601)) - insightsMetricsEventStream = Fluent::MultiEventStream.new + insightsMetricsEventStream = Fluent::MultiEventStream.new containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| - insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord + insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord end router.emit_stream(@insightsMetricsTag, insightsMetricsEventStream) if insightsMetricsEventStream diff --git a/source/plugins/ruby/kubelet_utils.rb b/source/plugins/ruby/kubelet_utils.rb index e2c731b79..22bc87c0e 100644 --- a/source/plugins/ruby/kubelet_utils.rb +++ b/source/plugins/ruby/kubelet_utils.rb @@ -9,7 +9,12 @@ require "bigdecimal" class KubeletUtils - @log_path = "/var/opt/microsoft/docker-cimprov/log/filter_cadvisor2mdm.log" + @os_type = ENV["OS_TYPE"] + if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 + @log_path = "/etc/omsagentwindows/filter_cadvisor2mdm.log" + else + @log_path = "/var/opt/microsoft/docker-cimprov/log/filter_cadvisor2mdm.log" + end @log = Logger.new(@log_path, 1, 5000000) class << self From e4da5193c13162d1556999198b7572ce687a0c78 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Thu, 27 May 2021 20:23:25 -0700 Subject: [PATCH 107/301] updating json gem to address CVE-2020-10663 (#567) * updating json gem to address CVE-2020-10663 * updating json gem to address CVE-2020-10663 --- kubernetes/linux/setup.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 3d00e4c57..b8829e13b 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -44,11 +44,13 @@ sudo echo "deb https://packages.fluentbit.io/ubuntu/xenial xenial main" >> /etc/ sudo apt-get update sudo apt-get install td-agent-bit=1.6.8 -y -# install ruby2.6 +# install ruby2.6 sudo apt-get install software-properties-common -y sudo apt-add-repository ppa:brightbox/ruby-ng -y sudo apt-get update sudo apt-get install ruby2.6 ruby2.6-dev gcc make -y +# to fix CVE-2020-10663 +gem update json -v 2.5.1 # fluentd v1 gem gem install fluentd -v "1.12.2" --no-document fluentd --setup ./fluent From 49486a8df6bc06336fc11ab51b0d19ac36497006 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Thu, 27 May 2021 21:37:34 -0700 Subject: [PATCH 108/301] update recommended alerts readme (#570) @dcbrown16 pointed out that this page links to the wrong document in [this issue](https://github.com/microsoft/Docker-Provider/issues/475). The content in the currently linked page is identitical to the page which should be linked, so it's a simple fix. --- alerts/recommended_alerts_ARM/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/alerts/recommended_alerts_ARM/README.md b/alerts/recommended_alerts_ARM/README.md index 81dec8929..53e5a9ad7 100644 --- a/alerts/recommended_alerts_ARM/README.md +++ b/alerts/recommended_alerts_ARM/README.md @@ -24,7 +24,7 @@ Completed job count|Calculates number of jobs completed more than six hours ago. ### How to enable with a Resource Manager template 1. Download one or all of the available templates that describe how to create the alert. -2. Create and use a [parameters file](https://review.docs.microsoft.com/azure/azure-resource-manager/templates/parameter-files) as a JSON to set the values required to create the alert rule. +2. Create and use a [parameters file](https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/parameter-files) as a JSON to set the values required to create the alert rule. 3. Deploy the template from the Azure portal, PowerShell, or Azure CLI. 
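[Editor's note] As a concrete illustration of step 3 above, an Azure CLI deployment would look roughly like the following (resource group, template, and parameter file names are placeholders; use the actual files from the alerts folder):

    az deployment group create \
      --resource-group my-aks-resource-group \
      --template-file ./completed-job-count-alert.json \
      --parameters @./completed-job-count-alert.parameters.json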
For step by step procedures on how to enable alerts via Resource manager, please go [here.](https://aka.ms/ci_alerts_arm) From ef23fc684f7dfb62e61e6ae0634c7ba02a39ca20 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Fri, 28 May 2021 15:10:53 -0700 Subject: [PATCH 109/301] trying again to fix the json gem (#571) * trying again to fix the json gem * removing installation of newer json gem --- kubernetes/linux/setup.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index b8829e13b..17cfb3f77 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -49,8 +49,6 @@ sudo apt-get install software-properties-common -y sudo apt-add-repository ppa:brightbox/ruby-ng -y sudo apt-get update sudo apt-get install ruby2.6 ruby2.6-dev gcc make -y -# to fix CVE-2020-10663 -gem update json -v 2.5.1 # fluentd v1 gem gem install fluentd -v "1.12.2" --no-document fluentd --setup ./fluent From cfa804a1adeb3eb4e82d78f14569e3238e2f6dbd Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Tue, 1 Jun 2021 15:37:14 -0700 Subject: [PATCH 110/301] Addressing PR comments for - https://github.com/microsoft/Docker-Provider/pull/568 (#569) --- source/plugins/ruby/ApplicationInsightsUtility.rb | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/source/plugins/ruby/ApplicationInsightsUtility.rb b/source/plugins/ruby/ApplicationInsightsUtility.rb index 74d08c1e6..31f9503cd 100644 --- a/source/plugins/ruby/ApplicationInsightsUtility.rb +++ b/source/plugins/ruby/ApplicationInsightsUtility.rb @@ -139,12 +139,12 @@ def initializeUtility() def getContainerRuntimeInfo() begin - # Not doing this for windows since docker is being deprecated soon and we dont want to bring in the socket dependency. - if !@@isWindows.nil? && @@isWindows == false - containerRuntime = ENV[@@EnvContainerRuntime] - if !containerRuntime.nil? && !containerRuntime.empty? - # DockerVersion field holds either containerRuntime for non-docker or Dockerversion if its docker - @@CustomProperties["DockerVersion"] = containerRuntime + containerRuntime = ENV[@@EnvContainerRuntime] + if !containerRuntime.nil? && !containerRuntime.empty? + # DockerVersion field holds either containerRuntime for non-docker or Dockerversion if its docker + @@CustomProperties["DockerVersion"] = containerRuntime + # Not doing this for windows since docker is being deprecated soon and we dont want to bring in the socket dependency. + if !@@isWindows.nil? && @@isWindows == false if containerRuntime.casecmp("docker") == 0 dockerInfo = DockerApiClient.dockerInfo if (!dockerInfo.nil? && !dockerInfo.empty?) 
From 0d3e4a13ef07e44ea834ced3c317eee98b694c16 Mon Sep 17 00:00:00 2001 From: Tsubasa Nomura Date: Fri, 11 Jun 2021 07:35:44 +0900 Subject: [PATCH 111/301] Mem_Buf_limit is configurable via ConfigMap (#574) --- .../installer/scripts/td-agent-bit-conf-customizer.rb | 6 ++++++ build/linux/installer/conf/td-agent-bit.conf | 2 +- .../linux/installer/scripts/tomlparser-agent-config.rb | 10 ++++++++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb index ea1536866..82c6c1d17 100644 --- a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb +++ b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb @@ -6,6 +6,7 @@ @default_service_interval = "1" @default_buffer_chunk_size = "1" @default_buffer_max_size = "1" +@default_mem_buf_limit = "10" def is_number?(value) true if Integer(value) rescue false @@ -19,6 +20,7 @@ def substituteFluentBitPlaceHolders interval = ENV["FBIT_SERVICE_FLUSH_INTERVAL"] bufferChunkSize = ENV["FBIT_TAIL_BUFFER_CHUNK_SIZE"] bufferMaxSize = ENV["FBIT_TAIL_BUFFER_MAX_SIZE"] + memBufLimit = ENV["FBIT_TAIL_MEM_BUF_LIMIT"] serviceInterval = (!interval.nil? && is_number?(interval) && interval.to_i > 0 ) ? interval : @default_service_interval serviceIntervalSetting = "Flush " + serviceInterval @@ -32,8 +34,12 @@ def substituteFluentBitPlaceHolders tailBufferMaxSize = tailBufferChunkSize end + tailMemBufLimit = (!memBufLimit.nil? && is_number?(memBufLimit) && memBufLimit.to_i > 10) ? memBufLimit : @default_mem_buf_limit + tailMemBufLimitSetting = "Mem_Buf_Limit " + tailMemBufLimit + "m" + text = File.read(@td_agent_bit_conf_path) new_contents = text.gsub("${SERVICE_FLUSH_INTERVAL}", serviceIntervalSetting) + new_contents = new_contents.gsub("${TAIL_MEM_BUF_LIMIT}", tailMemBufLimitSetting) if !tailBufferChunkSize.nil? new_contents = new_contents.gsub("${TAIL_BUFFER_CHUNK_SIZE}", "Buffer_Chunk_Size " + tailBufferChunkSize + "m") else diff --git a/build/linux/installer/conf/td-agent-bit.conf b/build/linux/installer/conf/td-agent-bit.conf index 045aefcaf..beba6a3ca 100644 --- a/build/linux/installer/conf/td-agent-bit.conf +++ b/build/linux/installer/conf/td-agent-bit.conf @@ -19,7 +19,7 @@ DB /var/log/omsagent-fblogs.db DB.Sync Off Parser docker - Mem_Buf_Limit 10m + ${TAIL_MEM_BUF_LIMIT} ${TAIL_BUFFER_CHUNK_SIZE} ${TAIL_BUFFER_MAX_SIZE} Rotate_Wait 20 diff --git a/build/linux/installer/scripts/tomlparser-agent-config.rb b/build/linux/installer/scripts/tomlparser-agent-config.rb index e587909e5..4daaf6a0c 100644 --- a/build/linux/installer/scripts/tomlparser-agent-config.rb +++ b/build/linux/installer/scripts/tomlparser-agent-config.rb @@ -59,6 +59,7 @@ @fbitFlushIntervalSecs = 0 @fbitTailBufferChunkSizeMBs = 0 @fbitTailBufferMaxSizeMBs = 0 +@fbitTailMemBufLimitMBs = 0 def is_number?(value) @@ -168,6 +169,12 @@ def populateSettingValuesFromConfigMap(parsedConfig) @fbitTailBufferMaxSizeMBs = @fbitTailBufferChunkSizeMBs puts "config::warn: since tail_buf_maxsize_megabytes not provided hence using tail_buf_maxsize_megabytes=#{@fbitTailBufferMaxSizeMBs} which is same as the value of tail_buf_chunksize_megabytes" end + + fbitTailMemBufLimitMBs = fbit_config[:tail_mem_buf_limit_megabytes] + if !fbitTailMemBufLimitMBs.nil? 
&& is_number?(fbitTailMemBufLimitMBs) && fbitTailMemBufLimitMBs.to_i > 0 + @fbitTailMemBufLimitMBs = fbitTailMemBufLimitMBs.to_i + puts "Using config map value: tail_mem_buf_limit_megabytes = #{@fbitTailMemBufLimitMBs}" + end end end rescue => errorStr @@ -212,6 +219,9 @@ def populateSettingValuesFromConfigMap(parsedConfig) if @fbitTailBufferMaxSizeMBs > 0 file.write("export FBIT_TAIL_BUFFER_MAX_SIZE=#{@fbitTailBufferMaxSizeMBs}\n") end + if @fbitTailMemBufLimitMBs > 0 + file.write("export FBIT_TAIL_MEM_BUF_LIMIT=#{@fbitTailMemBufLimitMBs}\n") + end # Close file after writing all environment variables file.close else From 50b99fff5c97780601f438610e3126c7a5df7401 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 11 Jun 2021 10:59:03 -0700 Subject: [PATCH 112/301] add log rotation settings for fluentd logs (#577) --- kubernetes/linux/main.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index b21ed6b96..b9e338fa9 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -478,10 +478,10 @@ fi if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then if [ ! -e "/etc/config/kube.conf" ]; then echo "*** starting fluentd v1 in daemonset" - fluentd -c /etc/fluent/container.conf -o /var/opt/microsoft/docker-cimprov/log/fluentd.log & + fluentd -c /etc/fluent/container.conf -o /var/opt/microsoft/docker-cimprov/log/fluentd.log --log-rotate-age 5 --log-rotate-size 20971520 & else echo "*** starting fluentd v1 in replicaset" - fluentd -c /etc/fluent/kube.conf -o /var/opt/microsoft/docker-cimprov/log/fluentd.log & + fluentd -c /etc/fluent/kube.conf -o /var/opt/microsoft/docker-cimprov/log/fluentd.log --log-rotate-age 5 --log-rotate-size 20971520 & fi fi From 4cebe73a6a000b91183b4bdf45b5cd2f2d069d3c Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 11 Jun 2021 12:28:21 -0700 Subject: [PATCH 113/301] Gangams/release 06112021 (#578) * updates related to ciprod06112021 release * minor update --- ReleaseNotes.md | 17 +++++++++++++++++ build/version | 6 +++--- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 14 +++++++------- kubernetes/windows/Dockerfile | 2 +- 5 files changed, 29 insertions(+), 12 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index d7d6de6af..394caba09 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,23 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 06/11/2021 - +##### Version microsoft/oms:ciprod06112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021 (linux) +##### Version microsoft/oms:win-ciprod06112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021 (windows) + - Linux Agent + - Removal of base omsagent dependency + - Using MDSD version 1.10.1 as base agent for all the supported LA data types + - Ruby version upgrade to 2.6 i.e. 
same version as windows agent + - Upgrade FluentD gem version to 1.12.2 + - All the Ruby Fluentd Plugins upgraded to v1 as per Fluentd guidance + - Windows Agent + - CA cert changes for airgapped clouds + - Send perf metrics to MDM from windows daemonset + - FluentD gem version upgrade from 1.10.2 to 1.12.2 to make same version as Linux Agent + - Doc updates + - README updates related to OSM preview release for Arc K8s + - README updates related to recommended alerts + ### 05/20/2021 - ##### Version microsoft/oms:ciprod05202021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021 (linux) ##### No Windows changes with this release, win-ciprod04222021 still current. diff --git a/build/version b/build/version index d70d1f9bc..95d20e931 100644 --- a/build/version +++ b/build/version @@ -2,11 +2,11 @@ # Build Version Information -CONTAINER_BUILDVERSION_MAJOR=15 -CONTAINER_BUILDVERSION_MINOR=2 +CONTAINER_BUILDVERSION_MAJOR=16 +CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 CONTAINER_BUILDVERSION_BUILDNR=0 -CONTAINER_BUILDVERSION_DATE=20210512 +CONTAINER_BUILDVERSION_DATE=20210611 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 3ad3cd315..1ae7bef61 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod05202021 +ARG IMAGE_TAG=ciprod06112021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 4290e1d59..617c81f38 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -358,7 +358,7 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "15.2.0-0" + dockerProviderVersion: "16.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021" imagePullPolicy: IfNotPresent resources: limits: @@ -446,7 +446,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021" imagePullPolicy: IfNotPresent resources: limits: @@ -583,13 +583,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "15.2.0-0" + dockerProviderVersion: "16.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021" imagePullPolicy: IfNotPresent resources: limits: @@ -750,7 +750,7 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "15.0.0-0" + dockerProviderVersion: "16.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -760,7 +760,7 @@ spec: value: "3" containers: - name: omsagent-win - image: 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod04222021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 5a5298d0b..997b2f310 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod04222021 +ARG IMAGE_TAG=win-ciprod06112021 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement From adabaf93ac2e3a334e3e921548b80ac3b0d32487 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 11 Jun 2021 12:56:56 -0700 Subject: [PATCH 114/301] release note update (#579) --- ReleaseNotes.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 394caba09..266dadf1c 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -20,6 +20,7 @@ Note : The agent version(s) below has dates (ciprod), which indicate t - Ruby version upgrade to 2.6 i.e. same version as windows agent - Upgrade FluentD gem version to 1.12.2 - All the Ruby Fluentd Plugins upgraded to v1 as per Fluentd guidance + - Fluent-bit tail plugin Mem_Buf_limit is configurable via ConfigMap - Windows Agent - CA cert changes for airgapped clouds - Send perf metrics to MDM from windows daemonset From 0c701207300557191cc7adec9967215c4f8daa8c Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Mon, 14 Jun 2021 14:39:50 -0700 Subject: [PATCH 115/301] Make sidecar fluentbit chunk size configurable (#573) --- Documentation/AgentSettings/ReadMe.md | 26 +++++ .../scripts/tomlparser-prom-agent-config.rb | 102 ++++++++++++++++++ .../conf/td-agent-bit-prom-side-car.conf | 6 +- .../installer/datafiles/base_container.data | 3 +- kubernetes/container-azm-ms-agentconfig.yaml | 10 ++ .../linux/defaultpromenvvariables-sidecar | 3 + kubernetes/linux/main.sh | 15 +++ source/plugins/go/src/telemetry.go | 4 + 8 files changed, 165 insertions(+), 4 deletions(-) create mode 100644 Documentation/AgentSettings/ReadMe.md create mode 100644 build/common/installer/scripts/tomlparser-prom-agent-config.rb diff --git a/Documentation/AgentSettings/ReadMe.md b/Documentation/AgentSettings/ReadMe.md new file mode 100644 index 000000000..3e55d7d44 --- /dev/null +++ b/Documentation/AgentSettings/ReadMe.md @@ -0,0 +1,26 @@ +## Configurable agent settings for high scale prometheus metric scraping using pod annotations with prometheus sidecar. + +Container Insights agent runs native prometheus telegraf plugin to scrape prometheus metrics using pod annotations. +The metrics scraped from the telegraf plugin are sent to the fluent bit tcp listener. +In order to support higher volumes of prometheus metrics scraping some of the tcp listener settings can be tuned. +[Fluent Bit TCP listener](https://docs.fluentbit.io/manual/pipeline/inputs/tcp) + +* Chunk Size - This can be increased to process bigger chunks of data. + +* Buffer Size - This should be greater than or equal to the chunk size. + +* Mem Buf Limit - This can be increased to increase the buffer size. But the memory limit on the sidecar also needs to be increased accordingly. +Note that this can only be achieved using helm chart today. + + +** Note - The LA ingestion team also states that higher chunk sizes might not necessarily mean higher throughput since there are pipeline limitations. 
+ +``` + agent-settings: |- + # prometheus scrape fluent bit settings for high scale + # buffer size should be greater than or equal to chunk size else we set it to chunk size. + [agent_settings.prometheus_fbit_settings] + tcp_listener_chunk_size = 10 + tcp_listener_buffer_size = 10 + tcp_listener_mem_buf_limit = 200 +``` diff --git a/build/common/installer/scripts/tomlparser-prom-agent-config.rb b/build/common/installer/scripts/tomlparser-prom-agent-config.rb new file mode 100644 index 000000000..be9d08e59 --- /dev/null +++ b/build/common/installer/scripts/tomlparser-prom-agent-config.rb @@ -0,0 +1,102 @@ +#!/usr/local/bin/ruby + +#this should be require relative in Linux and require in windows, since it is a gem install on windows +@os_type = ENV["OS_TYPE"] +if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 + require "tomlrb" +else + require_relative "tomlrb" +end + +require_relative "ConfigParseErrorLogger" + +@configMapMountPath = "/etc/config/settings/agent-settings" +@configSchemaVersion = "" + +@promFbitChunkSize = 10 +@promFbitBufferSize = 10 +@promFbitMemBufLimit = 200 + +def is_number?(value) + true if Integer(value) rescue false +end + +# Use parser to parse the configmap toml file to a ruby structure +def parseConfigMap + begin + # Check to see if config map is created + if (File.file?(@configMapMountPath)) + puts "config::configmap container-azm-ms-agentconfig for sidecar agent settings mounted, parsing values" + parsedConfig = Tomlrb.load_file(@configMapMountPath, symbolize_keys: true) + puts "config::Successfully parsed mounted config map" + return parsedConfig + else + puts "config::configmap container-azm-ms-agentconfig for sidecar agent settings not mounted, using defaults" + return nil + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config map for sidecar agent settings : #{errorStr}, using defaults, please check config map for errors") + return nil + end +end + +# Use the ruby structure created after config parsing to set the right values to be used as environment variables +def populateSettingValuesFromConfigMap(parsedConfig) + begin + if !parsedConfig.nil? && !parsedConfig[:agent_settings].nil? + # fbit config settings + prom_fbit_config = parsedConfig[:agent_settings][:prometheus_fbit_settings] + if !prom_fbit_config.nil? + chunk_size = prom_fbit_config[:tcp_listener_chunk_size] + if !chunk_size.nil? && is_number?(chunk_size) && chunk_size.to_i > 0 + @promFbitChunkSize = chunk_size.to_i + puts "Using config map value: AZMON_SIDECAR_FBIT_CHUNK_SIZE = #{@promFbitChunkSize.to_s + "m"}" + end + buffer_size = prom_fbit_config[:tcp_listener_buffer_size] + if !buffer_size.nil? && is_number?(buffer_size) && buffer_size.to_i > 0 + @promFbitBufferSize = buffer_size.to_i + puts "Using config map value: AZMON_SIDECAR_FBIT_BUFFER_SIZE = #{@promFbitBufferSize.to_s + "m"}" + if @promFbitBufferSize < @promFbitChunkSize + @promFbitBufferSize = @promFbitChunkSize + puts "Setting Fbit buffer size equal to chunk size since it is set to less than chunk size - AZMON_SIDECAR_FBIT_BUFFER_SIZE = #{@promFbitBufferSize.to_s + "m"}" + end + end + mem_buf_limit = prom_fbit_config[:tcp_listener_mem_buf_limit] + if !mem_buf_limit.nil? 
&& is_number?(mem_buf_limit) && mem_buf_limit.to_i > 0 + @promFbitMemBufLimit = mem_buf_limit.to_i + puts "Using config map value: AZMON_SIDECAR_FBIT_MEM_BUF_LIMIT = #{@promFbitMemBufLimit.to_s + "m"}" + end + end + end + rescue => errorStr + puts "config::error:Exception while reading config settings for sidecar agent configuration setting - #{errorStr}, using defaults" + end +end + +@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] +puts "****************Start Sidecar Agent Config Processing********************" +if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it + configMapSettings = parseConfigMap + if !configMapSettings.nil? + populateSettingValuesFromConfigMap(configMapSettings) + end +else + if (File.file?(@configMapMountPath)) + ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported schema version") + end + @enable_health_model = false +end + +# Write the settings to file, so that they can be set as environment variables +file = File.open("side_car_fbit_config_env_var", "w") + +if !file.nil? + file.write("export AZMON_SIDECAR_FBIT_CHUNK_SIZE=#{@promFbitChunkSize.to_s + "m"}\n") + file.write("export AZMON_SIDECAR_FBIT_BUFFER_SIZE=#{@promFbitBufferSize.to_s + "m"}\n") + file.write("export AZMON_SIDECAR_FBIT_MEM_BUF_LIMIT=#{@promFbitMemBufLimit.to_s + "m"}\n") + # Close file after writing all environment variables + file.close +else + puts "Exception while opening file for writing config environment variables" + puts "****************End Sidecar Agent Config Processing********************" +end diff --git a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf index 8a69f7995..2c85a4200 100644 --- a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf +++ b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf @@ -29,9 +29,9 @@ Tag oms.container.perf.telegraf.* Listen 0.0.0.0 Port 25229 - Chunk_Size 10m - Buffer_Size 10m - Mem_Buf_Limit 200m + Chunk_Size ${AZMON_SIDECAR_FBIT_CHUNK_SIZE} + Buffer_Size ${AZMON_SIDECAR_FBIT_BUFFER_SIZE} + Mem_Buf_Limit ${AZMON_SIDECAR_FBIT_MEM_BUF_LIMIT} [OUTPUT] Name oms diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index de8ccbba0..88c790be3 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -42,7 +42,8 @@ MAINTAINER: 'Microsoft Corporation' /opt/microsoft/docker-cimprov/bin/TelegrafTCPErrorTelemetry.sh; build/linux/installer/scripts/TelegrafTCPErrorTelemetry.sh; 755; root; root /opt/livenessprobe.sh; build/linux/installer/scripts/livenessprobe.sh; 755; root; root /opt/tomlparser-prom-customconfig.rb; build/common/installer/scripts/tomlparser-prom-customconfig.rb; 755; root; root -/opt/tomlparser-mdm-metrics-config.rb; build/common/installer/scripts/tomlparser-mdm-metrics-config.rb; 755; root; root +/opt/tomlparser-prom-agent-config.rb; build/common/installer/scripts/tomlparser-prom-agent-config.rb; 755; root; root +/opt/tomlparser-mdm-metrics-config.rb; build/common/installer/scripts/tomlparser-mdm-metrics-config.rb; 755; root; root /opt/tomlparser-metric-collection-config.rb; build/linux/installer/scripts/tomlparser-metric-collection-config.rb; 755; root; root diff --git 
a/kubernetes/container-azm-ms-agentconfig.yaml b/kubernetes/container-azm-ms-agentconfig.yaml index 543f270c1..21b31f76f 100644 --- a/kubernetes/container-azm-ms-agentconfig.yaml +++ b/kubernetes/container-azm-ms-agentconfig.yaml @@ -135,6 +135,16 @@ data: [integrations.azure_network_policy_manager] collect_basic_metrics = false collect_advanced_metrics = false + +# Doc - https://github.com/microsoft/Docker-Provider/blob/ci_prod/Documentation/AgentSettings/ReadMe.md + agent-settings: |- + # prometheus scrape fluent bit settings for high scale + # buffer size should be greater than or equal to chunk size else we set it to chunk size. + [agent_settings.prometheus_fbit_settings] + tcp_listener_chunk_size = 10 + tcp_listener_buffer_size = 10 + tcp_listener_mem_buf_limit = 200 + metadata: name: container-azm-ms-agentconfig namespace: kube-system diff --git a/kubernetes/linux/defaultpromenvvariables-sidecar b/kubernetes/linux/defaultpromenvvariables-sidecar index 3301488d8..68388f88e 100644 --- a/kubernetes/linux/defaultpromenvvariables-sidecar +++ b/kubernetes/linux/defaultpromenvvariables-sidecar @@ -7,3 +7,6 @@ export AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" export AZMON_TELEGRAF_OSM_PROM_PLUGINS="" export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR="kubernetes_label_selector = ''" export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR="kubernetes_field_selector = ''" +export AZMON_SIDECAR_FBIT_CHUNK_SIZE="10m" +export AZMON_SIDECAR_FBIT_BUFFER_SIZE="10m" +export AZMON_SIDECAR_FBIT_MEM_BUF_LIMIT="200m" diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index b9e338fa9..ec348bba3 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -306,6 +306,21 @@ if [ -e "telemetry_prom_config_env_var" ]; then source telemetry_prom_config_env_var fi +#Parse sidecar agent settings for custom configuration +if [ ! -e "/etc/config/kube.conf" ]; then + if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then + #Parse the agent configmap to create a file with new custom settings. + /usr/bin/ruby2.6 tomlparser-prom-agent-config.rb + #Sourcing config environment variable file if it exists + if [ -e "side_car_fbit_config_env_var" ]; then + cat side_car_fbit_config_env_var | while read line; do + echo $line >> ~/.bashrc + done + source side_car_fbit_config_env_var + fi + fi +fi + #Parse the configmap to set the right environment variables for MDM metrics configuration for Alerting. 
if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index 4750b4624..debe003e4 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -161,6 +161,10 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { telemetryDimensions["OsmNamespaceCount"] = strconv.Itoa(osmNamespaceCount) } + telemetryDimensions["PromFbitChunkSize"] = os.Getenv("AZMON_SIDECAR_FBIT_CHUNK_SIZE") + telemetryDimensions["PromFbitBufferSize"] = os.Getenv("AZMON_SIDECAR_FBIT_BUFFER_SIZE") + telemetryDimensions["PromFbitMemBufLimit"] = os.Getenv("AZMON_SIDECAR_FBIT_MEM_BUF_LIMIT") + SendEvent(eventNameCustomPrometheusSidecarHeartbeat, telemetryDimensions) } else { From a7a2d739f1ef22a507637253d7d5d307e7afeec4 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Tue, 15 Jun 2021 09:36:55 -0700 Subject: [PATCH 116/301] Fix vulnerabilities (#583) * test * test1 * test-2 * test-3 * 3 * 4 * test * 2 * 3 * 4 * 5 * 6 * rename gem for windows * fix * fix --- .github/workflows/pr-checker.yml | 99 ++++++++++++++++++++++++++++++++ kubernetes/linux/setup.sh | 9 ++- kubernetes/windows/setup.ps1 | 12 +++- source/plugins/go/src/go.mod | 1 + source/plugins/go/src/go.sum | 7 +++ 5 files changed, 126 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/pr-checker.yml diff --git a/.github/workflows/pr-checker.yml b/.github/workflows/pr-checker.yml new file mode 100644 index 000000000..c75e6dc24 --- /dev/null +++ b/.github/workflows/pr-checker.yml @@ -0,0 +1,99 @@ +name: pullrequest-build-and-scan +on: + pull_request: + types: [opened, synchronize, reopened] + branches: + - ci_dev + - ci_prod + paths-ignore: + - '**.md' +jobs: + LINUX-build-and-scan: + runs-on: ubuntu-latest + steps: + - name: Set-workflow-initiator + run: echo "Initiated by - ${GITHUB_ACTOR}" + - name: Set-branch-name-for-pr + if: ${{ github.event_name == 'pull_request' }} + run: echo "BRANCH_NAME=$(echo ${GITHUB_HEAD_REF} | tr / _)" >> $GITHUB_ENV + - name: Set-Env + run: echo "ENV=dev" >> $GITHUB_ENV + - name: Set-ACR-Registry + run: echo "ACR_REGISTRY=containerinsightsprod.azurecr.io" >> $GITHUB_ENV + - name: Set-ACR-Repository + run: echo "ACR_REPOSITORY=/public/azuremonitor/containerinsights/cidev" >> $GITHUB_ENV + - name: Set-image-tag-name + run: echo "IMAGE_TAG_NAME=cidev" >> $GITHUB_ENV + - name: Set-image-tag-suffix + run: echo "IMAGE_TAG_DATE=$(date +%m-%d-%Y)" >> $GITHUB_ENV + - name: Set-commit-sha + run: echo "COMMIT_SHA=${GITHUB_SHA::8}" >> $GITHUB_ENV + - name: Set-image-tag + run: echo "IMAGETAG=${ACR_REGISTRY}${ACR_REPOSITORY}:${IMAGE_TAG_NAME}-${BRANCH_NAME}-${IMAGE_TAG_DATE}-${COMMIT_SHA}" >> $GITHUB_ENV + - name: Set-image-telemetry-tag + run: echo "IMAGETAG_TELEMETRY=${IMAGE_TAG_NAME}-${BRANCH_NAME}-${IMAGE_TAG_DATE}-${COMMIT_SHA}" >> $GITHUB_ENV + - name: Set-Helm-OCI-Experimental-feature + run: echo "HELM_EXPERIMENTAL_OCI=1" >> $GITHUB_ENV + - name: Set-Helm-chart-version + run: echo "HELM_CHART_VERSION=0.0.1" >> $GITHUB_ENV + - name: Set-Helm-tag + run: echo "HELMTAG=${ACR_REGISTRY}${ACR_REPOSITORY}:${IMAGE_TAG_NAME}-chart-${BRANCH_NAME}-${HELM_CHART_VERSION}-${IMAGE_TAG_DATE}-${COMMIT_SHA}" >> $GITHUB_ENV + - name: Checkout-code + uses: actions/checkout@v2 + - name: Show-versions-On-build-machine + run: lsb_release -a && go version && helm version && docker version + - name: Install-build-dependencies + run: sudo apt-get install build-essential -y + - name: Build-source-code + run: cd 
./build/linux/ && make + - name: Create-docker-image + run: | + cd ./kubernetes/linux/ && docker build . --file Dockerfile -t $IMAGETAG --build-arg IMAGE_TAG=$IMAGETAG_TELEMETRY + - name: List-docker-images + run: docker images --digests --all + - name: Run-trivy-scanner-on-docker-image + uses: aquasecurity/trivy-action@master + with: + image-ref: "${{ env.IMAGETAG }}" + format: 'table' + severity: 'CRITICAL,HIGH' + vuln-type: 'os,library' + skip-dirs: 'opt/telegraf' + exit-code: '1' + timeout: '5m0s' + WINDOWS-build: + runs-on: windows-latest + steps: + - name: Set-workflow-initiator + run: echo ("Initiated by -" + $env:GITHUB_ACTOR) + - name: Set-branch-name-for-pr + if: ${{ github.event_name == 'pull_request' }} + run: echo ("BRANCH_NAME=" + $env:GITHUB_HEAD_REF.replace('/','_')) >> $env:GITHUB_ENV + - name: Set-Env + run: echo ("ENV=dev") >> $env:GITHUB_ENV + - name: Set-ACR-Registry + run: echo ("ACR_REGISTRY=containerinsightsprod.azurecr.io") >> $env:GITHUB_ENV + - name: Set-ACR-Repository + run: echo ("ACR_REPOSITORY=/public/azuremonitor/containerinsights/cidev") >> $env:GITHUB_ENV + - name: Set-image-tag-name + run: echo ("IMAGE_TAG_NAME=cidev-win") >> $env:GITHUB_ENV + - name: Set-image-tag-suffix + run: echo ("IMAGE_TAG_DATE="+ (Get-Date -Format "MM-dd-yyyy")) >> $env:GITHUB_ENV + - name: Set-commit-sha + run: echo ("COMMIT_SHA=" + $env:GITHUB_SHA.SubString(0,8)) >> $env:GITHUB_ENV + - name: Set-image-tag + run: echo ("IMAGETAG=" + $env:ACR_REGISTRY + $env:ACR_REPOSITORY + ":" + $env:IMAGE_TAG_NAME + "-" + $env:BRANCH_NAME + "-" + $env:IMAGE_TAG_DATE + "-" + $env:COMMIT_SHA) >> $env:GITHUB_ENV + - name: Set-image-telemetry-tag + run: echo ("IMAGETAG_TELEMETRY=" + $env:IMAGE_TAG_NAME + "-" + $env:BRANCH_NAME + "-" + $env:IMAGE_TAG_DATE + "-" + $env:COMMIT_SHA) >> $env:GITHUB_ENV + - name: Checkout-code + uses: actions/checkout@v2 + - name: Show-versions-On-build-machine + run: systeminfo && go version && docker version + - name: Build-source-code + run: cd ./build/windows/ && & .\Makefile.ps1 + - name: Create-docker-image + run: | + cd ./kubernetes/windows/ && docker build . --file Dockerfile -t $env:IMAGETAG --build-arg IMAGE_TAG=$env:IMAGETAG_TELEMETRY + - name: List-docker-images + run: docker images --digests --all + diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 17cfb3f77..ad7cc2232 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -27,7 +27,6 @@ sudo apt-get install jq=1.5+dfsg-2 -y #used to setcaps for ruby process to read /proc/env sudo apt-get install libcap2-bin -y -#1.18 pre-release wget https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0_linux_amd64.tar.gz tar -zxvf telegraf-1.18.0_linux_amd64.tar.gz @@ -63,3 +62,11 @@ rm -f $TMPDIR/envmdsd # Remove settings for cron.daily that conflict with the node's cron.daily. Since both are trying to rotate the same files # in /var/log at the same time, the rotation doesn't happen correctly and then the *.1 file is forever logged to. 
rm /etc/logrotate.d/alternatives /etc/logrotate.d/apt /etc/logrotate.d/azure-mdsd /etc/logrotate.d/rsyslog + +#Remove gemfile.lock for http_parser gem 0.6.0 +#see - https://github.com/fluent/fluentd/issues/3374 https://github.com/tmm1/http_parser.rb/issues/70 +if [ -e "/var/lib/gems/2.6.0/gems/http_parser.rb-0.6.0/Gemfile.lock" ]; then + #rename + echo "Renaming unused gemfile.lock for http_parser 0.6.0" + mv /var/lib/gems/2.6.0/gems/http_parser.rb-0.6.0/Gemfile.lock /var/lib/gems/2.6.0/gems/http_parser.rb-0.6.0/renamed_Gemfile_lock.renamed +fi diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index 25aad5e16..3e47b7eb2 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -65,6 +65,16 @@ Write-Host ('Extracting Certificate Generator Package') Expand-Archive -Path /opt/omsagentwindows/certificategenerator.zip -Destination /opt/omsagentwindows/certgenerator/ -Force Write-Host ('Finished Extracting Certificate Generator Package') +Write-Host ("Removing Install folder") + Remove-Item /installation -Recurse -Write-Host ("Removing Install folder") \ No newline at end of file +#Remove gemfile.lock for http_parser gem 0.6.0 +#see - https://github.com/fluent/fluentd/issues/3374 https://github.com/tmm1/http_parser.rb/issues/70 + +$gemfile = "\ruby26\lib\ruby\gems\2.6.0\gems\http_parser.rb-0.6.0\Gemfile.lock" +$gemfileFullPath = $Env:SYSTEMDRIVE + "\" + $gemfile +If (Test-Path -Path $gemfile ) { + Write-Host ("Renaming unused gemfile.lock for http_parser 0.6.0") + Rename-Item -Path $gemfileFullPath -NewName "renamed_Gemfile_lock.renamed" +} \ No newline at end of file diff --git a/source/plugins/go/src/go.mod b/source/plugins/go/src/go.mod index 3fd38a9bd..5b5c735e5 100644 --- a/source/plugins/go/src/go.mod +++ b/source/plugins/go/src/go.mod @@ -31,4 +31,5 @@ require ( k8s.io/api v0.0.0-20180628040859-072894a440bd // indirect k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d k8s.io/client-go v8.0.0+incompatible + golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f ) diff --git a/source/plugins/go/src/go.sum b/source/plugins/go/src/go.sum index 52bb2ab04..64745749f 100644 --- a/source/plugins/go/src/go.sum +++ b/source/plugins/go/src/go.sum @@ -108,6 +108,10 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90Pveol golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f h1:aZp0e2vLN4MToVqnjNEYEtrEA8RH8U8FN1CU7JgqsPU= +golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/net v0.0.0-20170809000501-1c05540f6879 h1:0rFa7EaCGdQPmZVbo9F7MNF65b8dyzS6EUnXjs9Cllk= golang.org/x/net v0.0.0-20170809000501-1c05540f6879/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -120,8 +124,11 @@ golang.org/x/sys v0.0.0-20171031081856-95c657629925/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys 
v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/text v0.0.0-20170810154203-b19bf474d317 h1:WKW+OPdYPlvOTVGHuMfjnIC6yY2SI93yFB0pZ7giBmQ= golang.org/x/text v0.0.0-20170810154203-b19bf474d317/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= From 154c11dd0cfe99c3b065e83967324b6e561aaa72 Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Wed, 16 Jun 2021 10:14:12 -0700 Subject: [PATCH 117/301] Windows build optimization (#582) --- README.md | 26 ++++++++ kubernetes/windows/Dockerfile-dev-base-image | 43 +++++++++++++ kubernetes/windows/Dockerfile-dev-image | 45 +++++++++++++ .../build-and-publish-dev-docker-image.ps1 | 64 +++++++++++++++++++ .../dockerbuild/build-dev-base-image.ps1 | 32 ++++++++++ 5 files changed, 210 insertions(+) create mode 100644 kubernetes/windows/Dockerfile-dev-base-image create mode 100644 kubernetes/windows/Dockerfile-dev-image create mode 100644 kubernetes/windows/dockerbuild/build-and-publish-dev-docker-image.ps1 create mode 100644 kubernetes/windows/dockerbuild/build-dev-base-image.ps1 diff --git a/README.md b/README.md index 555234c61..73bf858cd 100644 --- a/README.md +++ b/README.md @@ -210,6 +210,32 @@ powershell -ExecutionPolicy bypass # switch to powershell if you are not on pow .\build-and-publish-docker-image.ps1 -image /: # trigger build code and image and publish docker hub or acr ``` +##### Developer Build optimizations +If you do not want to build the image from scratch every time you make changes during development,you can choose to build the docker images that are separated out by +* Base image and dependencies including agent bootstrap(setup.ps1) +* Agent conf and plugin changes + +To do this, the very first time you start developing you would need to execute below instructions in elevated command prompt of powershell. +This builds the base image(omsagent-win-base) with all the package dependencies +``` +cd %userprofile%\Docker-Provider\kubernetes\windows\dockerbuild # based on your repo path +docker login # if you want to publish the image to acr then login to acr via `docker login ` +powershell -ExecutionPolicy bypass # switch to powershell if you are not on powershell already +.\build-dev-base-image.ps1 # builds base image and dependencies +``` + +And then run the script to build the image consisting of code and conf changes. 
+``` +.\build-and-publish-dev-docker-image.ps1 -image /: # trigger build code and image and publish docker hub or acr +``` + +For the subsequent builds, you can just run - + +``` +.\build-and-publish-dev-docker-image.ps1 -image /: # trigger build code and image and publish docker hub or acr +``` +###### Note - If you have changes in setup.ps1 and want to test those changes, uncomment the section consisting of setup.ps1 in the Dockerfile-dev-image file. + #### Option 2 - Using WSL2 to Build the Windows agent ##### On WSL2, Build Certificate Generator Source code and Out OMS Go plugin code diff --git a/kubernetes/windows/Dockerfile-dev-base-image b/kubernetes/windows/Dockerfile-dev-base-image new file mode 100644 index 000000000..9c6ae8db8 --- /dev/null +++ b/kubernetes/windows/Dockerfile-dev-base-image @@ -0,0 +1,43 @@ +FROM mcr.microsoft.com/windows/servercore:ltsc2019 +MAINTAINER OMSContainers@microsoft.com +LABEL vendor=Microsoft\ Corp \ + com.microsoft.product="Azure Monitor for containers" + +# Do not split this into multiple RUN! +# Docker creates a layer for every RUN-Statement +RUN powershell -Command "Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" +# Fluentd depends on cool.io whose fat gem is only available for Ruby < 2.5, so need to specify --platform ruby when install Ruby > 2.5 and install msys2 to get dev tools +RUN choco install -y ruby --version 2.6.5.1 --params "'/InstallDir:C:\ruby26'" \ +&& choco install -y msys2 --version 20200903.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ +&& choco install -y vim + +# gangams - optional MSYS2 update via ridk failing in merged docker file so skipping that since we dont need optional update +RUN refreshenv \ +&& ridk install 3 \ +&& echo gem: --no-document >> C:\ProgramData\gemrc \ +&& gem install cool.io -v 1.5.4 --platform ruby \ +&& gem install oj -v 3.3.10 \ +&& gem install json -v 2.2.0 \ +&& gem install fluentd -v 1.12.2 \ +&& gem install win32-service -v 1.0.1 \ +&& gem install win32-ipc -v 0.7.0 \ +&& gem install win32-event -v 0.6.3 \ +&& gem install windows-pr -v 1.2.6 \ +&& gem install tomlrb -v 1.3.0 \ +&& gem install gyoku -v 1.3.1 \ +&& gem sources --clear-all + +# Remove gem cache and chocolatey +RUN powershell -Command "Remove-Item -Force C:\ruby26\lib\ruby\gems\2.6.0\cache\*.gem; Remove-Item -Recurse -Force 'C:\ProgramData\chocolatey'" + +SHELL ["powershell"] + +ENV tmpdir /opt/omsagentwindows/scripts/powershell + +WORKDIR /opt/omsagentwindows/scripts/powershell + +# copy certificate generator binaries zip +COPY ./omsagentwindows/*.zip /opt/omsagentwindows/ + +COPY setup.ps1 /opt/omsagentwindows/scripts/powershell +RUN ./setup.ps1 \ No newline at end of file diff --git a/kubernetes/windows/Dockerfile-dev-image b/kubernetes/windows/Dockerfile-dev-image new file mode 100644 index 000000000..6764ef8c4 --- /dev/null +++ b/kubernetes/windows/Dockerfile-dev-image @@ -0,0 +1,45 @@ +FROM omsagent-win-base +MAINTAINER OMSContainers@microsoft.com +LABEL vendor=Microsoft\ Corp \ + com.microsoft.product="Azure Monitor for containers" + +#Uncomment below to test setup.ps1 changes +#COPY setup.ps1 /opt/omsagentwindows/scripts/powershell +#RUN ./setup.ps1 +COPY main.ps1 /opt/omsagentwindows/scripts/powershell +COPY ./omsagentwindows/installer/scripts/filesystemwatcher.ps1 /opt/omsagentwindows/scripts/powershell +COPY ./omsagentwindows/installer/scripts/livenessprobe.cmd /opt/omsagentwindows/scripts/cmd/ +COPY 
setdefaulttelegrafenvvariables.ps1 /opt/omsagentwindows/scripts/powershell + +# copy ruby scripts to /opt folder +COPY ./omsagentwindows/installer/scripts/*.rb /opt/omsagentwindows/scripts/ruby/ + +# copy out_oms.so file +COPY ./omsagentwindows/out_oms.so /opt/omsagentwindows/out_oms.so + +# copy fluent, fluent-bit and out_oms conf files +COPY ./omsagentwindows/installer/conf/fluent.conf /etc/fluent/ +# copy fluent docker and cri parser conf files +COPY ./omsagentwindows/installer/conf/fluent-cri-parser.conf /etc/fluent/ +COPY ./omsagentwindows/installer/conf/fluent-docker-parser.conf /etc/fluent/ +COPY ./omsagentwindows/installer/conf/fluent-bit.conf /etc/fluent-bit +COPY ./omsagentwindows/installer/conf/out_oms.conf /etc/omsagentwindows + +# copy telegraf conf file +COPY ./omsagentwindows/installer/conf/telegraf.conf /etc/telegraf/ + +# copy keepcert alive ruby scripts +COPY ./omsagentwindows/installer/scripts/rubyKeepCertificateAlive/*.rb /etc/fluent/plugin/ + +#Copy fluentd ruby plugins +COPY ./omsagentwindows/ruby/ /etc/fluent/plugin/ +COPY ./omsagentwindows/utils/*.rb /etc/fluent/plugin/ + +ENV AGENT_VERSION ${IMAGE_TAG} +ENV OS_TYPE "windows" +ENV APPLICATIONINSIGHTS_AUTH "NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi" +ENV AZMON_COLLECT_ENV False +ENV CI_CERT_LOCATION "C://oms.crt" +ENV CI_KEY_LOCATION "C://oms.key" + +ENTRYPOINT ["powershell", "C:\\opt\\omsagentwindows\\scripts\\powershell\\main.ps1"] diff --git a/kubernetes/windows/dockerbuild/build-and-publish-dev-docker-image.ps1 b/kubernetes/windows/dockerbuild/build-and-publish-dev-docker-image.ps1 new file mode 100644 index 000000000..0fde7f379 --- /dev/null +++ b/kubernetes/windows/dockerbuild/build-and-publish-dev-docker-image.ps1 @@ -0,0 +1,64 @@ +<# + .DESCRIPTION + Builds the Windows Agent code and Docker Image and pushes the docker image to specified repo + + .PARAMETER image + docker image. format should be /: +#> +param( + [Parameter(mandatory = $true)] + [string]$image +) + +$currentdir = $PSScriptRoot +Write-Host("current script dir : " + $currentdir + " ") + +if ($false -eq (Test-Path -Path $currentdir)) { + Write-Host("Invalid current dir : " + $currentdir + " ") -ForegroundColor Red + exit +} + +if ([string]::IsNullOrEmpty($image)) { + Write-Host "Image parameter shouldnt be null or empty" -ForegroundColor Red + exit +} + +$imageparts = $image.split(":") +if (($imageparts.Length -ne 2)){ + Write-Host "Image not in valid format. Expected format should be /:" -ForegroundColor Red + exit +} + +$imagetag = $imageparts[1].ToLower() +$imagerepo = $imageparts[0] + +if ($imagetag.StartsWith("win-") -eq $false) +{ + Write-Host "adding win- prefix image tag since its not provided" + $imagetag = "win-$imagetag" +} + +Write-Host "image tag used is :$imagetag" + +Write-Host "start:Building the cert generator and out oms code via Makefile.ps1" +..\..\..\build\windows\Makefile.ps1 +Write-Host "end:Building the cert generator and out oms code via Makefile.ps1" + +$dockerFileDir = Split-Path -Path $currentdir +Write-Host("builddir dir : " + $dockerFileDir + " ") +if ($false -eq (Test-Path -Path $dockerFileDir)) { + Write-Host("Invalid dockerFile Dir : " + $dockerFileDir + " ") -ForegroundColor Red + exit +} + +Write-Host "changing directory to DockerFile dir: $dockerFileDir" +Set-Location -Path $dockerFileDir + +$updateImage = ${imagerepo} + ":" + ${imageTag} +Write-Host "STAT:Triggering docker image build: $image" +docker build -t $updateImage --build-arg IMAGE_TAG=$imageTag -f Dockerfile-dev-image . 
+Write-Host "END:Triggering docker image build: $updateImage" + +Write-Host "STAT:pushing docker image : $updateImage" +docker push $updateImage +Write-Host "EnD:pushing docker image : $updateImage" diff --git a/kubernetes/windows/dockerbuild/build-dev-base-image.ps1 b/kubernetes/windows/dockerbuild/build-dev-base-image.ps1 new file mode 100644 index 000000000..142e20c3f --- /dev/null +++ b/kubernetes/windows/dockerbuild/build-dev-base-image.ps1 @@ -0,0 +1,32 @@ +<# + .DESCRIPTION + Builds the Docker Image locally for the server core ltsc base and installs dependencies + +#> + +$currentdir = $PSScriptRoot +Write-Host("current script dir : " + $currentdir + " ") + +if ($false -eq (Test-Path -Path $currentdir)) { + Write-Host("Invalid current dir : " + $currentdir + " ") -ForegroundColor Red + exit +} + +Write-Host "start:Building the cert generator and out oms code via Makefile.ps1" +..\..\..\build\windows\Makefile.ps1 +Write-Host "end:Building the cert generator and out oms code via Makefile.ps1" + +$dockerFileDir = Split-Path -Path $currentdir +Write-Host("builddir dir : " + $dockerFileDir + " ") +if ($false -eq (Test-Path -Path $dockerFileDir)) { + Write-Host("Invalid dockerFile Dir : " + $dockerFileDir + " ") -ForegroundColor Red + exit +} + +Write-Host "changing directory to DockerFile dir: $dockerFileDir" +Set-Location -Path $dockerFileDir + +$updateImage = "omsagent-win-base" +Write-Host "STAT:Triggering base docker image build: $updateImage" +docker build -t $updateImage -f Dockerfile-dev-base-image . +Write-Host "END:Triggering docker image build: $updateImage" \ No newline at end of file From 68e90b63e1efd7f572b586da63f67096bfb07648 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Tue, 22 Jun 2021 22:06:35 -0700 Subject: [PATCH 118/301] fix windows build failure due to msys2 version --- kubernetes/windows/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 997b2f310..94be59644 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -10,7 +10,7 @@ ARG IMAGE_TAG=win-ciprod06112021 RUN powershell -Command "Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" # Fluentd depends on cool.io whose fat gem is only available for Ruby < 2.5, so need to specify --platform ruby when install Ruby > 2.5 and install msys2 to get dev tools RUN choco install -y ruby --version 2.6.5.1 --params "'/InstallDir:C:\ruby26'" \ -&& choco install -y msys2 --version 20200903.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ +&& choco install -y msys2 --version 20210604.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ && choco install -y vim # gangams - optional MSYS2 update via ridk failing in merged docker file so skipping that since we dont need optional update From cf68a4fde5c7471bfa6679703d1d77d0f98745ea Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Wed, 23 Jun 2021 10:28:49 -0700 Subject: [PATCH 119/301] Fix telegraf startup issue when endpoint is unreachable (#587) --- build/linux/installer/conf/telegraf-prom-side-car.conf | 8 +++++++- build/linux/installer/conf/telegraf-rs.conf | 8 +++++++- build/linux/installer/conf/telegraf.conf | 8 ++++++-- build/linux/installer/conf/test.json | 1 + build/linux/installer/datafiles/base_container.data | 3 +++ kubernetes/linux/main.sh | 9 ++++++--- 6 files changed, 30 insertions(+), 7 deletions(-) create mode 100644 
build/linux/installer/conf/test.json diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index b3b4ba1d3..1b6bab9f9 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -109,7 +109,7 @@ ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json" - namedrop = ["agent_telemetry"] + namedrop = ["agent_telemetry", "file"] ############################################################################### # PROCESSOR PLUGINS # @@ -119,6 +119,12 @@ [processors.converter.fields] float = ["*"] +# Dummy plugin to test out toml parsing happens properly +[[inputs.file]] + interval = "24h" + files = ["test.json"] + data_format = "json" + #Prometheus Custom Metrics [[inputs.prometheus]] interval = "$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL" diff --git a/build/linux/installer/conf/telegraf-rs.conf b/build/linux/installer/conf/telegraf-rs.conf index ee1cf8819..0ca07f7e5 100644 --- a/build/linux/installer/conf/telegraf-rs.conf +++ b/build/linux/installer/conf/telegraf-rs.conf @@ -121,7 +121,7 @@ ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json" - namedrop = ["agent_telemetry"] + namedrop = ["agent_telemetry", "file"] #tagdrop = ["AgentVersion","AKS_RESOURCE_ID", "ACS_RESOURCE_NAME", "Region","ClusterName","ClusterType", "Computer", "ControllerType"] [[outputs.application_insights]] @@ -538,6 +538,12 @@ #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] # [inputs.prometheus.tagpass] +# Dummy plugin to test out toml parsing happens properly +[[inputs.file]] + interval = "24h" + files = ["test.json"] + data_format = "json" + #Prometheus Custom Metrics [[inputs.prometheus]] interval = "$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL" diff --git a/build/linux/installer/conf/telegraf.conf b/build/linux/installer/conf/telegraf.conf index 5a5bb2d8c..8b6e2ad4b 100644 --- a/build/linux/installer/conf/telegraf.conf +++ b/build/linux/installer/conf/telegraf.conf @@ -120,7 +120,7 @@ ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json" - namedrop = ["agent_telemetry"] + namedrop = ["agent_telemetry", "file"] #tagdrop = ["AgentVersion","AKS_RESOURCE_ID", "ACS_RESOURCE_NAME", "Region","ClusterName","ClusterType", "Computer", "ControllerType"] # Output to send MDM metrics to fluent bit and then route it to fluentD @@ -425,7 +425,11 @@ # fieldpass = ["usage_active","cluster","node","host","device"] # taginclude = ["cluster","cpu","node"] - +# Dummy plugin to test out toml parsing happens properly +[[inputs.file]] + interval = "24h" + files = ["test.json"] + data_format = "json" # Read metrics about disk usage by mount point [[inputs.disk]] diff --git a/build/linux/installer/conf/test.json b/build/linux/installer/conf/test.json new file mode 100644 index 000000000..9e26dfeeb --- /dev/null +++ b/build/linux/installer/conf/test.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index 88c790be3..bdacf647d 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -36,6 +36,7 @@ MAINTAINER: 
'Microsoft Corporation' /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf; build/linux/installer/conf/td-agent-bit-rs.conf; 644; root; root /etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf; build/linux/installer/conf/azm-containers-parser.conf; 644; root; root /etc/opt/microsoft/docker-cimprov/out_oms.conf; build/linux/installer/conf/out_oms.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/test.json; build/linux/installer/conf/test.json; 644; root; root /etc/opt/microsoft/docker-cimprov/telegraf.conf; build/linux/installer/conf/telegraf.conf; 644; root; root /etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf; build/linux/installer/conf/telegraf-prom-side-car.conf; 644; root; root /etc/opt/microsoft/docker-cimprov/telegraf-rs.conf; build/linux/installer/conf/telegraf-rs.conf; 644; root; root @@ -53,6 +54,8 @@ MAINTAINER: 'Microsoft Corporation' /opt/ConfigParseErrorLogger.rb; build/common/installer/scripts/ConfigParseErrorLogger.rb; 755; root; root /opt/tomlparser-npm-config.rb; build/linux/installer/scripts/tomlparser-npm-config.rb; 755; root; root /opt/tomlparser-osm-config.rb; build/linux/installer/scripts/tomlparser-osm-config.rb; 755; root; root +/opt/test.json; build/linux/installer/conf/test.json; 644; root; root + /etc/opt/microsoft/docker-cimprov/health/healthmonitorconfig.json; build/linux/installer/conf/healthmonitorconfig.json; 644; root; root diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index ec348bba3..1a7034d4d 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -504,17 +504,19 @@ fi if [ ! -e "/etc/config/kube.conf" ]; then if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ] && [ -e "/opt/telegraf-test-prom-side-car.conf" ]; then echo "****************Start Telegraf in Test Mode**************************" - /opt/telegraf --config /opt/telegraf-test-prom-side-car.conf -test + /opt/telegraf --config /opt/telegraf-test-prom-side-car.conf --input-filter file -test if [ $? -eq 0 ]; then mv "/opt/telegraf-test-prom-side-car.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" + echo "Moving test conf file to telegraf side-car conf since test run succeeded" fi echo "****************End Telegraf Run in Test Mode**************************" else if [ -e "/opt/telegraf-test.conf" ]; then echo "****************Start Telegraf in Test Mode**************************" - /opt/telegraf --config /opt/telegraf-test.conf -test + /opt/telegraf --config /opt/telegraf-test.conf --input-filter file -test if [ $? -eq 0 ]; then mv "/opt/telegraf-test.conf" "/etc/opt/microsoft/docker-cimprov/telegraf.conf" + echo "Moving test conf file to telegraf daemonset conf since test run succeeded" fi echo "****************End Telegraf Run in Test Mode**************************" fi @@ -522,9 +524,10 @@ if [ ! -e "/etc/config/kube.conf" ]; then else if [ -e "/opt/telegraf-test-rs.conf" ]; then echo "****************Start Telegraf in Test Mode**************************" - /opt/telegraf --config /opt/telegraf-test-rs.conf -test + /opt/telegraf --config /opt/telegraf-test-rs.conf --input-filter file -test if [ $? 
-eq 0 ]; then mv "/opt/telegraf-test-rs.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" + echo "Moving test conf file to telegraf replicaset conf since test run succeeded" fi echo "****************End Telegraf Run in Test Mode**************************" fi From cd2275354aafb588f0ed74cf8d747e40226e4974 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 23 Jun 2021 13:17:56 -0700 Subject: [PATCH 120/301] revert fbit tail plugins defaults to std defaults (#586) --- .../installer/scripts/td-agent-bit-conf-customizer.rb | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb index 82c6c1d17..f29c87407 100644 --- a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb +++ b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb @@ -3,9 +3,7 @@ @td_agent_bit_conf_path = "/etc/opt/microsoft/docker-cimprov/td-agent-bit.conf" -@default_service_interval = "1" -@default_buffer_chunk_size = "1" -@default_buffer_max_size = "1" +@default_service_interval = "15" @default_mem_buf_limit = "10" def is_number?(value) @@ -25,9 +23,9 @@ def substituteFluentBitPlaceHolders serviceInterval = (!interval.nil? && is_number?(interval) && interval.to_i > 0 ) ? interval : @default_service_interval serviceIntervalSetting = "Flush " + serviceInterval - tailBufferChunkSize = (!bufferChunkSize.nil? && is_number?(bufferChunkSize) && bufferChunkSize.to_i > 0) ? bufferChunkSize : @default_buffer_chunk_size + tailBufferChunkSize = (!bufferChunkSize.nil? && is_number?(bufferChunkSize) && bufferChunkSize.to_i > 0) ? bufferChunkSize : nil - tailBufferMaxSize = (!bufferMaxSize.nil? && is_number?(bufferMaxSize) && bufferMaxSize.to_i > 0) ? bufferMaxSize : @default_buffer_max_size = "1" + tailBufferMaxSize = (!bufferMaxSize.nil? && is_number?(bufferMaxSize) && bufferMaxSize.to_i > 0) ? bufferMaxSize : nil if ((!tailBufferChunkSize.nil? && tailBufferMaxSize.nil?) || (!tailBufferChunkSize.nil? && !tailBufferMaxSize.nil? && tailBufferChunkSize.to_i > tailBufferMaxSize.to_i)) puts "config:warn buffer max size must be greater or equal to chunk size" From 8c41a42043a1cf7f5635e2521dfa7660d430dbeb Mon Sep 17 00:00:00 2001 From: David Michelman Date: Thu, 1 Jul 2021 13:40:41 -0700 Subject: [PATCH 121/301] fixed another bug (#593) --- source/plugins/ruby/in_kube_nodes.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index ffc11de55..ebfa903fd 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -559,10 +559,10 @@ def clean_cache() end end - nodes_to_remove.each do node_name + nodes_to_remove.each {|node_name| @cacheHash.delete(node_name) @timeAdded.delete(node_name) - end + } end end end # NodeCache From 00f1a0dbd0d835b085e4b1e0f55de22bc65545db Mon Sep 17 00:00:00 2001 From: bragi92 Date: Fri, 9 Jul 2021 16:46:41 +0000 Subject: [PATCH 122/301] feat: add new metrics to MDM for allocatable % calculation of cpu and memory usage (#584) * feat: allocatable cpu and memory % metrics for MDM * maybe * linux is working * windwos.... 
* some more * comment * better * syntax * ruby * revert omsagent.yaml * comments * pr feedback * pr feedback * testing msys2 version update * better --- kubernetes/windows/Dockerfile-dev-base-image | 2 +- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 5 + source/plugins/ruby/MdmMetricsGenerator.rb | 22 +++- source/plugins/ruby/constants.rb | 3 + source/plugins/ruby/filter_cadvisor2mdm.rb | 68 +++++++++-- source/plugins/ruby/kubelet_utils.rb | 108 ++++++++++++++++++ 6 files changed, 194 insertions(+), 14 deletions(-) diff --git a/kubernetes/windows/Dockerfile-dev-base-image b/kubernetes/windows/Dockerfile-dev-base-image index 9c6ae8db8..0081f9c53 100644 --- a/kubernetes/windows/Dockerfile-dev-base-image +++ b/kubernetes/windows/Dockerfile-dev-base-image @@ -8,7 +8,7 @@ LABEL vendor=Microsoft\ Corp \ RUN powershell -Command "Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" # Fluentd depends on cool.io whose fat gem is only available for Ruby < 2.5, so need to specify --platform ruby when install Ruby > 2.5 and install msys2 to get dev tools RUN choco install -y ruby --version 2.6.5.1 --params "'/InstallDir:C:\ruby26'" \ -&& choco install -y msys2 --version 20200903.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ +&& choco install -y msys2 --version 20210604.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ && choco install -y vim # gangams - optional MSYS2 update via ridk failing in merged docker file so skipping that since we dont need optional update diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 10720752d..da6e94f5f 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -81,6 +81,11 @@ def getSummaryStatsFromCAdvisor(winNode) return getResponse(winNode, relativeUri) end + def getCongifzCAdvisor(winNode: nil) + relativeUri = "/configz" + return getResponse(winNode, relativeUri) + end + def getAllMetricsCAdvisor(winNode: nil) relativeUri = "/metrics/cadvisor" return getResponse(winNode, relativeUri) diff --git a/source/plugins/ruby/MdmMetricsGenerator.rb b/source/plugins/ruby/MdmMetricsGenerator.rb index 73cf19fac..0858990da 100644 --- a/source/plugins/ruby/MdmMetricsGenerator.rb +++ b/source/plugins/ruby/MdmMetricsGenerator.rb @@ -37,6 +37,12 @@ class MdmMetricsGenerator Constants::MEMORY_WORKING_SET_BYTES => Constants::MDM_NODE_MEMORY_WORKING_SET_PERCENTAGE, } + @@node_metric_name_metric_allocatable_percentage_name_hash = { + Constants::CPU_USAGE_MILLI_CORES => Constants::MDM_NODE_CPU_USAGE_ALLOCATABLE_PERCENTAGE, + Constants::MEMORY_RSS_BYTES => Constants::MDM_NODE_MEMORY_RSS_ALLOCATABLE_PERCENTAGE, + Constants::MEMORY_WORKING_SET_BYTES => Constants::MDM_NODE_MEMORY_WORKING_SET_ALLOCATABLE_PERCENTAGE, + } + @@container_metric_name_metric_percentage_name_hash = { Constants::CPU_USAGE_MILLI_CORES => Constants::MDM_CONTAINER_CPU_UTILIZATION_METRIC, Constants::CPU_USAGE_NANO_CORES => Constants::MDM_CONTAINER_CPU_UTILIZATION_METRIC, @@ -526,7 +532,7 @@ def getContainerResourceUtilizationThresholds return metric_threshold_hash end - def getNodeResourceMetricRecords(record, metric_name, metric_value, percentage_metric_value) + def getNodeResourceMetricRecords(record, metric_name, metric_value, percentage_metric_value, allocatable_percentage_metric_value) records = [] begin custommetricrecord = 
MdmAlertTemplates::Node_resource_metrics_template % { @@ -554,6 +560,20 @@ def getNodeResourceMetricRecords(record, metric_name, metric_value, percentage_m } records.push(Yajl::Parser.parse(StringIO.new(additional_record))) end + + if !allocatable_percentage_metric_value.nil? + additional_record = MdmAlertTemplates::Node_resource_metrics_template % { + timestamp: record["Timestamp"], + metricName: @@node_metric_name_metric_allocatable_percentage_name_hash[metric_name], + hostvalue: record["Host"], + objectnamevalue: record["ObjectName"], + instancenamevalue: record["InstanceName"], + metricminvalue: allocatable_percentage_metric_value, + metricmaxvalue: allocatable_percentage_metric_value, + metricsumvalue: allocatable_percentage_metric_value, + } + records.push(Yajl::Parser.parse(StringIO.new(additional_record))) + end rescue => errorStr @log.info "Error in getNodeResourceMetricRecords: #{errorStr}" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index c037c99f6..c40d4c357 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -60,6 +60,9 @@ class Constants MDM_NODE_CPU_USAGE_PERCENTAGE = "cpuUsagePercentage" MDM_NODE_MEMORY_RSS_PERCENTAGE = "memoryRssPercentage" MDM_NODE_MEMORY_WORKING_SET_PERCENTAGE = "memoryWorkingSetPercentage" + MDM_NODE_CPU_USAGE_ALLOCATABLE_PERCENTAGE = "cpuUsageAllocatablePercentage" + MDM_NODE_MEMORY_RSS_ALLOCATABLE_PERCENTAGE = "memoryRssAllocatablePercentage" + MDM_NODE_MEMORY_WORKING_SET_ALLOCATABLE_PERCENTAGE = "memoryWorkingSetAllocatablePercentage" CONTAINER_TERMINATED_RECENTLY_IN_MINUTES = 5 OBJECT_NAME_K8S_CONTAINER = "K8SContainer" diff --git a/source/plugins/ruby/filter_cadvisor2mdm.rb b/source/plugins/ruby/filter_cadvisor2mdm.rb index 9c6b661b0..6bafa372a 100644 --- a/source/plugins/ruby/filter_cadvisor2mdm.rb +++ b/source/plugins/ruby/filter_cadvisor2mdm.rb @@ -66,8 +66,10 @@ def start # initialize cpu and memory limit if @process_incoming_stream @cpu_capacity = 0.0 + @cpu_allocatable = 0.0 @memory_capacity = 0.0 - ensure_cpu_memory_capacity_set + @memory_allocatable = 0.0 + ensure_cpu_memory_capacity_and_allocatable_set @containerCpuLimitHash = {} @containerMemoryLimitHash = {} @containerResourceDimensionHash = {} @@ -167,6 +169,7 @@ def filter(tag, time, record) counter_name = JSON.parse(record["json_Collections"])[0]["CounterName"] percentage_metric_value = 0.0 + allocatable_percentage_metric_value = 0.0 metric_value = JSON.parse(record["json_Collections"])[0]["Value"] if object_name == Constants::OBJECT_NAME_K8S_NODE && @metrics_to_collect_hash.key?(counter_name.downcase) @@ -176,39 +179,62 @@ def filter(tag, time, record) metric_value /= 1000000 #cadvisor record is in nanocores. 
Convert to mc if @@controller_type.downcase == "replicaset" target_node_cpu_capacity_mc = @NodeCache.cpu.get_capacity(record["Host"]) / 1000000 + target_node_cpu_allocatable_mc = 0.0 # We do not need this value in the replicaset else target_node_cpu_capacity_mc = @cpu_capacity + target_node_cpu_allocatable_mc = @cpu_allocatable end - @log.info "Metric_value: #{metric_value} CPU Capacity #{target_node_cpu_capacity_mc}" + @log.info "Metric_value: #{metric_value} CPU Capacity #{target_node_cpu_capacity_mc} CPU Allocatable #{target_node_cpu_allocatable_mc} " if target_node_cpu_capacity_mc != 0.0 percentage_metric_value = (metric_value) * 100 / target_node_cpu_capacity_mc end + if target_node_cpu_allocatable_mc != 0.0 + allocatable_percentage_metric_value = (metric_value) * 100 / target_node_cpu_allocatable_mc + else + allocatable_percentage_metric_value = 0.0 + end end if counter_name.start_with?("memory") metric_name = counter_name if @@controller_type.downcase == "replicaset" target_node_mem_capacity = @NodeCache.mem.get_capacity(record["Host"]) + target_node_mem_allocatable = 0.0 # We do not need this value in the replicaset else target_node_mem_capacity = @memory_capacity + target_node_mem_allocatable = @memory_allocatable # We do not need this value in the replicaset end - @log.info "Metric_value: #{metric_value} Memory Capacity #{target_node_mem_capacity}" + + @log.info "Metric_value: #{metric_value} Memory Capacity #{target_node_mem_capacity} Memory Allocatable #{target_node_mem_allocatable}" if target_node_mem_capacity != 0.0 percentage_metric_value = metric_value * 100 / target_node_mem_capacity end + + if target_node_mem_allocatable != 0.0 + allocatable_percentage_metric_value = metric_value * 100 / target_node_mem_allocatable + else + allocatable_percentage_metric_value = 0.0 + end end - @log.info "percentage_metric_value for metric: #{metric_name} for instance: #{record["Host"]} percentage: #{percentage_metric_value}" + @log.info "percentage_metric_value for metric: #{metric_name} for instance: #{record["Host"]} percentage: #{percentage_metric_value} allocatable_percentage: #{allocatable_percentage_metric_value}" - # do some sanity checking. Do we want this? - if percentage_metric_value > 100.0 or percentage_metric_value < 0.0 + # do some sanity checking. 
+ if percentage_metric_value > 100.0 telemetryProperties = {} telemetryProperties["Computer"] = record["Host"] telemetryProperties["MetricName"] = metric_name telemetryProperties["MetricPercentageValue"] = percentage_metric_value ApplicationInsightsUtility.sendCustomEvent("ErrorPercentageOutOfBounds", telemetryProperties) end + if allocatable_percentage_metric_value > 100.0 + telemetryProperties = {} + telemetryProperties["Computer"] = record["Host"] + telemetryProperties["MetricName"] = metric_name + telemetryProperties["MetricAllocatablePercentageValue"] = allocatable_percentage_metric_value + ApplicationInsightsUtility.sendCustomEvent("ErrorPercentageOutOfBounds", telemetryProperties) + end - return MdmMetricsGenerator.getNodeResourceMetricRecords(record, metric_name, metric_value, percentage_metric_value) + return MdmMetricsGenerator.getNodeResourceMetricRecords(record, metric_name, metric_value, percentage_metric_value, allocatable_percentage_metric_value) elsif object_name == Constants::OBJECT_NAME_K8S_CONTAINER && @metrics_to_collect_hash.key?(counter_name.downcase) instanceName = record["InstanceName"] metricName = counter_name @@ -304,13 +330,20 @@ def filterPVInsightsMetrics(record) end end - def ensure_cpu_memory_capacity_set - if @cpu_capacity != 0.0 && @memory_capacity != 0.0 - @log.info "CPU And Memory Capacity are already set" + def ensure_cpu_memory_capacity_and_allocatable_set + @@controller_type = ENV["CONTROLLER_TYPE"] + + if @cpu_capacity != 0.0 && @memory_capacity != 0.0 && @@controller_type.downcase == "replicaset" + @log.info "CPU And Memory Capacity are already set and their values are as follows @cpu_capacity : #{@cpu_capacity}, @memory_capacity: #{@memory_capacity}" + return + end + + if @@controller_type.downcase == "daemonset" && @cpu_capacity != 0.0 && @memory_capacity != 0.0 && @cpu_allocatable != 0.0 && @memory_allocatable != 0.0 + @log.info "CPU And Memory Capacity are already set and their values are as follows @cpu_capacity : #{@cpu_capacity}, @memory_capacity: #{@memory_capacity}" + @log.info "CPU And Memory Allocatable are already set and their values are as follows @cpu_allocatable : #{@cpu_allocatable}, @memory_allocatable: #{@memory_allocatable}" return end - @@controller_type = ENV["CONTROLLER_TYPE"] if @@controller_type.downcase == "replicaset" @log.info "ensure_cpu_memory_capacity_set @cpu_capacity #{@cpu_capacity} @memory_capacity #{@memory_capacity}" @@ -354,13 +387,24 @@ def ensure_cpu_memory_capacity_set # cpu_capacity and memory_capacity keep initialized value of 0.0 @log.error "Error getting capacity_from_kubelet: cpu_capacity and memory_capacity" end + + allocatable_from_kubelet = KubeletUtils.get_node_allocatable(@cpu_capacity, @memory_capacity) + + # Error handling in case /configz endpoint fails + if !allocatable_from_kubelet.nil? 
&& allocatable_from_kubelet.length > 1 + @cpu_allocatable = allocatable_from_kubelet[0] + @memory_allocatable = allocatable_from_kubelet[1] + else + # cpu_allocatable and memory_allocatable keep initialized value of 0.0 + @log.error "Error getting allocatable_from_kubelet: cpu_allocatable and memory_allocatable" + end end end def filter_stream(tag, es) new_es = Fluent::MultiEventStream.new begin - ensure_cpu_memory_capacity_set + ensure_cpu_memory_capacity_and_allocatable_set # Getting container limits hash if @process_incoming_stream @containerCpuLimitHash, @containerMemoryLimitHash, @containerResourceDimensionHash = KubeletUtils.get_all_container_limits diff --git a/source/plugins/ruby/kubelet_utils.rb b/source/plugins/ruby/kubelet_utils.rb index 22bc87c0e..e31407b54 100644 --- a/source/plugins/ruby/kubelet_utils.rb +++ b/source/plugins/ruby/kubelet_utils.rb @@ -41,6 +41,114 @@ def get_node_capacity end end + def get_node_allocatable(cpu_capacity, memory_capacity) + begin + if cpu_capacity == 0.0 || memory_capacity == 0.0 + @log.error "kubelet_utils.rb::get_node_allocatble - cpu_capacity or memory_capacity values not set. Hence we cannot calculate allocatable values" + end + + cpu_allocatable = 1.0 + memory_allocatable = 1.0 + + allocatable_response = CAdvisorMetricsAPIClient.getCongifzCAdvisor(winNode: nil) + parsed_response = JSON.parse(allocatable_response.body) + + begin + kubereserved_cpu = parsed_response["kubeletconfig"]["kubeReserved"]["cpu"] + if kubereserved_cpu.nil? || kubereserved_cpu == "" + kubereserved_cpu = "0" + end + @log.info "get_node_allocatable::kubereserved_cpu #{kubereserved_cpu}" + rescue => errorStr + @log.error "Error in get_node_allocatable::kubereserved_cpu: #{errorStr}" + kubereserved_cpu = "0" + ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::kubereserved_cpu: #{errorStr}") + end + + begin + kubereserved_memory = parsed_response["kubeletconfig"]["kubeReserved"]["memory"] + if kubereserved_memory.nil? || kubereserved_memory == "" + kubereserved_memory = "0" + end + @log.info "get_node_allocatable::kubereserved_memory #{kubereserved_memory}" + rescue => errorStr + @log.error "Error in get_node_allocatable::kubereserved_memory: #{errorStr}" + kubereserved_memory = "0" + ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::kubereserved_cpu: #{errorStr}") + end + begin + systemReserved_cpu = parsed_response["kubeletconfig"]["systemReserved"]["cpu"] + if systemReserved_cpu.nil? || systemReserved_cpu == "" + systemReserved_cpu = "0" + end + @log.info "get_node_allocatable::systemReserved_cpu #{systemReserved_cpu}" + rescue => errorStr + # this will likely always reach this condition for AKS ~ only applicable for hyrid + MDM combination + @log.error "Error in get_node_allocatable::systemReserved_cpu: #{errorStr}" + systemReserved_cpu = "0" + ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::kubereserved_cpu: #{errorStr}") + end + + begin + explicitlyReserved_cpu = parsed_response["kubeletconfig"]["reservedCPUs"] + if explicitlyReserved_cpu.nil? 
|| explicitlyReserved_cpu == "" + explicitlyReserved_cpu = "0" + end + @log.info "get_node_allocatable::explicitlyReserved_cpu #{explicitlyReserved_cpu}" + rescue => errorStr + # this will likely always reach this condition for AKS ~ only applicable for hyrid + MDM combination + @log.error "Error in get_node_allocatable::explicitlyReserved_cpu: #{errorStr}" + explicitlyReserved_cpu = "0" + ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::explicitlyReserved_cpu: #{errorStr}") + end + + begin + systemReserved_memory = parsed_response["kubeletconfig"]["systemReserved"]["memory"] + if systemReserved_memory.nil? || systemReserved_memory == "" + systemReserved_memory = "0" + end + @log.info "get_node_allocatable::systemReserved_memory #{systemReserved_memory}" + rescue => errorStr + @log.error "Error in get_node_allocatable::systemReserved_memory: #{errorStr}" + systemReserved_memory = "0" + ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::kubereserved_cpu: #{errorStr}") + end + + begin + evictionHard_memory = parsed_response["kubeletconfig"]["evictionHard"]["memory.available"] + if evictionHard_memory.nil? || evictionHard_memory == "" + evictionHard_memory = "0" + end + @log.info "get_node_allocatable::evictionHard_memory #{evictionHard_memory}" + rescue => errorStr + @log.error "Error in get_node_allocatable::evictionHard_memory: #{errorStr}" + evictionHard_memory = "0" + ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::kubereserved_cpu: #{errorStr}") + end + + # do calculation in nanocore since that's what KubernetesApiClient.getMetricNumericValue expects + cpu_capacity_number = cpu_capacity.to_i * 1000.0 ** 2 + # subtract to get allocatable. Formula : Allocatable = Capacity - ( kube reserved + system reserved + eviction threshold ) + # https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable + if KubernetesApiClient.getMetricNumericValue("cpu", explicitlyReserved_cpu) > 0 + cpu_allocatable = cpu_capacity_number - KubernetesApiClient.getMetricNumericValue("cpu", explicitlyReserved_cpu) + else + cpu_allocatable = cpu_capacity_number - (KubernetesApiClient.getMetricNumericValue("cpu", kubereserved_cpu) + KubernetesApiClient.getMetricNumericValue("cpu", systemReserved_cpu)) + end + # convert back to units similar to what we get for capacity + cpu_allocatable = cpu_allocatable / (1000.0 ** 2) + @log.info "CPU Allocatable #{cpu_allocatable}" + + memory_allocatable = memory_capacity - (KubernetesApiClient.getMetricNumericValue("memory", kubereserved_memory) + KubernetesApiClient.getMetricNumericValue("memory", systemReserved_memory) + KubernetesApiClient.getMetricNumericValue("memory", evictionHard_memory)) + @log.info "Memory Allocatable #{memory_allocatable}" + + return [cpu_allocatable, memory_allocatable] + rescue => errorStr + @log.info "Error get_node_allocatable: #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + end + def get_all_container_limits begin @log.info "in get_all_container_limits..." 
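
[Editor's note] The kubelet_utils.rb change above computes node allocatable from the kubelet /configz response using Allocatable = Capacity - (kubeReserved + systemReserved + evictionHard), with an explicitly reserved CPU set taking precedence when present. The sketch below is a minimal, standalone illustration of that arithmetic only; it is not the plugin code. The helper names (parse_millicores, parse_bytes) and all sample values are hypothetical stand-ins for KubernetesApiClient.getMetricNumericValue and real kubelet config values.

```ruby
# Minimal sketch of the allocatable formula used in patch 122 (assumed sample values).
# Allocatable = Capacity - (kubeReserved + systemReserved + evictionHard)

# Hypothetical quantity parsers (stand-ins for KubernetesApiClient.getMetricNumericValue).
def parse_millicores(quantity)
  # "100m" -> 100.0 millicores, "1" (cores) -> 1000.0 millicores
  quantity.end_with?("m") ? quantity.to_f : quantity.to_f * 1000.0
end

def parse_bytes(quantity)
  units = { "Ki" => 1024.0, "Mi" => 1024.0**2, "Gi" => 1024.0**3 }
  suffix = units.keys.find { |u| quantity.end_with?(u) }
  suffix ? quantity.to_f * units[suffix] : quantity.to_f
end

cpu_capacity_mc = 2000.0          # sample node: 2 cores, in millicores
mem_capacity    = 8.0 * 1024**3   # sample node: 8 GiB, in bytes

# CPU: kubeReserved + systemReserved (no explicit reservedCPUs in this sample)
cpu_allocatable_mc = cpu_capacity_mc -
                     (parse_millicores("100m") + parse_millicores("100m"))

# Memory: kubeReserved + systemReserved + evictionHard (memory.available)
mem_allocatable = mem_capacity -
                  (parse_bytes("1843Mi") + parse_bytes("0") + parse_bytes("750Mi"))

puts "cpu allocatable (mc): #{cpu_allocatable_mc}"
puts "memory allocatable (bytes): #{mem_allocatable}"
```

As in the patch, if the kubelet config sets reservedCPUs explicitly, that value would be subtracted from CPU capacity instead of the kubeReserved + systemReserved sum; the allocatable value then feeds the new *AllocatablePercentage MDM metrics as metric_value * 100 / allocatable.
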
From e1f9978677fd37953aa4d5af24fb763010b0de05 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Mon, 12 Jul 2021 12:46:23 -0700 Subject: [PATCH 123/301] update adx sdk for perf issue (#601) --- source/plugins/go/src/go.mod | 2 +- source/plugins/go/src/go.sum | 6 ++++++ source/plugins/go/src/oms.go | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/source/plugins/go/src/go.mod b/source/plugins/go/src/go.mod index 5b5c735e5..c3e6c2044 100644 --- a/source/plugins/go/src/go.mod +++ b/source/plugins/go/src/go.mod @@ -4,7 +4,7 @@ go 1.14 require ( code.cloudfoundry.org/clock v1.0.1-0.20200131002207-86534f4ca3a5 // indirect - github.com/Azure/azure-kusto-go v0.1.4-0.20200427191510-041d4ed55f86 + github.com/Azure/azure-kusto-go v0.3.2 github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 github.com/fluent/fluent-bit-go v0.0.0-20171103221316-c4a158a6e3a7 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 // indirect diff --git a/source/plugins/go/src/go.sum b/source/plugins/go/src/go.sum index 64745749f..7e8b3d765 100644 --- a/source/plugins/go/src/go.sum +++ b/source/plugins/go/src/go.sum @@ -5,9 +5,13 @@ github.com/Azure/azure-kusto-go v0.1.3 h1:0u+YqfIvwj5PHd+moXwtlxVePt8xTLU1ixM8Q6 github.com/Azure/azure-kusto-go v0.1.3/go.mod h1:55hwXJ3PaahmWZFP7VC4+PlgsSUuetSA30rFtYFabfc= github.com/Azure/azure-kusto-go v0.1.4-0.20200427191510-041d4ed55f86 h1:vyhCediIKg1gZ9H/kMcutU8F8BFNhxLk76Gti8UAOzo= github.com/Azure/azure-kusto-go v0.1.4-0.20200427191510-041d4ed55f86/go.mod h1:55hwXJ3PaahmWZFP7VC4+PlgsSUuetSA30rFtYFabfc= +github.com/Azure/azure-kusto-go v0.3.2 h1:XpS9co6GvEDl2oICF9HsjEsQVwEpRK6wbNWb9Z+uqsY= +github.com/Azure/azure-kusto-go v0.3.2/go.mod h1:wd50n4qlsSxh+G4f80t+Fnl2ShK9AcXD+lMOstiKuYo= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= +github.com/Azure/azure-sdk-for-go v44.1.0+incompatible h1:l1UGvaaoMCUwVGUauvHzeB4t+Y0yPX5iJwBhzc0LqyE= +github.com/Azure/azure-sdk-for-go v44.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd h1:b3wyxBl3vvr15tUAziPBPK354y+LSdfPCpex5oBttHo= @@ -73,6 +77,7 @@ github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBv github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= @@ -116,6 +121,7 @@ 
golang.org/x/net v0.0.0-20170809000501-1c05540f6879 h1:0rFa7EaCGdQPmZVbo9F7MNF65 golang.org/x/net v0.0.0-20170809000501-1c05540f6879/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200421231249-e086a090c8fd h1:QPwSajcTUrFriMF1nJ3XzgoqakqQEsnZf9LdXdi2nkI= golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 25f364c55..217ba1efc 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -1285,7 +1285,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { //ADXFlushMutex.Lock() //defer ADXFlushMutex.Unlock() //MultiJSON support is not there yet - if ingestionErr := ADXIngestor.FromReader(ctx, r, ingest.IngestionMappingRef("ContainerLogV2Mapping", ingest.JSON), ingest.FileFormat(ingest.JSON)); ingestionErr != nil { + if _, ingestionErr := ADXIngestor.FromReader(ctx, r, ingest.IngestionMappingRef("ContainerLogV2Mapping", ingest.JSON), ingest.FileFormat(ingest.JSON)); ingestionErr != nil { Log("Error when streaming to ADX Ingestion: %s", ingestionErr.Error()) //ADXIngestor = nil //not required as per ADX team. Will keep it to indicate that we tried this approach From c9ade1ba51672292a052b5697664781de7bed2c0 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Mon, 12 Jul 2021 20:10:03 -0700 Subject: [PATCH 124/301] remove md check --- .github/workflows/pr-checker.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/pr-checker.yml b/.github/workflows/pr-checker.yml index c75e6dc24..f3bdb27e8 100644 --- a/.github/workflows/pr-checker.yml +++ b/.github/workflows/pr-checker.yml @@ -5,8 +5,6 @@ on: branches: - ci_dev - ci_prod - paths-ignore: - - '**.md' jobs: LINUX-build-and-scan: runs-on: ubuntu-latest From 6e2732e2896e72efbd3948e217b34ee5bbc2aff0 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 13 Jul 2021 06:48:55 -0700 Subject: [PATCH 125/301] Gangams/release notes update for hotfix (#596) * release notes updates * release notes updates for ciprod06112021-1 --- ReleaseNotes.md | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 266dadf1c..423161236 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,21 +11,28 @@ additional questions or comments. 
Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 07/02/2021 - +##### Version microsoft/oms:ciprod06112021-1 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021-1 (linux) +##### Version microsoft/oms:win-ciprod06112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021 (windows) +##### Code change log +- Hotfix for crash in clean_cache in in_kube_node_inventory plugin +- We didn't rebuild windows container, so the image version for windows container stays the same as last release (ciprod:win-ciprod06112021) before this hotfix + ### 06/11/2021 - ##### Version microsoft/oms:ciprod06112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021 (linux) ##### Version microsoft/oms:win-ciprod06112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021 (windows) - Linux Agent - - Removal of base omsagent dependency + - Removal of base omsagent dependency - Using MDSD version 1.10.1 as base agent for all the supported LA data types - Ruby version upgrade to 2.6 i.e. same version as windows agent - - Upgrade FluentD gem version to 1.12.2 + - Upgrade FluentD gem version to 1.12.2 - All the Ruby Fluentd Plugins upgraded to v1 as per Fluentd guidance - Fluent-bit tail plugin Mem_Buf_limit is configurable via ConfigMap - Windows Agent - CA cert changes for airgapped clouds - Send perf metrics to MDM from windows daemonset - FluentD gem version upgrade from 1.10.2 to 1.12.2 to make same version as Linux Agent - - Doc updates + - Doc updates - README updates related to OSM preview release for Arc K8s - README updates related to recommended alerts @@ -63,7 +70,7 @@ Note : The agent version(s) below has dates (ciprod), which indicate t ##### Version microsoft/oms:ciprod03262021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021 (linux) ##### Version microsoft/oms:win-ciprod03262021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03262021 (windows) ##### Code change log -- Started collecting new metric - kubelet running pods count +- Started collecting new metric - kubelet running pods count - Onboarding script fixes to add explicit json output - Proxy and token updates for ARC - Doc updates for Microsoft charts repo release @@ -94,13 +101,13 @@ Note : The agent version(s) below has dates (ciprod), which indicate t ##### Version microsoft/oms:win-ciprod01112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod01112021 (windows) ##### Code change log - Fixes for Linux Agent Replicaset Pod OOMing issue -- Update fluentbit (1.14.2 to 1.6.8) for the Linux Daemonset +- Update fluentbit (1.14.2 to 1.6.8) for the Linux Daemonset - Make Fluentbit settings: log_flush_interval_secs, tail_buf_chunksize_megabytes and tail_buf_maxsize_megabytes configurable via configmap - Support for PV inventory collection - Removal of Custom metric region check for Public cloud regions and update to use cloud environment variable to determine the custom metric support - For daemonset pods, add the dnsconfig to use ndots: 3 from ndots:5 to optimize the number of DNS API calls made - Fix for inconsistency in the collection container environment variables for the pods which has high number of containers -- Fix for disabling of std{out;err} log_collection_settings via configmap issue in windows daemonset +- Fix for disabling of std{out;err} log_collection_settings via configmap issue 
in windows daemonset - Update to use workspace key from mount file rather than environment variable for windows daemonset agent - Remove per container info logs in the container inventory - Enable ADX route for windows container logs From 6df299f9658c8397ea48948b3c614de629acefb2 Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Wed, 14 Jul 2021 16:39:13 -0700 Subject: [PATCH 126/301] Cherry picking hotfix changes to ci_dev (#605) --- kubernetes/windows/Dockerfile | 2 +- kubernetes/windows/main.ps1 | 55 ++++++++++++++++++++--------------- 2 files changed, 32 insertions(+), 25 deletions(-) diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 94be59644..0ba64cd75 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod06112021 +ARG IMAGE_TAG=win-ciprod06112021-2 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index bc053b0d6..1bb9a3468 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -140,7 +140,7 @@ function Set-EnvironmentVariables { if ($aiKeyURl) { $aiKeyFetched = "" # retry up to 5 times - for( $i = 1; $i -le 4; $i++) { + for ( $i = 1; $i -le 4; $i++) { try { $response = Invoke-WebRequest -uri $aiKeyURl -UseBasicParsing -TimeoutSec 5 -ErrorAction:Stop @@ -229,6 +229,24 @@ function Set-EnvironmentVariables { Write-Host "Failed to set environment variable HOSTNAME for target 'machine' since it is either null or empty" } + $nodeIp = [System.Environment]::GetEnvironmentVariable("NODE_IP", "process") + if (![string]::IsNullOrEmpty($nodeIp)) { + [System.Environment]::SetEnvironmentVariable("NODE_IP", $nodeIp, "machine") + Write-Host "Successfully set environment variable NODE_IP - $($nodeIp) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable NODE_IP for target 'machine' since it is either null or empty" + } + + $agentVersion = [System.Environment]::GetEnvironmentVariable("AGENT_VERSION", "process") + if (![string]::IsNullOrEmpty($agentVersion)) { + [System.Environment]::SetEnvironmentVariable("AGENT_VERSION", $agentVersion, "machine") + Write-Host "Successfully set environment variable AGENT_VERSION - $($agentVersion) for target 'machine'..." 
+ } + else { + Write-Host "Failed to set environment variable AGENT_VERSION for target 'machine' since it is either null or empty" + } + # run config parser ruby /opt/omsagentwindows/scripts/ruby/tomlparser.rb .\setenv.ps1 @@ -364,13 +382,12 @@ function Start-Fluent-Telegraf { if (![string]::IsNullOrEmpty($containerRuntime) -and [string]$containerRuntime.StartsWith('docker') -eq $false) { # change parser from docker to cri if the container runtime is not docker Write-Host "changing parser from Docker to CRI since container runtime : $($containerRuntime) and which is non-docker" - (Get-Content -Path C:/etc/fluent/fluent.conf -Raw) -replace 'fluent-docker-parser.conf','fluent-cri-parser.conf' | Set-Content C:/etc/fluent/fluent.conf + (Get-Content -Path C:/etc/fluent/fluent.conf -Raw) -replace 'fluent-docker-parser.conf', 'fluent-cri-parser.conf' | Set-Content C:/etc/fluent/fluent.conf } # Start telegraf only in sidecar scraping mode $sidecarScrapingEnabled = [System.Environment]::GetEnvironmentVariable('SIDECAR_SCRAPING_ENABLED') - if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabled.ToLower() -eq 'true') - { + if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabled.ToLower() -eq 'true') { Write-Host "Starting telegraf..." Start-Telegraf } @@ -411,15 +428,6 @@ function Start-Telegraf { Write-Host "Failed to set environment variable KUBERNETES_SERVICE_PORT for target 'machine' since it is either null or empty" } - $nodeIp = [System.Environment]::GetEnvironmentVariable("NODE_IP", "process") - if (![string]::IsNullOrEmpty($nodeIp)) { - [System.Environment]::SetEnvironmentVariable("NODE_IP", $nodeIp, "machine") - Write-Host "Successfully set environment variable NODE_IP - $($nodeIp) for target 'machine'..." - } - else { - Write-Host "Failed to set environment variable NODE_IP for target 'machine' since it is either null or empty" - } - Write-Host "Installing telegraf service" C:\opt\telegraf\telegraf.exe --service install --config "C:\etc\telegraf\telegraf.conf" @@ -432,14 +440,15 @@ function Start-Telegraf { sc.exe \\$serverName config telegraf start= delayed-auto Write-Host "Successfully set delayed start for telegraf" - } else { + } + else { Write-Host "Failed to get environment variable PODNAME to set delayed telegraf start" } } catch { - $e = $_.Exception - Write-Host $e - Write-Host "exception occured in delayed telegraf start.. continuing without exiting" + $e = $_.Exception + Write-Host $e + Write-Host "exception occured in delayed telegraf start.. continuing without exiting" } Write-Host "Running telegraf service in test mode" C:\opt\telegraf\telegraf.exe --config "C:\etc\telegraf\telegraf.conf" --test @@ -448,8 +457,7 @@ function Start-Telegraf { # Trying to start telegraf again if it did not start due to fluent bit not being ready at startup Get-Service telegraf | findstr Running - if ($? -eq $false) - { + if ($? -eq $false) { Write-Host "trying to start telegraf in again in 30 seconds, since fluentbit might not have been ready..." 
Start-Sleep -s 30 C:\opt\telegraf\telegraf.exe --service start @@ -488,7 +496,7 @@ function Bootstrap-CACertificates { $certMountPath = "C:\ca" Get-ChildItem $certMountPath | Foreach-Object { - $absolutePath=$_.FullName + $absolutePath = $_.FullName Write-Host "cert path: $($absolutePath)" Import-Certificate -FilePath $absolutePath -CertStoreLocation 'Cert:\LocalMachine\Root' -Verbose } @@ -510,10 +518,9 @@ Start-FileSystemWatcher $aksResourceId = [System.Environment]::GetEnvironmentVariable("AKS_RESOURCE_ID") $requiresCertBootstrap = [System.Environment]::GetEnvironmentVariable("REQUIRES_CERT_BOOTSTRAP") if (![string]::IsNullOrEmpty($requiresCertBootstrap) -and ` - $requiresCertBootstrap.ToLower() -eq 'true' -and ` - ![string]::IsNullOrEmpty($aksResourceId) -and ` - $aksResourceId.ToLower().Contains("/microsoft.containerservice/managedclusters/")) -{ + $requiresCertBootstrap.ToLower() -eq 'true' -and ` + ![string]::IsNullOrEmpty($aksResourceId) -and ` + $aksResourceId.ToLower().Contains("/microsoft.containerservice/managedclusters/")) { Bootstrap-CACertificates } From 3b3833745a12b5c793dfc66186bc54db76aa77d7 Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Thu, 15 Jul 2021 12:25:25 -0700 Subject: [PATCH 127/301] release changes (#607) --- ReleaseNotes.md | 5 +++++ kubernetes/omsagent.yaml | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 423161236..0c51b737c 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,11 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 07/13/2021 - +##### Version microsoft/oms:win-ciprod06112021-2 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021-2 (windows) +##### Code change log +- Hotfix for fixing NODE_IP environment variable not set issue for non sidecar mode + ### 07/02/2021 - ##### Version microsoft/oms:ciprod06112021-1 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021-1 (linux) ##### Version microsoft/oms:win-ciprod06112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021 (windows) diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 617c81f38..855f3a8e1 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -760,7 +760,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021-2" imagePullPolicy: IfNotPresent resources: limits: From bcea7fcfbc8c68bed62912c772d21bf2823a23e5 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 19 Jul 2021 14:43:57 -0700 Subject: [PATCH 128/301] Gangams/aad stage3 msi auth (#585) * changes related to aad msi auth feature * use existing envvars * fix imds token expiry interval * refactor the windows agent ingestion token code * code cleanup * fix build errors * code clean up * code clean up * code clean up * code clean up * more refactoring * fix bug * fix bug * add debug logs * add nil checks * revert changes * revert yaml change since this added in aks side * fix pr feedback * fix pr feedback * refine retry code * update mdsd env as per official build * cleanup * update env vars per mdsd * update with mdsd official build * skip cert gen & renewal incase of aad msi auth * add nil check * cherry windows agent nodeip issue * fix merge issue 
Co-authored-by: rashmichandrashekar --- .../installer/datafiles/base_container.data | 3 + .../in_heartbeat_request.rb | 20 +- kubernetes/linux/main.sh | 112 ++-- kubernetes/linux/setup.sh | 4 +- kubernetes/windows/main.ps1 | 72 ++- .../ci-extension-dcr-streams.md | 186 +++++++ scripts/dcr-onboarding/ci-extension-dcr.json | 59 ++ source/plugins/go/src/extension/extension.go | 101 ++++ source/plugins/go/src/extension/interfaces.go | 34 ++ .../plugins/go/src/extension/socket_writer.go | 85 +++ source/plugins/go/src/go.mod | 2 +- .../plugins/go/src/ingestion_token_utils.go | 516 ++++++++++++++++++ source/plugins/go/src/oms.go | 91 ++- source/plugins/go/src/utils.go | 43 +- .../ruby/ApplicationInsightsUtility.rb | 9 +- source/plugins/ruby/CustomMetricsUtils.rb | 4 +- source/plugins/ruby/constants.rb | 23 + .../ruby/filter_health_model_builder.rb | 26 +- source/plugins/ruby/in_cadvisor_perf.rb | 26 +- source/plugins/ruby/in_containerinventory.rb | 38 +- source/plugins/ruby/in_kube_events.rb | 22 +- source/plugins/ruby/in_kube_nodes.rb | 58 +- source/plugins/ruby/in_kube_podinventory.rb | 54 +- source/plugins/ruby/in_kube_pvinventory.rb | 25 +- .../plugins/ruby/in_kubestate_deployments.rb | 21 +- source/plugins/ruby/in_kubestate_hpa.rb | 18 +- source/plugins/ruby/in_win_cadvisor_perf.rb | 12 + source/plugins/ruby/out_mdm.rb | 27 +- source/plugins/utils/extension.rb | 77 +++ source/plugins/utils/extension_utils.rb | 27 + 30 files changed, 1612 insertions(+), 183 deletions(-) create mode 100644 scripts/dcr-onboarding/ci-extension-dcr-streams.md create mode 100644 scripts/dcr-onboarding/ci-extension-dcr.json create mode 100644 source/plugins/go/src/extension/extension.go create mode 100644 source/plugins/go/src/extension/interfaces.go create mode 100644 source/plugins/go/src/extension/socket_writer.go create mode 100644 source/plugins/go/src/ingestion_token_utils.go create mode 100644 source/plugins/utils/extension.rb create mode 100644 source/plugins/utils/extension_utils.rb diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index bdacf647d..b71cafd49 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -150,6 +150,9 @@ MAINTAINER: 'Microsoft Corporation' /etc/fluent/plugin/omslog.rb; source/plugins/utils/omslog.rb; 644; root; root /etc/fluent/plugin/oms_common.rb; source/plugins/utils/oms_common.rb; 644; root; root +/etc/fluent/plugin/extension.rb; source/plugins/utils/extension.rb; 644; root; root +/etc/fluent/plugin/extension_utils.rb; source/plugins/utils/extension_utils.rb; 644; root; root + /etc/fluent/kube.conf; build/linux/installer/conf/kube.conf; 644; root; root /etc/fluent/container.conf; build/linux/installer/conf/container.conf; 644; root; root diff --git a/build/windows/installer/scripts/rubyKeepCertificateAlive/in_heartbeat_request.rb b/build/windows/installer/scripts/rubyKeepCertificateAlive/in_heartbeat_request.rb index e255c4a71..e525d8681 100644 --- a/build/windows/installer/scripts/rubyKeepCertificateAlive/in_heartbeat_request.rb +++ b/build/windows/installer/scripts/rubyKeepCertificateAlive/in_heartbeat_request.rb @@ -36,14 +36,18 @@ def start def enumerate begin - puts "Calling certificate renewal code..." - maintenance = OMS::OnboardingHelper.new( - ENV["WSID"], - ENV["DOMAIN"], - ENV["CI_AGENT_GUID"] - ) - ret_code = maintenance.register_certs() - puts "Return code from register certs : #{ret_code}" + if !ENV["AAD_MSI_AUTH_MODE"].nil? 
&& !ENV["AAD_MSI_AUTH_MODE"].empty? && ENV["AAD_MSI_AUTH_MODE"].downcase == "true" + puts "skipping certificate renewal code since AAD MSI auth configured" + else + puts "Calling certificate renewal code..." + maintenance = OMS::OnboardingHelper.new( + ENV["WSID"], + ENV["DOMAIN"], + ENV["CI_AGENT_GUID"] + ) + ret_code = maintenance.register_certs() + puts "Return code from register certs : #{ret_code}" + end rescue => errorStr puts "in_heartbeat_request::enumerate:Failed in enumerate: #{errorStr}" # STDOUT telemetry should alredy be going to Traces in AI. diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 1a7034d4d..428e6f35a 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -12,7 +12,7 @@ waitforlisteneronTCPport() { echo "${FUNCNAME[0]} called with incorrect arguments<$1 , $2>. Required arguments <#port, #wait-time-in-seconds>" return -1 else - + if [[ $port =~ $numeric ]] && [[ $waittimesecs =~ $numeric ]]; then #local varlistener=$(netstat -lnt | awk '$6 == "LISTEN" && $4 ~ ":25228$"') while true @@ -57,7 +57,11 @@ else export customResourceId=$AKS_RESOURCE_ID echo "export customResourceId=$AKS_RESOURCE_ID" >> ~/.bashrc source ~/.bashrc - echo "customResourceId:$customResourceId" + echo "customResourceId:$customResourceId" + export customRegion=$AKS_REGION + echo "export customRegion=$AKS_REGION" >> ~/.bashrc + source ~/.bashrc + echo "customRegion:$customRegion" fi #set agent config schema version @@ -194,9 +198,15 @@ fi if [ -z $domain ]; then ClOUD_ENVIRONMENT="unknown" elif [ $domain == "opinsights.azure.com" ]; then - CLOUD_ENVIRONMENT="public" -else - CLOUD_ENVIRONMENT="national" + CLOUD_ENVIRONMENT="azurepubliccloud" +elif [ $domain == "opinsights.azure.cn" ]; then + CLOUD_ENVIRONMENT="azurechinacloud" +elif [ $domain == "opinsights.azure.us" ]; then + CLOUD_ENVIRONMENT="azureusgovernmentcloud" +elif [ $domain == "opinsights.azure.eaglex.ic.gov" ]; then + CLOUD_ENVIRONMENT="usnat" +elif [ $domain == "opinsights.azure.microsoft.scloud" ]; then + CLOUD_ENVIRONMENT="ussec" fi export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT echo "export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT" >> ~/.bashrc @@ -233,9 +243,9 @@ if [ ${#APPLICATIONINSIGHTS_AUTH_URL} -ge 1 ]; then # (check if APPLICATIONINSI fi -aikey=$(echo $APPLICATIONINSIGHTS_AUTH | base64 --decode) -export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey -echo "export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey" >> ~/.bashrc +aikey=$(echo $APPLICATIONINSIGHTS_AUTH | base64 --decode) +export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey +echo "export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey" >> ~/.bashrc source ~/.bashrc @@ -421,7 +431,7 @@ export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="kubelet_docker_operations_error if [ "$CONTAINER_RUNTIME" != "docker" ]; then # these metrics are avialble only on k8s versions <1.18 and will get deprecated from 1.18 export KUBELET_RUNTIME_OPERATIONS_METRIC="kubelet_runtime_operations" - export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="kubelet_runtime_operations_errors" + export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="kubelet_runtime_operations_errors" fi echo "set caps for ruby process to read container env from proc" @@ -445,34 +455,56 @@ DOCKER_CIMPROV_VERSION=$(dpkg -l | grep docker-cimprov | awk '{print $3}') echo "DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION echo "export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" >> ~/.bashrc -echo "*** activating oneagent in legacy auth mode ***" -CIWORKSPACE_id="$(cat 
/etc/omsagent-secret/WSID)" -#use the file path as its secure than env -CIWORKSPACE_keyFile="/etc/omsagent-secret/KEY" -cat /etc/mdsd.d/envmdsd | while read line; do - echo $line >> ~/.bashrc -done -source /etc/mdsd.d/envmdsd -echo "setting mdsd workspaceid & key for workspace:$CIWORKSPACE_id" -export CIWORKSPACE_id=$CIWORKSPACE_id -echo "export CIWORKSPACE_id=$CIWORKSPACE_id" >> ~/.bashrc -export CIWORKSPACE_keyFile=$CIWORKSPACE_keyFile -echo "export CIWORKSPACE_keyFile=$CIWORKSPACE_keyFile" >> ~/.bashrc -export OMS_TLD=$domain -echo "export OMS_TLD=$OMS_TLD" >> ~/.bashrc -export MDSD_FLUENT_SOCKET_PORT="29230" -echo "export MDSD_FLUENT_SOCKET_PORT=$MDSD_FLUENT_SOCKET_PORT" >> ~/.bashrc -#skip imds lookup since not used in legacy auth path +#skip imds lookup since not used either legacy or aad msi auth path export SKIP_IMDS_LOOKUP_FOR_LEGACY_AUTH="true" echo "export SKIP_IMDS_LOOKUP_FOR_LEGACY_AUTH=$SKIP_IMDS_LOOKUP_FOR_LEGACY_AUTH" >> ~/.bashrc - +# this used by mdsd to determine cloud specific LA endpoints +export OMS_TLD=$domain +echo "export OMS_TLD=$OMS_TLD" >> ~/.bashrc +cat /etc/mdsd.d/envmdsd | while read line; do + echo $line >> ~/.bashrc +done +source /etc/mdsd.d/envmdsd +MDSD_AAD_MSI_AUTH_ARGS="" +# check if its AAD Auth MSI mode via USING_AAD_MSI_AUTH +export AAD_MSI_AUTH_MODE=false +if [ "${USING_AAD_MSI_AUTH}" == "true" ]; then + echo "*** activating oneagent in aad auth msi mode ***" + # msi auth specific args + MDSD_AAD_MSI_AUTH_ARGS="-a -A" + export AAD_MSI_AUTH_MODE=true + echo "export AAD_MSI_AUTH_MODE=true" >> ~/.bashrc + # this used by mdsd to determine the cloud specific AMCS endpoints + export customEnvironment=$CLOUD_ENVIRONMENT + echo "export customEnvironment=$customEnvironment" >> ~/.bashrc + export MDSD_FLUENT_SOCKET_PORT="28230" + echo "export MDSD_FLUENT_SOCKET_PORT=$MDSD_FLUENT_SOCKET_PORT" >> ~/.bashrc + export ENABLE_MCS="true" + echo "export ENABLE_MCS=$ENABLE_MCS" >> ~/.bashrc + export MONITORING_USE_GENEVA_CONFIG_SERVICE="false" + echo "export MONITORING_USE_GENEVA_CONFIG_SERVICE=$MONITORING_USE_GENEVA_CONFIG_SERVICE" >> ~/.bashrc + export MDSD_USE_LOCAL_PERSISTENCY="false" + echo "export MDSD_USE_LOCAL_PERSISTENCY=$MDSD_USE_LOCAL_PERSISTENCY" >> ~/.bashrc +else + echo "*** activating oneagent in legacy auth mode ***" + CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" + #use the file path as its secure than env + CIWORKSPACE_keyFile="/etc/omsagent-secret/KEY" + echo "setting mdsd workspaceid & key for workspace:$CIWORKSPACE_id" + export CIWORKSPACE_id=$CIWORKSPACE_id + echo "export CIWORKSPACE_id=$CIWORKSPACE_id" >> ~/.bashrc + export CIWORKSPACE_keyFile=$CIWORKSPACE_keyFile + echo "export CIWORKSPACE_keyFile=$CIWORKSPACE_keyFile" >> ~/.bashrc + export MDSD_FLUENT_SOCKET_PORT="29230" + echo "export MDSD_FLUENT_SOCKET_PORT=$MDSD_FLUENT_SOCKET_PORT" >> ~/.bashrc +fi source ~/.bashrc dpkg -l | grep mdsd | awk '{print $2 " " $3}' -if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then - echo "starting mdsd with mdsd-port=26130, fluentport=26230 and influxport=26330 in legacy auth mode in sidecar container..." +if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then + echo "starting mdsd with mdsd-port=26130, fluentport=26230 and influxport=26330 in sidecar container..." 
#use tenant name to avoid unix socket conflict and different ports for port conflict #roleprefix to use container specific mdsd socket export TENANT_NAME="${CONTAINER_TYPE}" @@ -482,23 +514,23 @@ if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then source ~/.bashrc mkdir /var/run/mdsd-${CONTAINER_TYPE} # add -T 0xFFFF for full traces - mdsd -r ${MDSD_ROLE_PREFIX} -p 26130 -f 26230 -i 26330 -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & -else - echo "starting mdsd in legacy auth mode in main container..." - # add -T 0xFFFF for full traces - mdsd -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & + mdsd ${MDSD_AAD_MSI_AUTH_ARGS} -r ${MDSD_ROLE_PREFIX} -p 26130 -f 26230 -i 26330 -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & +else + echo "starting mdsd mode in main container..." + # add -T 0xFFFF for full traces + mdsd ${MDSD_AAD_MSI_AUTH_ARGS} -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & fi -# no dependency on fluentd for prometheus side car container -if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then +# no dependency on fluentd for prometheus side car container +if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then if [ ! -e "/etc/config/kube.conf" ]; then echo "*** starting fluentd v1 in daemonset" fluentd -c /etc/fluent/container.conf -o /var/opt/microsoft/docker-cimprov/log/fluentd.log --log-rotate-age 5 --log-rotate-size 20971520 & else echo "*** starting fluentd v1 in replicaset" fluentd -c /etc/fluent/kube.conf -o /var/opt/microsoft/docker-cimprov/log/fluentd.log --log-rotate-age 5 --log-rotate-size 20971520 & - fi -fi + fi +fi #If config parsing was successful, a copy of the conf file with replaced custom settings file is created if [ ! -e "/etc/config/kube.conf" ]; then @@ -635,7 +667,7 @@ echo "getting rsyslog status..." 
service rsyslog status shutdown() { - pkill -f mdsd + pkill -f mdsd } trap "shutdown" SIGTERM diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index ad7cc2232..933c14aed 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -9,8 +9,8 @@ sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ dpkg-reconfigure --frontend=noninteractive locales && \ update-locale LANG=en_US.UTF-8 -#install oneagent - Official bits (05/17/2021) -wget https://github.com/microsoft/Docker-Provider/releases/download/05172021-oneagent/azure-mdsd_1.10.1-build.master.213_x86_64.deb +#install oneagent - Official bits (06/24/2021) +wget https://github.com/microsoft/Docker-Provider/releases/download/06242021-oneagent/azure-mdsd_1.10.3-build.master.241_x86_64.deb /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb cp -f $TMPDIR/mdsd.xml /etc/mdsd.d diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 1bb9a3468..3cbc11e20 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -43,17 +43,49 @@ function Start-FileSystemWatcher { function Set-EnvironmentVariables { $domain = "opinsights.azure.com" - $cloud_environment = "public" + $mcs_endpoint = "monitor.azure.com" + $cloud_environment = "azurepubliccloud" if (Test-Path /etc/omsagent-secret/DOMAIN) { # TODO: Change to omsagent-secret before merging $domain = Get-Content /etc/omsagent-secret/DOMAIN - $cloud_environment = "national" + if (![string]::IsNullOrEmpty($domain)) { + if ($domain -eq "opinsights.azure.com") { + $cloud_environment = "azurepubliccloud" + $mcs_endpoint = "monitor.azure.com" + } elseif ($domain -eq "opinsights.azure.cn") { + $cloud_environment = "azurechinacloud" + $mcs_endpoint = "monitor.azure.cn" + } elseif ($domain -eq "opinsights.azure.us") { + $cloud_environment = "azureusgovernmentcloud" + $mcs_endpoint = "monitor.azure.us" + } elseif ($domain -eq "opinsights.azure.eaglex.ic.gov") { + $cloud_environment = "usnat" + $mcs_endpoint = "monitor.azure.eaglex.ic.gov" + } elseif ($domain -eq "opinsights.azure.microsoft.scloud") { + $cloud_environment = "ussec" + $mcs_endpoint = "monitor.azure.microsoft.scloud" + } else { + Write-Host "Invalid or Unsupported domain name $($domain). EXITING....." + exit 1 + } + } else { + Write-Host "Domain name either null or empty. EXITING....." + exit 1 + } } + Write-Host "Log analytics domain: $($domain)" + Write-Host "MCS endpoint: $($mcs_endpoint)" + Write-Host "Cloud Environment: $($cloud_environment)" + # Set DOMAIN [System.Environment]::SetEnvironmentVariable("DOMAIN", $domain, "Process") [System.Environment]::SetEnvironmentVariable("DOMAIN", $domain, "Machine") + # Set MCS Endpoint + [System.Environment]::SetEnvironmentVariable("MCS_ENDPOINT", $mcs_endpoint, "Process") + [System.Environment]::SetEnvironmentVariable("MCS_ENDPOINT", $mcs_endpoint, "Machine") + # Set CLOUD_ENVIRONMENT [System.Environment]::SetEnvironmentVariable("CLOUD_ENVIRONMENT", $cloud_environment, "Process") [System.Environment]::SetEnvironmentVariable("CLOUD_ENVIRONMENT", $cloud_environment, "Machine") @@ -158,7 +190,7 @@ function Set-EnvironmentVariables { Write-Host $_.Exception } } - + # Check if the fetched IKey was properly encoded. 
if not then turn off telemetry if ($aiKeyFetched -match '^[A-Za-z0-9=]+$') { Write-Host "Using cloud-specific instrumentation key" @@ -229,6 +261,21 @@ function Set-EnvironmentVariables { Write-Host "Failed to set environment variable HOSTNAME for target 'machine' since it is either null or empty" } + # check if its AAD Auth MSI mode via USING_AAD_MSI_AUTH environment variable + $isAADMSIAuth = [System.Environment]::GetEnvironmentVariable("USING_AAD_MSI_AUTH", "process") + if (![string]::IsNullOrEmpty($isAADMSIAuth)) { + [System.Environment]::SetEnvironmentVariable("AAD_MSI_AUTH_MODE", $isAADMSIAuth, "Process") + [System.Environment]::SetEnvironmentVariable("AAD_MSI_AUTH_MODE", $isAADMSIAuth, "Machine") + Write-Host "Successfully set environment variable AAD_MSI_AUTH_MODE - $($isAADMSIAuth) for target 'machine'..." + } + + # check if use token proxy endpoint set via USE_IMDS_TOKEN_PROXY_END_POINT environment variable + $useIMDSTokenProxyEndpoint = [System.Environment]::GetEnvironmentVariable("USE_IMDS_TOKEN_PROXY_END_POINT", "process") + if (![string]::IsNullOrEmpty($useIMDSTokenProxyEndpoint)) { + [System.Environment]::SetEnvironmentVariable("USE_IMDS_TOKEN_PROXY_END_POINT", $useIMDSTokenProxyEndpoint, "Process") + [System.Environment]::SetEnvironmentVariable("USE_IMDS_TOKEN_PROXY_END_POINT", $useIMDSTokenProxyEndpoint, "Machine") + Write-Host "Successfully set environment variable USE_IMDS_TOKEN_PROXY_END_POINT - $($useIMDSTokenProxyEndpoint) for target 'machine'..." + } $nodeIp = [System.Environment]::GetEnvironmentVariable("NODE_IP", "process") if (![string]::IsNullOrEmpty($nodeIp)) { [System.Environment]::SetEnvironmentVariable("NODE_IP", $nodeIp, "machine") @@ -427,7 +474,15 @@ function Start-Telegraf { else { Write-Host "Failed to set environment variable KUBERNETES_SERVICE_PORT for target 'machine' since it is either null or empty" } - + $nodeIp = [System.Environment]::GetEnvironmentVariable("NODE_IP", "process") + if (![string]::IsNullOrEmpty($nodeIp)) { + [System.Environment]::SetEnvironmentVariable("NODE_IP", $nodeIp, "machine") + Write-Host "Successfully set environment variable NODE_IP - $($nodeIp) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable NODE_IP for target 'machine' since it is either null or empty" + } + Write-Host "Installing telegraf service" C:\opt\telegraf\telegraf.exe --service install --config "C:\etc\telegraf\telegraf.conf" @@ -524,8 +579,13 @@ if (![string]::IsNullOrEmpty($requiresCertBootstrap) -and ` Bootstrap-CACertificates } -Generate-Certificates -Test-CertificatePath +$isAADMSIAuth = [System.Environment]::GetEnvironmentVariable("USING_AAD_MSI_AUTH") +if (![string]::IsNullOrEmpty($isAADMSIAuth) -and $isAADMSIAuth.ToLower() -eq 'true') { + Write-Host "skipping agent onboarding via cert since AAD MSI Auth configured" +} else { + Generate-Certificates + Test-CertificatePath +} Start-Fluent-Telegraf # List all powershell processes running. 
This should have main.ps1 and filesystemwatcher.ps1 diff --git a/scripts/dcr-onboarding/ci-extension-dcr-streams.md b/scripts/dcr-onboarding/ci-extension-dcr-streams.md new file mode 100644 index 000000000..cbac41838 --- /dev/null +++ b/scripts/dcr-onboarding/ci-extension-dcr-streams.md @@ -0,0 +1,186 @@ +# 1 - ContainerLogV2 +> Note- Please note, this table uses NG schema +``` +stream-id: Microsoft-ContainerLogV2 +data-type: CONTAINERINSIGHTS_CONTAINERLOGV2 +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: ContainerLogV2 +alias-stream-id: Microsoft-ContainerLogV2 +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 2 - InsightsMetrics +``` +stream-id: Microsoft-InsightsMetrics +data-type: INSIGHTS_METRICS_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: InsightsMetrics +alias-stream-id: Microsoft-InsightsMetrics +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 3 - ContainerInventory + +``` +stream-id: Microsoft-ContainerInventory +data-type: CONTAINER_INVENTORY_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: ContainerInventory +alias-stream-id: Microsoft-ContainerInventory +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 4 - ContainerLog + +``` +stream-id: Microsoft-ContainerLog +data-type: CONTAINER_LOG_BLOB +intelligence-pack: Containers +solutions: ContainerInsights +platform: Any +la-table-name: ContainerLog +alias-stream-id: Microsoft-ContainerLog +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 5 - ContainerNodeInventory + +``` +stream-id: Microsoft-ContainerNodeInventory +data-type: CONTAINER_NODE_INVENTORY_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: ContainerNodeInventory +alias-stream-id: Microsoft-ContainerNodeInventory +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 6 - KubePodInventory +``` +stream-id: Microsoft-KubePodInventory +data-type: KUBE_POD_INVENTORY_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: KubePodInventory +alias-stream-id: Microsoft-KubePodInventory +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 7 - KubeNodeInventory +``` +stream-id: Microsoft-KubeNodeInventory +data-type: KUBE_NODE_INVENTORY_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: KubeNodeInventory +alias-stream-id: Microsoft-KubeNodeInventory +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 8 - KubePVInventory +``` +stream-id: Microsoft-KubePVInventory +data-type: KUBE_PV_INVENTORY_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: KubePVInventory +alias-stream-id: Microsoft-KubePVInventory +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 9 - KubeEvents +``` +stream-id: Microsoft-KubeEvents +data-type: KUBE_EVENTS_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: KubeEvents +alias-stream-id: Microsoft-KubeEvents +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 10 - KubeServices +``` +stream-id: Microsoft-KubeServices +data-type: KUBE_SERVICES_BLOB 
+intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: KubeServices +alias-stream-id: Microsoft-KubeServices +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 11 - KubeMonAgentEvents +``` +stream-id: Microsoft-KubeMonAgentEvents +data-type: KUBE_MON_AGENT_EVENTS_BLOB +intelligence-pack: Containers +solutions: ContainerInsights +platform: Any +la-table-name: KubeMonAgentEvents +alias-stream-id: Microsoft-KubeMonAgentEvents +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 12 - KubeHealth +``` +stream-id: Microsoft-KubeHealth +data-type: KUBE_HEALTH_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: KubeHealth +alias-stream-id: Microsoft-KubeHealth +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 13 - Perf +``` +> Note - This stream already exists +stream-id: Microsoft-Perf +data-type: LINUX_PERF_BLOB +intelligence-pack: LogManagement +solutions: ContainerInsights +platform: Any +la-table-name: LogManagement +alias-stream-id: Microsoft-Perf +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` diff --git a/scripts/dcr-onboarding/ci-extension-dcr.json b/scripts/dcr-onboarding/ci-extension-dcr.json new file mode 100644 index 000000000..f3fbec79b --- /dev/null +++ b/scripts/dcr-onboarding/ci-extension-dcr.json @@ -0,0 +1,59 @@ +{ + "location": "", + "properties": { + "dataSources": { + "extensions": [ + { + "name": "ContainerInsightsExtension", + "streams": [ + "Microsoft-Perf", + "Microsoft-ContainerInventory", + "Microsoft-ContainerLog", + "Microsoft-ContainerLogV2", + "Microsoft-ContainerNodeInventory", + "Microsoft-KubeEvents", + "Microsoft-KubeHealth", + "Microsoft-KubeMonAgentEvents", + "Microsoft-KubeNodeInventory", + "Microsoft-KubePodInventory", + "Microsoft-KubePVInventory", + "Microsoft-KubeServices", + "Microsoft-InsightsMetrics" + + ], + "extensionName": "ContainerInsights" + } + ] + }, + "destinations": { + "logAnalytics": [ + { + "workspaceResourceId": "/subscriptions//resourcegroups//providers/microsoft.operationalinsights/workspaces/", + "name": "ciworkspace" + } + ] + }, + "dataFlows": [ + { + "streams": [ + "Microsoft-Perf", + "Microsoft-ContainerInventory", + "Microsoft-ContainerLog", + "Microsoft-ContainerLogV2", + "Microsoft-ContainerNodeInventory", + "Microsoft-KubeEvents", + "Microsoft-KubeHealth", + "Microsoft-KubeMonAgentEvents", + "Microsoft-KubeNodeInventory", + "Microsoft-KubePodInventory", + "Microsoft-KubePVInventory", + "Microsoft-KubeServices", + "Microsoft-InsightsMetrics" + ], + "destinations": [ + "ciworkspace" + ] + } + ] + } +} diff --git a/source/plugins/go/src/extension/extension.go b/source/plugins/go/src/extension/extension.go new file mode 100644 index 000000000..c68140ded --- /dev/null +++ b/source/plugins/go/src/extension/extension.go @@ -0,0 +1,101 @@ +package extension + +import ( + "encoding/json" + "fmt" + "log" + "sync" + "strings" + uuid "github.com/google/uuid" + "github.com/ugorji/go/codec" +) + +type Extension struct { + datatypeStreamIdMap map[string]string +} + +var singleton *Extension +var once sync.Once +var extensionconfiglock sync.Mutex +var logger *log.Logger +var containerType string + +func GetInstance(flbLogger *log.Logger, containerType string) *Extension { + once.Do(func() { + singleton = &Extension{make(map[string]string)} + flbLogger.Println("Extension Instance created") + }) + logger = 
flbLogger + containerType = containerType + return singleton +} + +func (e *Extension) GetOutputStreamId(datatype string) string { + extensionconfiglock.Lock() + defer extensionconfiglock.Unlock() + if len(e.datatypeStreamIdMap) > 0 && e.datatypeStreamIdMap[datatype] != "" { + message := fmt.Sprintf("OutputstreamId: %s for the datatype: %s", e.datatypeStreamIdMap[datatype], datatype) + logger.Printf(message) + return e.datatypeStreamIdMap[datatype] + } + var err error + e.datatypeStreamIdMap, err = getDataTypeToStreamIdMapping() + if err != nil { + message := fmt.Sprintf("Error getting datatype to streamid mapping: %s", err.Error()) + logger.Printf(message) + } + return e.datatypeStreamIdMap[datatype] +} + +func getDataTypeToStreamIdMapping() (map[string]string, error) { + logger.Printf("extensionconfig::getDataTypeToStreamIdMapping:: getting extension config from fluent socket - start") + guid := uuid.New() + datatypeOutputStreamMap := make(map[string]string) + + taggedData := map[string]interface{}{"Request": "AgentTaggedData", "RequestId": guid.String(), "Tag": "ContainerInsights", "Version": "1"} + jsonBytes, err := json.Marshal(taggedData) + + var data []byte + enc := codec.NewEncoderBytes(&data, new(codec.MsgpackHandle)) + if err := enc.Encode(string(jsonBytes)); err != nil { + return datatypeOutputStreamMap, err + } + + fs := &FluentSocketWriter{ } + fs.sockAddress = "/var/run/mdsd/default_fluent.socket" + if containerType != "" && strings.Compare(strings.ToLower(containerType), "prometheussidecar") == 0 { + fs.sockAddress = fmt.Sprintf("/var/run/mdsd-%s/default_fluent.socket", containerType) + } + responseBytes, err := fs.WriteAndRead(data) + defer fs.disConnect() + logger.Printf("Info::mdsd::Making call to FluentSocket: %s to write and read the config data", fs.sockAddress) + if err != nil { + return datatypeOutputStreamMap, err + } + response := string(responseBytes) + + var responseObjet AgentTaggedDataResponse + err = json.Unmarshal([]byte(response), &responseObjet) + if err != nil { + logger.Printf("Error::mdsd::Failed to unmarshal config data. 
Error message: %s", string(err.Error())) + return datatypeOutputStreamMap, err + } + + var extensionData TaggedData + json.Unmarshal([]byte(responseObjet.TaggedData), &extensionData) + + extensionConfigs := extensionData.ExtensionConfigs + logger.Printf("Info::mdsd::build the datatype and streamid map -- start") + for _, extensionConfig := range extensionConfigs { + outputStreams := extensionConfig.OutputStreams + for dataType, outputStreamID := range outputStreams { + logger.Printf("Info::mdsd::datatype: %s, outputstreamId: %s", dataType, outputStreamID) + datatypeOutputStreamMap[dataType] = outputStreamID.(string) + } + } + logger.Printf("Info::mdsd::build the datatype and streamid map -- end") + + logger.Printf("extensionconfig::getDataTypeToStreamIdMapping:: getting extension config from fluent socket-end") + + return datatypeOutputStreamMap, nil +} diff --git a/source/plugins/go/src/extension/interfaces.go b/source/plugins/go/src/extension/interfaces.go new file mode 100644 index 000000000..c70ef17b8 --- /dev/null +++ b/source/plugins/go/src/extension/interfaces.go @@ -0,0 +1,34 @@ +package extension + +// AgentTaggedDataResponse struct for response from AgentTaggedData request +type AgentTaggedDataResponse struct { + Request string `json:"Request"` + RequestID string `json:"RequestId"` + Version string `json:"Version"` + Success bool `json:"Success"` + Description string `json:"Description"` + TaggedData string `json:"TaggedData"` +} + +// TaggedData structure for respone +type TaggedData struct { + SchemaVersion int `json:"schemaVersion"` + Version int `json:"version"` + ExtensionName string `json:"extensionName"` + ExtensionConfigs []ExtensionConfig `json:"extensionConfigurations"` + OutputStreamDefinitions map[string]StreamDefinition `json:"outputStreamDefinitions"` +} + +// StreamDefinition structure for named pipes +type StreamDefinition struct { + NamedPipe string `json:"namedPipe"` +} + +// ExtensionConfig structure for extension definition in DCR +type ExtensionConfig struct { + ID string `json:"id"` + OriginIds []string `json:"originIds"` + ExtensionSettings map[string]interface{} `json:"extensionSettings"` + InputStreams map[string]interface{} `json:"inputStreams"` + OutputStreams map[string]interface{} `json:"outputStreams"` +} diff --git a/source/plugins/go/src/extension/socket_writer.go b/source/plugins/go/src/extension/socket_writer.go new file mode 100644 index 000000000..1b16b319c --- /dev/null +++ b/source/plugins/go/src/extension/socket_writer.go @@ -0,0 +1,85 @@ +package extension + +import ( + "net" +) + +//MaxRetries for trying to write data to the socket +const MaxRetries = 5 + +//ReadBufferSize for reading data from sockets +//Current CI extension config size is ~5KB and going with 20KB to handle any future scenarios +const ReadBufferSize = 20480 + +//FluentSocketWriter writes data to AMA's default fluent socket +type FluentSocketWriter struct { + socket net.Conn + sockAddress string +} + +func (fs *FluentSocketWriter) connect() error { + c, err := net.Dial("unix", fs.sockAddress) + if err != nil { + return err + } + fs.socket = c + return nil +} + +func (fs *FluentSocketWriter) disConnect() error { + if (fs.socket != nil) { + fs.socket.Close() + fs.socket = nil + } + return nil +} + +func (fs *FluentSocketWriter) writeWithRetries(data []byte) (int, error) { + var ( + err error + n int + ) + for i := 0; i < MaxRetries; i++ { + n, err = fs.socket.Write(data) + if err == nil { + return n, nil + } + } + if err, ok := err.(net.Error); !ok || !err.Temporary() { + // 
so that connect() is called next time if write fails + // this happens when mdsd is restarted + _ = fs.socket.Close() // no need to log the socket closing error + fs.socket = nil + } + return 0, err +} + +func (fs *FluentSocketWriter) read() ([]byte, error) { + buf := make([]byte, ReadBufferSize) + n, err := fs.socket.Read(buf) + if err != nil { + return nil, err + } + return buf[:n], nil + +} + +func (fs *FluentSocketWriter) Write(payload []byte) (int, error) { + if fs.socket == nil { + // previous write failed with permanent error and socket was closed. + if err := fs.connect(); err != nil { + return 0, err + } + } + + return fs.writeWithRetries(payload) +} + +//WriteAndRead writes data to the socket and sends the response back +func (fs *FluentSocketWriter) WriteAndRead(payload []byte) ([]byte, error) { + _, err := fs.Write(payload) + if err != nil { + return nil, err + } + return fs.read() +} diff --git a/source/plugins/go/src/go.mod b/source/plugins/go/src/go.mod index c3e6c2044..db29a0553 100644 --- a/source/plugins/go/src/go.mod +++ b/source/plugins/go/src/go.mod @@ -23,7 +23,7 @@ require ( github.com/philhofer/fwd v1.0.0 // indirect github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect github.com/tinylib/msgp v1.1.2 - github.com/ugorji/go v1.1.2-0.20180813092308-00b869d2f4a5 // indirect + github.com/ugorji/go v1.1.2-0.20180813092308-00b869d2f4a5 golang.org/x/net v0.0.0-20200421231249-e086a090c8fd // indirect golang.org/x/time v0.0.0-20161028155119-f51c12702a4d // indirect gopkg.in/inf.v0 v0.9.0 // indirect diff --git a/source/plugins/go/src/ingestion_token_utils.go b/source/plugins/go/src/ingestion_token_utils.go new file mode 100644 index 000000000..c96685042 --- /dev/null +++ b/source/plugins/go/src/ingestion_token_utils.go @@ -0,0 +1,516 @@ +package main + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "regexp" + "strconv" + "strings" + "time" +) + +const IMDSTokenPathForWindows = "c:/etc/imds-access-token/token" // only used in windows +const AMCSAgentConfigAPIVersion = "2020-08-01-preview" +const AMCSIngestionTokenAPIVersion = "2020-04-01-preview" +const MaxRetries = 3 + +var IMDSToken string +var IMDSTokenExpiration int64 + +var ConfigurationId string +var ChannelId string + +var IngestionAuthToken string +var IngestionAuthTokenExpiration int64 + +type IMDSResponse struct { + AccessToken string `json:"access_token"` + ClientID string `json:"client_id"` + ExpiresIn string `json:"expires_in"` + ExpiresOn string `json:"expires_on"` + ExtExpiresIn string `json:"ext_expires_in"` + NotBefore string `json:"not_before"` + Resource string `json:"resource"` + TokenType string `json:"token_type"` +} + +type AgentConfiguration struct { + Configurations []struct { + Configurationid string `json:"configurationId"` + Etag string `json:"eTag"` + Op string `json:"op"` + Content struct { + Datasources []struct { + Configuration struct { + Extensionname string `json:"extensionName"` + } `json:"configuration"` + ID string `json:"id"` + Kind string `json:"kind"` + Streams []struct { + Stream string `json:"stream"` + Solution string `json:"solution"` + Extensionoutputstream string `json:"extensionOutputStream"` + } `json:"streams"` + Sendtochannels []string `json:"sendToChannels"` + } `json:"dataSources"` + Channels []struct { + Endpoint string `json:"endpoint"` + ID string `json:"id"` + Protocol string `json:"protocol"` + } `json:"channels"` + Extensionconfigurations struct { + 
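+			// Nested view of the extensionConfigurations section of the AMCS agent configuration
+			// response: each outputStreams field maps a Container Insights data type (for example
+			// LINUX_PERF_BLOB) to the output stream id of the data collection rule it is routed to.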
Containerinsights []struct { + ID string `json:"id"` + Originids []string `json:"originIds"` + Outputstreams struct { + LinuxPerfBlob string `json:"LINUX_PERF_BLOB"` + ContainerInventoryBlob string `json:"CONTAINER_INVENTORY_BLOB"` + ContainerLogBlob string `json:"CONTAINER_LOG_BLOB"` + ContainerinsightsContainerlogv2 string `json:"CONTAINERINSIGHTS_CONTAINERLOGV2"` + ContainerNodeInventoryBlob string `json:"CONTAINER_NODE_INVENTORY_BLOB"` + KubeEventsBlob string `json:"KUBE_EVENTS_BLOB"` + KubeHealthBlob string `json:"KUBE_HEALTH_BLOB"` + KubeMonAgentEventsBlob string `json:"KUBE_MON_AGENT_EVENTS_BLOB"` + KubeNodeInventoryBlob string `json:"KUBE_NODE_INVENTORY_BLOB"` + KubePodInventoryBlob string `json:"KUBE_POD_INVENTORY_BLOB"` + KubePvInventoryBlob string `json:"KUBE_PV_INVENTORY_BLOB"` + KubeServicesBlob string `json:"KUBE_SERVICES_BLOB"` + InsightsMetricsBlob string `json:"INSIGHTS_METRICS_BLOB"` + } `json:"outputStreams"` + } `json:"ContainerInsights"` + } `json:"extensionConfigurations"` + } `json:"content"` + } `json:"configurations"` +} + +type IngestionTokenResponse struct { + Configurationid string `json:"configurationId"` + Ingestionauthtoken string `json:"ingestionAuthToken"` +} + +func getAccessTokenFromIMDS() (string, int64, error) { + Log("Info getAccessTokenFromIMDS: start") + useIMDSTokenProxyEndPoint := os.Getenv("USE_IMDS_TOKEN_PROXY_END_POINT") + imdsAccessToken := "" + var responseBytes []byte + var err error + + if (useIMDSTokenProxyEndPoint != "" && strings.Compare(strings.ToLower(useIMDSTokenProxyEndPoint), "true") == 0) { + Log("Info Reading IMDS Access Token from IMDS Token proxy endpoint") + mcsEndpoint := os.Getenv("MCS_ENDPOINT") + msi_endpoint_string := fmt.Sprintf("http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https://%s/", mcsEndpoint) + var msi_endpoint *url.URL + msi_endpoint, err := url.Parse(msi_endpoint_string) + if err != nil { + Log("getAccessTokenFromIMDS: Error creating IMDS endpoint URL: %s", err.Error()) + return imdsAccessToken, 0, err + } + req, err := http.NewRequest("GET", msi_endpoint.String(), nil) + if err != nil { + Log("getAccessTokenFromIMDS: Error creating HTTP request: %s", err.Error()) + return imdsAccessToken, 0, err + } + req.Header.Add("Metadata", "true") + + //IMDS endpoint nonroutable endpoint and requests doesnt go through proxy hence using dedicated http client + httpClient := &http.Client{Timeout: 30 * time.Second} + + // Call managed services for Azure resources token endpoint + var resp *http.Response = nil + IsSuccess := false + for retryCount := 0; retryCount < MaxRetries; retryCount++ { + resp, err = httpClient.Do(req) + if err != nil { + message := fmt.Sprintf("getAccessTokenFromIMDS: Error calling token endpoint: %s, retryCount: %d", err.Error(), retryCount) + Log(message) + SendException(message) + continue + } + + if resp != nil && resp.Body != nil { + defer resp.Body.Close() + } + + Log("getAccessTokenFromIMDS: IMDS Response Status: %d, retryCount: %d", resp.StatusCode, retryCount) + if IsRetriableError(resp.StatusCode) { + message := fmt.Sprintf("getAccessTokenFromIMDS: IMDS Request failed with an error code: %d, retryCount: %d", resp.StatusCode, retryCount) + Log(message) + retryDelay := time.Duration((retryCount + 1) * 100) * time.Millisecond + if resp.StatusCode == 429 { + if resp != nil && resp.Header.Get("Retry-After") != "" { + after, err := strconv.ParseInt(resp.Header.Get("Retry-After"), 10, 64) + if err != nil && after > 0 { + retryDelay = time.Duration(after) * 
time.Second + } + } + } + time.Sleep(retryDelay) + continue + } else if resp.StatusCode != 200 { + message := fmt.Sprintf("getAccessTokenFromIMDS: IMDS Request failed with nonretryable error code: %d, retryCount: %d", resp.StatusCode, retryCount) + Log(message) + SendException(message) + return imdsAccessToken, 0, err + } + IsSuccess = true + break // call succeeded, don't retry any more + } + if !IsSuccess || resp == nil || resp.Body == nil { + Log("getAccessTokenFromIMDS: IMDS Request ran out of retries") + return imdsAccessToken, 0, err + } + + // Pull out response body + responseBytes, err = ioutil.ReadAll(resp.Body) + if err != nil { + Log("getAccessTokenFromIMDS: Error reading response body: %s", err.Error()) + return imdsAccessToken, 0, err + } + + } else { + Log("Info Reading IMDS Access Token from file : %s", IMDSTokenPathForWindows) + if _, err = os.Stat(IMDSTokenPathForWindows); os.IsNotExist(err) { + Log("getAccessTokenFromIMDS: IMDS token file doesnt exist: %s", err.Error()) + return imdsAccessToken, 0, err + } + //adding retries incase if we ended up reading the token file while the token file being written + for retryCount := 0; retryCount < MaxRetries; retryCount++ { + responseBytes, err = ioutil.ReadFile(IMDSTokenPathForWindows) + if err != nil { + Log("getAccessTokenFromIMDS: Could not read IMDS token from file: %s, retryCount: %d", err.Error(), retryCount) + time.Sleep(time.Duration((retryCount + 1) * 100) * time.Millisecond) + continue + } + break + } + } + + if responseBytes == nil { + Log("getAccessTokenFromIMDS: Error responseBytes is nil") + return imdsAccessToken, 0, err + } + + // Unmarshall response body into struct + var imdsResponse IMDSResponse + err = json.Unmarshal(responseBytes, &imdsResponse) + if err != nil { + Log("getAccessTokenFromIMDS: Error unmarshalling the response: %s", err.Error()) + return imdsAccessToken, 0, err + } + imdsAccessToken = imdsResponse.AccessToken + + expiration, err := strconv.ParseInt(imdsResponse.ExpiresOn, 10, 64) + if err != nil { + Log("getAccessTokenFromIMDS: Error parsing ExpiresOn field from IMDS response: %s", err.Error()) + return imdsAccessToken, 0, err + } + Log("Info getAccessTokenFromIMDS: end") + return imdsAccessToken, expiration, nil +} + +func getAgentConfiguration(imdsAccessToken string) (configurationId string, channelId string, err error) { + Log("Info getAgentConfiguration: start") + configurationId = "" + channelId = "" + var amcs_endpoint *url.URL + osType := os.Getenv("OS_TYPE") + resourceId := os.Getenv("AKS_RESOURCE_ID") + resourceRegion := os.Getenv("AKS_REGION") + mcsEndpoint := os.Getenv("MCS_ENDPOINT") + amcs_endpoint_string := fmt.Sprintf("https://%s.handler.control.%s%s/agentConfigurations?platform=%s&api-version=%s", resourceRegion, mcsEndpoint, resourceId, osType, AMCSAgentConfigAPIVersion) + amcs_endpoint, err = url.Parse(amcs_endpoint_string) + if err != nil { + Log("getAgentConfiguration: Error creating AMCS endpoint URL: %s", err.Error()) + return configurationId, channelId, err + } + + var bearer = "Bearer " + imdsAccessToken + // Create a new request using http + req, err := http.NewRequest("GET", amcs_endpoint.String(), nil) + if err != nil { + message := fmt.Sprintf("getAgentConfiguration: Error creating HTTP request for AMCS endpoint: %s", err.Error()) + Log(message) + return configurationId, channelId, err + } + req.Header.Set("Authorization", bearer) + + var resp *http.Response = nil + IsSuccess := false + for retryCount := 0; retryCount < MaxRetries; retryCount++ { + resp, err = 
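+		// HTTPClient is the shared client created in CreateHTTPClient; in AAD MSI auth mode it
+		// uses a plain transport (no client certificate) plus the configured proxy, if any.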
HTTPClient.Do(req) + if err != nil { + message := fmt.Sprintf("getAgentConfiguration: Error calling AMCS endpoint: %s", err.Error()) + Log(message) + SendException(message) + continue + } + if resp != nil && resp.Body != nil { + defer resp.Body.Close() + } + Log("getAgentConfiguration Response Status: %d", resp.StatusCode) + if IsRetriableError(resp.StatusCode) { + message := fmt.Sprintf("getAgentConfiguration: Request failed with an error code: %d, retryCount: %d", resp.StatusCode, retryCount) + Log(message) + retryDelay := time.Duration((retryCount + 1) * 100) * time.Millisecond + if resp.StatusCode == 429 { + if resp != nil && resp.Header.Get("Retry-After") != "" { + after, err := strconv.ParseInt(resp.Header.Get("Retry-After"), 10, 64) + if err != nil && after > 0 { + retryDelay = time.Duration(after) * time.Second + } + } + } + time.Sleep(retryDelay) + continue + } else if resp.StatusCode != 200 { + message := fmt.Sprintf("getAgentConfiguration: Request failed with nonretryable error code: %d, retryCount: %d", resp.StatusCode, retryCount) + Log(message) + SendException(message) + return configurationId, channelId, err + } + IsSuccess = true + break // call succeeded, don't retry any more + } + if !IsSuccess || resp == nil || resp.Body == nil { + message := fmt.Sprintf("getAgentConfiguration Request ran out of retries") + Log(message) + SendException(message) + return configurationId, channelId, err + } + responseBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + Log("getAgentConfiguration: Error reading response body from AMCS API call: %s", err.Error()) + return configurationId, channelId, err + } + + // Unmarshall response body into struct + var agentConfiguration AgentConfiguration + err = json.Unmarshal(responseBytes, &agentConfiguration) + if err != nil { + message := fmt.Sprintf("getAgentConfiguration: Error unmarshalling the response: %s", err.Error()) + Log(message) + SendException(message) + return configurationId, channelId, err + } + + if len(agentConfiguration.Configurations) == 0 { + message := "getAgentConfiguration: Received empty agentConfiguration.Configurations array" + Log(message) + SendException(message) + return configurationId, channelId, err + } + + if len(agentConfiguration.Configurations[0].Content.Channels) == 0 { + message := "getAgentConfiguration: Received empty agentConfiguration.Configurations[0].Content.Channels" + Log(message) + SendException(message) + return configurationId, channelId, err + } + + configurationId = agentConfiguration.Configurations[0].Configurationid + channelId = agentConfiguration.Configurations[0].Content.Channels[0].ID + + Log("getAgentConfiguration: obtained configurationId: %s, channelId: %s", configurationId, channelId) + Log("Info getAgentConfiguration: end") + + return configurationId, channelId, nil +} + +func getIngestionAuthToken(imdsAccessToken string, configurationId string, channelId string) (ingestionAuthToken string, refreshInterval int64, err error) { + Log("Info getIngestionAuthToken: start") + ingestionAuthToken = "" + refreshInterval = 0 + var amcs_endpoint *url.URL + osType := os.Getenv("OS_TYPE") + resourceId := os.Getenv("AKS_RESOURCE_ID") + resourceRegion := os.Getenv("AKS_REGION") + mcsEndpoint := os.Getenv("MCS_ENDPOINT") + amcs_endpoint_string := fmt.Sprintf("https://%s.handler.control.%s%s/agentConfigurations/%s/channels/%s/issueIngestionToken?platform=%s&api-version=%s", resourceRegion, mcsEndpoint, resourceId, configurationId, channelId, osType, AMCSIngestionTokenAPIVersion) + amcs_endpoint, 
err = url.Parse(amcs_endpoint_string) + if err != nil { + Log("getIngestionAuthToken: Error creating AMCS endpoint URL: %s", err.Error()) + return ingestionAuthToken, refreshInterval, err + } + + var bearer = "Bearer " + imdsAccessToken + // Create a new request using http + req, err := http.NewRequest("GET", amcs_endpoint.String(), nil) + if err != nil { + Log("getIngestionAuthToken: Error creating HTTP request for AMCS endpoint: %s", err.Error()) + return ingestionAuthToken, refreshInterval, err + } + + // add authorization header to the req + req.Header.Add("Authorization", bearer) + + var resp *http.Response = nil + IsSuccess := false + for retryCount := 0; retryCount < MaxRetries; retryCount++ { + // Call managed services for Azure resources token endpoint + resp, err = HTTPClient.Do(req) + if err != nil { + message := fmt.Sprintf("getIngestionAuthToken: Error calling AMCS endpoint for ingestion auth token: %s", err.Error()) + Log(message) + SendException(message) + resp = nil + continue + } + + if resp != nil && resp.Body != nil { + defer resp.Body.Close() + } + + Log("getIngestionAuthToken Response Status: %d", resp.StatusCode) + if IsRetriableError(resp.StatusCode) { + message := fmt.Sprintf("getIngestionAuthToken: Request failed with an error code: %d, retryCount: %d", resp.StatusCode, retryCount) + Log(message) + retryDelay := time.Duration((retryCount + 1) * 100) * time.Millisecond + if resp.StatusCode == 429 { + if resp != nil && resp.Header.Get("Retry-After") != "" { + after, err := strconv.ParseInt(resp.Header.Get("Retry-After"), 10, 64) + if err != nil && after > 0 { + retryDelay = time.Duration(after) * time.Second + } + } + } + time.Sleep(retryDelay) + continue + } else if resp.StatusCode != 200 { + message := fmt.Sprintf("getIngestionAuthToken: Request failed with nonretryable error code: %d, retryCount: %d", resp.StatusCode, retryCount) + Log(message) + SendException(message) + return ingestionAuthToken, refreshInterval, err + } + IsSuccess = true + break + } + + if !IsSuccess || resp == nil || resp.Body == nil { + message := "getIngestionAuthToken: ran out of retries calling AMCS for ingestion token" + Log(message) + SendException(message) + return ingestionAuthToken, refreshInterval, err + } + + // Pull out response body + responseBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + Log("getIngestionAuthToken: Error reading response body from AMCS Ingestion API call : %s", err.Error()) + return ingestionAuthToken, refreshInterval, err + } + + // Unmarshall response body into struct + var ingestionTokenResponse IngestionTokenResponse + err = json.Unmarshal(responseBytes, &ingestionTokenResponse) + if err != nil { + Log("getIngestionAuthToken: Error unmarshalling the response: %s", err.Error()) + return ingestionAuthToken, refreshInterval, err + } + + ingestionAuthToken = ingestionTokenResponse.Ingestionauthtoken + + refreshInterval, err = getTokenRefreshIntervalFromAmcsResponse(resp.Header) + if err != nil { + Log("getIngestionAuthToken: Error failed to parse max-age response header") + return ingestionAuthToken, refreshInterval, err + } + Log("getIngestionAuthToken: refresh interval %d seconds", refreshInterval) + + Log("Info getIngestionAuthToken: end") + return ingestionAuthToken, refreshInterval, nil +} + +var cacheControlHeaderRegex = regexp.MustCompile(`max-age=([0-9]+)`) + +func getTokenRefreshIntervalFromAmcsResponse(header http.Header) (refreshInterval int64, err error) { + cacheControlHeader, valueInMap := header["Cache-Control"] + if !valueInMap { + 
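+		// the issueIngestionToken response is expected to carry a Cache-Control header of the
+		// form "max-age=<seconds>"; without it the refresh interval cannot be determined, so fail.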
return 0, errors.New("getTokenRefreshIntervalFromAmcsResponse: Cache-Control not in passed header") + } + + for _, entry := range cacheControlHeader { + match := cacheControlHeaderRegex.FindStringSubmatch(entry) + if len(match) == 2 { + interval := 0 + interval, err = strconv.Atoi(match[1]) + if err != nil { + Log("getTokenRefreshIntervalFromAmcsResponse: error getting timeout from auth token. Header: " + strings.Join(cacheControlHeader, ",")) + return 0, err + } + refreshInterval = int64(interval) + return refreshInterval, nil + } + } + + return 0, errors.New("getTokenRefreshIntervalFromAmcsResponse: didn't find max-age in response header") +} + +func refreshIngestionAuthToken() { + for ; true; <-IngestionAuthTokenRefreshTicker.C { + if IMDSToken == "" || IMDSTokenExpiration <= (time.Now().Unix() + 60 * 60) { // token valid 24 hrs and refresh token 1 hr before expiry + imdsToken, imdsTokenExpiry, err := getAccessTokenFromIMDS() + if err != nil { + message := fmt.Sprintf("refreshIngestionAuthToken: Error on getAccessTokenFromIMDS %s \n", err.Error()) + Log(message) + SendException(message) + } else { + IMDSToken = imdsToken + IMDSTokenExpiration = imdsTokenExpiry + } + } + if IMDSToken == "" { + message := "refreshIngestionAuthToken: IMDSToken is empty" + Log(message) + SendException(message) + continue + } + var err error + // ignore agent configuration expiring, the configuration and channel IDs will never change (without creating an agent restart) + if ConfigurationId == "" || ChannelId == "" { + ConfigurationId, ChannelId, err = getAgentConfiguration(IMDSToken) + if err != nil { + message := fmt.Sprintf("refreshIngestionAuthToken: Error getAgentConfiguration %s \n", err.Error()) + Log(message) + SendException(message) + continue + } + } + if IMDSToken == "" || ConfigurationId == "" || ChannelId == "" { + message := "refreshIngestionAuthToken: IMDSToken or ConfigurationId or ChannelId empty" + Log(message) + SendException(message) + continue + } + ingestionAuthToken, refreshIntervalInSeconds, err := getIngestionAuthToken(IMDSToken, ConfigurationId, ChannelId) + if err != nil { + message := fmt.Sprintf("refreshIngestionAuthToken: Error getIngestionAuthToken %s \n", err.Error()) + Log(message) + SendException(message) + continue + } + IngestionAuthTokenUpdateMutex.Lock() + ODSIngestionAuthToken = ingestionAuthToken + IngestionAuthTokenUpdateMutex.Unlock() + if refreshIntervalInSeconds > 0 && refreshIntervalInSeconds != defaultIngestionAuthTokenRefreshIntervalSeconds { + //TODO - use Reset which is better when go version upgraded to 1.15 or up rather Stop() and NewTicker + //IngestionAuthTokenRefreshTicker.Reset(time.Second * time.Duration(refreshIntervalInSeconds)) + IngestionAuthTokenRefreshTicker.Stop() + IngestionAuthTokenRefreshTicker = time.NewTicker(time.Second * time.Duration(refreshIntervalInSeconds)) + } + } +} + +func IsRetriableError(httpStatusCode int) bool { + retryableStatusCodes := [5]int{408, 429, 502, 503, 504} + for _, code := range retryableStatusCodes { + if code == httpStatusCode { + return true + } + } + return false +} diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 217ba1efc..0761ef664 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -22,6 +22,7 @@ import ( "github.com/tinylib/msgp/msgp" lumberjack "gopkg.in/natefinch/lumberjack.v2" + "Docker-Provider/source/plugins/go/src/extension" "github.com/Azure/azure-kusto-go/kusto/ingest" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -88,6 
+89,7 @@ const IPName = "ContainerInsights" const defaultContainerInventoryRefreshInterval = 60 const kubeMonAgentConfigEventFlushInterval = 60 +const defaultIngestionAuthTokenRefreshIntervalSeconds = 3600 //Eventsource name in mdsd const MdsdContainerLogSourceName = "ContainerLogSource" @@ -106,6 +108,11 @@ const ContainerLogsV1Route = "v1" //container logs schema (v2=ContainerLogsV2 table in LA, anything else ContainerLogs table in LA. This is applicable only if Container logs route is NOT ADX) const ContainerLogV2SchemaVersion = "v2" +//env variable for AAD MSI Auth mode +const AADMSIAuthMode = "AAD_MSI_AUTH_MODE" + +// Tag prefix of mdsd output streamid for AMA in MSI auth mode +const MdsdOutputStreamIdTagPrefix = "dcr-" //env variable to container type const ContainerTypeEnv = "CONTAINER_TYPE" @@ -168,7 +175,9 @@ var ( // flag to check if its Windows OS IsWindows bool // container type - ContainerType string + ContainerType string + // flag to check whether LA AAD MSI Auth Enabled or not + IsAADMSIAuthMode bool ) var ( @@ -194,6 +203,10 @@ var ( EventHashUpdateMutex = &sync.Mutex{} // parent context used by ADX uploader ParentContext = context.Background() + // IngestionAuthTokenUpdateMutex read and write mutex access for ODSIngestionAuthToken + IngestionAuthTokenUpdateMutex = &sync.Mutex{} + // ODSIngestionAuthToken for windows agent AAD MSI Auth + ODSIngestionAuthToken string ) var ( @@ -201,6 +214,8 @@ var ( ContainerImageNameRefreshTicker *time.Ticker // KubeMonAgentConfigEventsSendTicker to send config events every hour KubeMonAgentConfigEventsSendTicker *time.Ticker + // IngestionAuthTokenRefreshTicker to refresh ingestion token + IngestionAuthTokenRefreshTicker *time.Ticker ) var ( @@ -702,7 +717,11 @@ func flushKubeMonAgentEventRecords() { } } } - if (IsWindows == false && len(msgPackEntries) > 0) { //for linux, mdsd route + if (IsWindows == false && len(msgPackEntries) > 0) { //for linux, mdsd route + if IsAADMSIAuthMode == true && strings.HasPrefix(MdsdKubeMonAgentEventsTagName, MdsdOutputStreamIdTagPrefix) == false { + Log("Info::mdsd::obtaining output stream id for data type: %s", KubeMonAgentEventDataType) + MdsdKubeMonAgentEventsTagName = extension.GetInstance(FLBLogger, ContainerType).GetOutputStreamId(KubeMonAgentEventDataType) + } Log("Info::mdsd:: using mdsdsource name for KubeMonAgentEvents: %s", MdsdKubeMonAgentEventsTagName) msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdKubeMonAgentEventsTagName, msgPackEntries) if MdsdKubeMonMsgpUnixSocketClient == nil { @@ -760,6 +779,16 @@ func flushKubeMonAgentEventRecords() { req.Header.Set("x-ms-AzureResourceId", ResourceID) } + if IsAADMSIAuthMode == true { + IngestionAuthTokenUpdateMutex.Lock() + ingestionAuthToken := ODSIngestionAuthToken + IngestionAuthTokenUpdateMutex.Unlock() + if ingestionAuthToken == "" { + Log("Error::ODS Ingestion Auth Token is empty. 
Please check error log.") + } + req.Header.Set("Authorization", "Bearer "+ingestionAuthToken) + } + resp, err := HTTPClient.Do(req) elapsed = time.Since(start) @@ -904,7 +933,11 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int } } } - if (len(msgPackEntries) > 0) { + if (len(msgPackEntries) > 0) { + if IsAADMSIAuthMode == true && (strings.HasPrefix(MdsdInsightsMetricsTagName, MdsdOutputStreamIdTagPrefix) == false) { + Log("Info::mdsd::obtaining output stream id for InsightsMetricsDataType since Log Analytics AAD MSI Auth Enabled") + MdsdInsightsMetricsTagName = extension.GetInstance(FLBLogger, ContainerType).GetOutputStreamId(InsightsMetricsDataType) + } msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdInsightsMetricsTagName, msgPackEntries) if MdsdInsightsMetricsMsgpUnixSocketClient == nil { Log("Error::mdsd::mdsd connection does not exist. re-connecting ...") @@ -979,6 +1012,18 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int if ResourceCentric == true { req.Header.Set("x-ms-AzureResourceId", ResourceID) } + if IsAADMSIAuthMode == true { + IngestionAuthTokenUpdateMutex.Lock() + ingestionAuthToken := ODSIngestionAuthToken + IngestionAuthTokenUpdateMutex.Unlock() + if ingestionAuthToken == "" { + message := "Error::ODS Ingestion Auth Token is empty. Please check error log." + Log(message) + return output.FLB_RETRY + } + // add authorization header to the req + req.Header.Set("Authorization", "Bearer "+ingestionAuthToken) + } start := time.Now() resp, err := HTTPClient.Do(req) @@ -1184,6 +1229,16 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { if len(msgPackEntries) > 0 && ContainerLogsRouteV2 == true { //flush to mdsd + if IsAADMSIAuthMode == true && strings.HasPrefix(MdsdContainerLogTagName, MdsdOutputStreamIdTagPrefix) == false { + Log("Info::mdsd::obtaining output stream id") + if ContainerLogSchemaV2 == true { + MdsdContainerLogTagName = extension.GetInstance(FLBLogger, ContainerType).GetOutputStreamId(ContainerLogV2DataType) + } else { + MdsdContainerLogTagName = extension.GetInstance(FLBLogger, ContainerType).GetOutputStreamId(ContainerLogDataType) + } + Log("Info::mdsd:: using mdsdsource name: %s", MdsdContainerLogTagName) + } + fluentForward := MsgPackForward{ Tag: MdsdContainerLogTagName, Entries: msgPackEntries, @@ -1343,6 +1398,18 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { req.Header.Set("x-ms-AzureResourceId", ResourceID) } + if IsAADMSIAuthMode == true { + IngestionAuthTokenUpdateMutex.Lock() + ingestionAuthToken := ODSIngestionAuthToken + IngestionAuthTokenUpdateMutex.Unlock() + if ingestionAuthToken == "" { + Log("Error::ODS Ingestion Auth Token is empty. 
Please check error log.") + return output.FLB_RETRY + } + // add authorization header to the req + req.Header.Set("Authorization", "Bearer "+ingestionAuthToken) + } + resp, err := HTTPClient.Do(req) elapsed = time.Since(start) @@ -1439,8 +1506,7 @@ func GetContainerIDK8sNamespacePodNameFromFileName(filename string) (string, str } // InitializePlugin reads and populates plugin configuration -func InitializePlugin(pluginConfPath string, agentVersion string) { - +func InitializePlugin(pluginConfPath string, agentVersion string) { go func() { isTest := os.Getenv("ISTEST") if strings.Compare(strings.ToLower(strings.TrimSpace(isTest)), "true") == 0 { @@ -1541,6 +1607,11 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { } Log("OMSEndpoint %s", OMSEndpoint) + IsAADMSIAuthMode = false + if strings.Compare(strings.ToLower(os.Getenv(AADMSIAuthMode)), "true") == 0 { + IsAADMSIAuthMode = true + Log("AAD MSI Auth Mode Configured") + } ResourceID = os.Getenv(envAKSResourceID) if len(ResourceID) > 0 { @@ -1712,5 +1783,11 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { } MdsdInsightsMetricsTagName = MdsdInsightsMetricsSourceName - MdsdKubeMonAgentEventsTagName = MdsdKubeMonAgentEventsSourceName -} \ No newline at end of file + MdsdKubeMonAgentEventsTagName = MdsdKubeMonAgentEventsSourceName + Log("ContainerLogsRouteADX: %v, IsWindows: %v, IsAADMSIAuthMode = %v \n", ContainerLogsRouteADX, IsWindows, IsAADMSIAuthMode) + if !ContainerLogsRouteADX && IsWindows && IsAADMSIAuthMode { + Log("defaultIngestionAuthTokenRefreshIntervalSeconds = %d \n", defaultIngestionAuthTokenRefreshIntervalSeconds) + IngestionAuthTokenRefreshTicker = time.NewTicker(time.Second * time.Duration(defaultIngestionAuthTokenRefreshIntervalSeconds)) + go refreshIngestionAuthToken() + } +} diff --git a/source/plugins/go/src/utils.go b/source/plugins/go/src/utils.go index 3fe5c6d0e..02d30607e 100644 --- a/source/plugins/go/src/utils.go +++ b/source/plugins/go/src/utils.go @@ -63,27 +63,32 @@ func ReadConfiguration(filename string) (map[string]string, error) { // CreateHTTPClient used to create the client for sending post requests to OMSEndpoint func CreateHTTPClient() { - certFilePath := PluginConfiguration["cert_file_path"] - keyFilePath := PluginConfiguration["key_file_path"] - if IsWindows == false { - certFilePath = fmt.Sprintf(certFilePath, WorkspaceID) - keyFilePath = fmt.Sprintf(keyFilePath, WorkspaceID) - } - cert, err := tls.LoadX509KeyPair(certFilePath, keyFilePath) - if err != nil { - message := fmt.Sprintf("Error when loading cert %s", err.Error()) - SendException(message) - time.Sleep(30 * time.Second) - Log(message) - log.Fatalf("Error when loading cert %s", err.Error()) - } + var transport *http.Transport + if IsAADMSIAuthMode { + transport = &http.Transport{} + } else { + certFilePath := PluginConfiguration["cert_file_path"] + keyFilePath := PluginConfiguration["key_file_path"] + if IsWindows == false { + certFilePath = fmt.Sprintf(certFilePath, WorkspaceID) + keyFilePath = fmt.Sprintf(keyFilePath, WorkspaceID) + } + cert, err := tls.LoadX509KeyPair(certFilePath, keyFilePath) + if err != nil { + message := fmt.Sprintf("Error when loading cert %s", err.Error()) + SendException(message) + time.Sleep(30 * time.Second) + Log(message) + log.Fatalf("Error when loading cert %s", err.Error()) + } - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{cert}, - } + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + } - tlsConfig.BuildNameToCertificate() - transport := 
&http.Transport{TLSClientConfig: tlsConfig} + tlsConfig.BuildNameToCertificate() + transport = &http.Transport{TLSClientConfig: tlsConfig} + } // set the proxy if the proxy configured if ProxyEndpoint != "" { proxyEndpointUrl, err := url.Parse(ProxyEndpoint) diff --git a/source/plugins/ruby/ApplicationInsightsUtility.rb b/source/plugins/ruby/ApplicationInsightsUtility.rb index 31f9503cd..eaa1d903d 100644 --- a/source/plugins/ruby/ApplicationInsightsUtility.rb +++ b/source/plugins/ruby/ApplicationInsightsUtility.rb @@ -21,6 +21,8 @@ class ApplicationInsightsUtility @@EnvApplicationInsightsEndpoint = "APPLICATIONINSIGHTS_ENDPOINT" @@EnvControllerType = "CONTROLLER_TYPE" @@EnvContainerRuntime = "CONTAINER_RUNTIME" + @@EnvAADMSIAuthMode = "AAD_MSI_AUTH_MODE" + @@isWindows = false @@hostName = (OMS::Common.get_hostname) @@os_type = ENV["OS_TYPE"] @@ -82,7 +84,12 @@ def initializeUtility() isProxyConfigured = false $log.info("proxy is not configured") end - + aadAuthMSIMode = ENV[@@EnvAADMSIAuthMode] + if !aadAuthMSIMode.nil? && !aadAuthMSIMode.empty? && aadAuthMSIMode.downcase == "true".downcase + @@CustomProperties["aadAuthMSIMode"] = "true" + else + @@CustomProperties["aadAuthMSIMode"] = "false" + end #Check if telemetry is turned off telemetryOffSwitch = ENV["DISABLE_TELEMETRY"] if telemetryOffSwitch && !telemetryOffSwitch.nil? && !telemetryOffSwitch.empty? && telemetryOffSwitch.downcase == "true".downcase diff --git a/source/plugins/ruby/CustomMetricsUtils.rb b/source/plugins/ruby/CustomMetricsUtils.rb index 220313e6b..fd9290b78 100644 --- a/source/plugins/ruby/CustomMetricsUtils.rb +++ b/source/plugins/ruby/CustomMetricsUtils.rb @@ -13,8 +13,8 @@ def check_custom_metrics_availability if aks_region.to_s.empty? || aks_resource_id.to_s.empty? return false # This will also take care of AKS-Engine Scenario. AKS_REGION/AKS_RESOURCE_ID is not set for AKS-Engine. 
Only ACS_RESOURCE_NAME is set end - - return aks_cloud_environment.to_s.downcase == 'public' + + return aks_cloud_environment.to_s.downcase == 'azurepubliccloud' end end end \ No newline at end of file diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index c40d4c357..40fa80c14 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -106,5 +106,28 @@ class Constants #Pod Statuses POD_STATUS_TERMINATING = "Terminating" + # Data type ids + CONTAINER_INVENTORY_DATA_TYPE = "CONTAINER_INVENTORY_BLOB" + CONTAINER_NODE_INVENTORY_DATA_TYPE = "CONTAINER_NODE_INVENTORY_BLOB" + PERF_DATA_TYPE = "LINUX_PERF_BLOB" + INSIGHTS_METRICS_DATA_TYPE = "INSIGHTS_METRICS_BLOB" + KUBE_SERVICES_DATA_TYPE = "KUBE_SERVICES_BLOB" + KUBE_POD_INVENTORY_DATA_TYPE = "KUBE_POD_INVENTORY_BLOB" + KUBE_NODE_INVENTORY_DATA_TYPE = "KUBE_NODE_INVENTORY_BLOB" + KUBE_PV_INVENTORY_DATA_TYPE = "KUBE_PV_INVENTORY_BLOB" + KUBE_EVENTS_DATA_TYPE = "KUBE_EVENTS_BLOB" + KUBE_MON_AGENT_EVENTS_DATA_TYPE = "KUBE_MON_AGENT_EVENTS_BLOB" + KUBE_HEALTH_DATA_TYPE = "KUBE_HEALTH_BLOB" + CONTAINERLOGV2_DATA_TYPE = "CONTAINERINSIGHTS_CONTAINERLOGV2" + CONTAINERLOG_DATA_TYPE = "CONTAINER_LOG_BLOB" + + #ContainerInsights Extension (AMCS) + CI_EXTENSION_NAME = "ContainerInsights" + CI_EXTENSION_VERSION = "1" + #Current CI extension config size is ~5KB and going with 20KB to handle any future scenarios + CI_EXTENSION_CONFIG_MAX_BYTES = 20480 + ONEAGENT_FLUENT_SOCKET_NAME = "/var/run/mdsd/default_fluent.socket" + #Tag prefix for output stream + EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX = "dcr-" end diff --git a/source/plugins/ruby/filter_health_model_builder.rb b/source/plugins/ruby/filter_health_model_builder.rb index d491f17c2..9decda881 100644 --- a/source/plugins/ruby/filter_health_model_builder.rb +++ b/source/plugins/ruby/filter_health_model_builder.rb @@ -4,11 +4,12 @@ require 'fluent/plugin/filter' -module Fluent::Plugin +module Fluent::Plugin + require_relative 'extension_utils' require 'logger' require 'yajl/json_gem' Dir[File.join(__dir__, './health', '*.rb')].each { |file| require file } - + class FilterHealthModelBuilder < Filter include HealthModel @@ -22,7 +23,7 @@ class FilterHealthModelBuilder < Filter attr_reader :buffer, :model_builder, :health_model_definition, :monitor_factory, :state_finalizers, :monitor_set, :model_builder, :hierarchy_builder, :resources, :kube_api_down_handler, :provider, :reducer, :state, :generator, :telemetry - + @@cluster_id = KubernetesApiClient.getClusterId @@token_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/token" @@cert_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" @@ -56,7 +57,7 @@ def initialize deserialized_state_info = @cluster_health_state.get_state @state.initialize_state(deserialized_state_info) end - + rescue => e ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) end @@ -90,7 +91,14 @@ def filter_stream(tag, es) end begin new_es = Fluent::MultiEventStream.new - time = Time.now + time = Time.now + if ExtensionUtils.isAADMSIAuthMode() + $log.info("filter_health_model_builder::enumerate: AAD AUTH MSI MODE") + if @rewrite_tag.nil? 
|| !@rewrite_tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @rewrite_tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_HEALTH_DATA_TYPE) + end + $log.info("filter_health_model_builder::filter_stream: using tag -#{@rewrite_tag} @ #{Time.now.utc.iso8601}") + end if tag.start_with?("kubehealth.DaemonSet.Node") node_records = [] @@ -222,7 +230,7 @@ def filter_stream(tag, es) @log.info "after optimizing health signals all_monitors.size #{all_monitors.size}" - + # for each key in monitor.keys, # get the state from health_monitor_state # generate the record to send @@ -245,7 +253,7 @@ def filter_stream(tag, es) @cluster_new_state = new_state end end - end + end new_es.add(emit_time, record) } @@ -261,7 +269,7 @@ def filter_stream(tag, es) @telemetry.send # return an empty event stream, else the match will throw a NoMethodError return Fluent::MultiEventStream.new - elsif tag.start_with?(@rewrite_tag) + elsif tag.start_with?(@rewrite_tag) # this filter also acts as a pass through as we are rewriting the tag and emitting to the fluent stream es else @@ -273,6 +281,6 @@ def filter_stream(tag, es) @log.warn "Message: #{e.message} Backtrace: #{e.backtrace}" return nil end - end + end end end diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index b3f9bd08b..862e88e44 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -20,7 +20,8 @@ def initialize require_relative "CAdvisorMetricsAPIClient" require_relative "oms_common" require_relative "omslog" - require_relative "constants" + require_relative "constants" + require_relative "extension_utils" end config_param :run_interval, :time, :default => 60 @@ -61,13 +62,24 @@ def enumerate() batchTime = currentTime.utc.iso8601 @@istestvar = ENV["ISTEST"] begin - eventStream = Fluent::MultiEventStream.new + eventStream = Fluent::MultiEventStream.new insightsMetricsEventStream = Fluent::MultiEventStream.new metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, metricTime: batchTime ) - metricData.each do |record| - eventStream.add(time, record) if record - end - + metricData.each do |record| + eventStream.add(time, record) if record + end + + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_cadvisor_perf::enumerate: AAD AUTH MSI MODE") + if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::PERF_DATA_TYPE) + end + if @insightsmetricstag.nil? 
|| !@insightsmetricstag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @insightsmetricstag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) + end + $log.info("in_cadvisor_perf::enumerate: using perf tag -#{@tag} @ #{Time.now.utc.iso8601}") + $log.info("in_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsmetricstag} @ #{Time.now.utc.iso8601}") + end router.emit_stream(@tag, eventStream) if eventStream router.emit_stream(@mdmtag, eventStream) if eventStream router.emit_stream(@containerhealthtag, eventStream) if eventStream @@ -136,6 +148,6 @@ def run_periodic @mutex.lock end @mutex.unlock - end + end end # CAdvisor_Perf_Input end # module diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index eebf422d6..9fcb7ab90 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -7,17 +7,18 @@ module Fluent::Plugin class Container_Inventory_Input < Input Fluent::Plugin.register_input("containerinventory", self) - @@PluginName = "ContainerInventory" + @@PluginName = "ContainerInventory" def initialize super require "yajl/json_gem" - require "time" + require "time" require_relative "ContainerInventoryState" require_relative "ApplicationInsightsUtility" require_relative "omslog" require_relative "CAdvisorMetricsAPIClient" - require_relative "kubernetes_container_inventory" + require_relative "kubernetes_container_inventory" + require_relative "extension_utils" end config_param :run_interval, :time, :default => 60 @@ -47,21 +48,28 @@ def shutdown @thread.join super # This super must be at the end of shutdown method end - end - + end + def enumerate - currentTime = Time.now + currentTime = Time.now batchTime = currentTime.utc.iso8601 emitTime = Fluent::Engine.now containerInventory = Array.new eventStream = Fluent::MultiEventStream.new hostName = "" - $log.info("in_container_inventory::enumerate : Begin processing @ #{Time.now.utc.iso8601}") + $log.info("in_container_inventory::enumerate : Begin processing @ #{Time.now.utc.iso8601}") + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_container_inventory::enumerate: AAD AUTH MSI MODE") + if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::CONTAINER_INVENTORY_DATA_TYPE) + end + $log.info("in_container_inventory::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") + end begin containerRuntimeEnv = ENV["CONTAINER_RUNTIME"] $log.info("in_container_inventory::enumerate : container runtime : #{containerRuntimeEnv}") clusterCollectEnvironmentVar = ENV["AZMON_CLUSTER_COLLECT_ENV_VAR"] - $log.info("in_container_inventory::enumerate : using cadvisor apis") + $log.info("in_container_inventory::enumerate : using cadvisor apis") containerIds = Array.new response = CAdvisorMetricsAPIClient.getPodsFromCAdvisor(winNode: nil) if !response.nil? && !response.body.nil? @@ -76,10 +84,10 @@ def enumerate end containerIds.push containerRecord["InstanceID"] containerInventory.push containerRecord - end + end end - end - end + end + end # Update the state for deleted containers deletedContainers = ContainerInventoryState.getDeletedContainers(containerIds) if !deletedContainers.nil? && !deletedContainers.empty? @@ -87,13 +95,13 @@ def enumerate container = ContainerInventoryState.readContainerState(deletedContainer) if !container.nil? 
container.each { |k, v| container[k] = v } - container["State"] = "Deleted" + container["State"] = "Deleted" KubernetesContainerInventory.deleteCGroupCacheEntryForDeletedContainer(container["InstanceID"]) containerInventory.push container end end - end - containerInventory.each do |record| + end + containerInventory.each do |record| eventStream.add(emitTime, record) if record end router.emit_stream(@tag, eventStream) if eventStream @@ -148,6 +156,6 @@ def run_periodic @mutex.lock end @mutex.unlock - end + end end # Container_Inventory_Input end # module diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index 6f65dab92..deeae6e14 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -3,7 +3,7 @@ require 'fluent/plugin/input' -module Fluent::Plugin +module Fluent::Plugin class Kube_Event_Input < Input Fluent::Plugin.register_input("kube_events", self) @@KubeEventsStateFile = "/var/opt/microsoft/docker-cimprov/state/KubeEventQueryState.yaml" @@ -18,6 +18,7 @@ def initialize require_relative "oms_common" require_relative "omslog" require_relative "ApplicationInsightsUtility" + require_relative "extension_utils" # refer tomlparser-agent-config for defaults # this configurable via configmap @@ -37,7 +38,7 @@ def configure(conf) super end - def start + def start if @run_interval super if !ENV["EVENTS_CHUNK_SIZE"].nil? && !ENV["EVENTS_CHUNK_SIZE"].empty? && ENV["EVENTS_CHUNK_SIZE"].to_i > 0 @@ -84,8 +85,15 @@ def enumerate batchTime = currentTime.utc.iso8601 eventQueryState = getEventQueryState newEventQueryState = [] - @eventsCount = 0 - + @eventsCount = 0 + + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_kube_events::enumerate: AAD AUTH MSI MODE") + if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_EVENTS_DATA_TYPE) + end + $log.info("in_kube_events::enumerate: using kubeevents tag -#{@tag} @ #{Time.now.utc.iso8601}") + end # Initializing continuation token to nil continuationToken = nil $log.info("in_kube_events::enumerate : Getting events from Kube API @ #{Time.now.utc.iso8601}") @@ -131,8 +139,8 @@ def enumerate end # end enumerate def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTime = Time.utc.iso8601) - currentTime = Time.now - emitTime = Fluent::Engine.now + currentTime = Time.now + emitTime = Fluent::Engine.now @@istestvar = ENV["ISTEST"] begin eventStream = Fluent::MultiEventStream.new @@ -166,7 +174,7 @@ def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTim record["Count"] = items["count"] record["Computer"] = nodeName record["ClusterName"] = KubernetesApiClient.getClusterName - record["ClusterId"] = KubernetesApiClient.getClusterId + record["ClusterId"] = KubernetesApiClient.getClusterId eventStream.add(emitTime, record) if record @eventsCount += 1 end diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index ebfa903fd..bc62756a1 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -6,12 +6,12 @@ module Fluent::Plugin class Kube_nodeInventory_Input < Input Fluent::Plugin.register_input("kube_nodes", self) - + @@configMapMountPath = "/etc/config/settings/log-data-collection-settings" @@promConfigMountPath = "/etc/config/settings/prometheus-data-collection-settings" @@osmConfigMountPath = "/etc/config/osm-settings/osm-metric-collection-configuration" @@AzStackCloudFileName 
= "/etc/kubernetes/host/azurestackcloud.json" - + @@rsPromInterval = ENV["TELEMETRY_RS_PROM_INTERVAL"] @@rsPromFieldPassCount = ENV["TELEMETRY_RS_PROM_FIELDPASS_LENGTH"] @@ -35,11 +35,12 @@ def initialize require_relative "KubernetesApiClient" require_relative "ApplicationInsightsUtility" require_relative "oms_common" - require_relative "omslog" + require_relative "omslog" + require_relative "extension_utils" - @ContainerNodeInventoryTag = "oneagent.containerInsights.CONTAINER_NODE_INVENTORY_BLOB" - @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" - @MDMKubeNodeInventoryTag = "mdm.kubenodeinventory" + @ContainerNodeInventoryTag = "oneagent.containerInsights.CONTAINER_NODE_INVENTORY_BLOB" + @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" + @MDMKubeNodeInventoryTag = "mdm.kubenodeinventory" @kubeperfTag = "oneagent.containerInsights.LINUX_PERF_BLOB" # refer tomlparser-agent-config for the defaults @@ -60,7 +61,7 @@ def configure(conf) super end - def start + def start if @run_interval super if !ENV["NODES_CHUNK_SIZE"].nil? && !ENV["NODES_CHUNK_SIZE"].empty? && ENV["NODES_CHUNK_SIZE"].to_i > 0 @@ -109,8 +110,27 @@ def enumerate @nodesAPIE2ELatencyMs = 0 @nodeInventoryE2EProcessingLatencyMs = 0 - nodeInventoryStartTime = (Time.now.to_f * 1000).to_i - + nodeInventoryStartTime = (Time.now.to_f * 1000).to_i + + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_kube_nodes::enumerate: AAD AUTH MSI MODE") + if @kubeperfTag.nil? || !@kubeperfTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @kubeperfTag = ExtensionUtils.getOutputStreamId(Constants::PERF_DATA_TYPE) + end + if @insightsMetricsTag.nil? || !@insightsMetricsTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @insightsMetricsTag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) + end + if @ContainerNodeInventoryTag.nil? || !@ContainerNodeInventoryTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @ContainerNodeInventoryTag = ExtensionUtils.getOutputStreamId(Constants::CONTAINER_NODE_INVENTORY_DATA_TYPE) + end + if @tag.nil? 
|| !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_NODE_INVENTORY_DATA_TYPE) + end + $log.info("in_kube_nodes::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_nodes::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_nodes::enumerate: using containernodeinventory tag -#{@ContainerNodeInventoryTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_nodes::enumerate: using kubenodeinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") + end nodesAPIChunkStartTime = (Time.now.to_f * 1000).to_i # Initializing continuation token to nil @@ -161,19 +181,19 @@ def enumerate def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) begin - currentTime = Time.now - emitTime = Fluent::Engine.now + currentTime = Time.now + emitTime = Fluent::Engine.now telemetrySent = false eventStream = Fluent::MultiEventStream.new containerNodeInventoryEventStream = Fluent::MultiEventStream.new insightsMetricsEventStream = Fluent::MultiEventStream.new - kubePerfEventStream = Fluent::MultiEventStream.new + kubePerfEventStream = Fluent::MultiEventStream.new @@istestvar = ENV["ISTEST"] #get node inventory nodeInventory["items"].each do |item| # node inventory nodeInventoryRecord = getNodeInventoryRecord(item, batchTime) - eventStream.add(emitTime, nodeInventoryRecord) if nodeInventoryRecord + eventStream.add(emitTime, nodeInventoryRecord) if nodeInventoryRecord if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && eventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE $log.info("in_kube_node::parse_and_emit_records: number of node inventory records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") router.emit_stream(@tag, eventStream) if eventStream @@ -186,7 +206,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) end # container node inventory - containerNodeInventoryRecord = getContainerNodeInventoryRecord(item, batchTime) + containerNodeInventoryRecord = getContainerNodeInventoryRecord(item, batchTime) containerNodeInventoryEventStream.add(emitTime, containerNodeInventoryRecord) if containerNodeInventoryRecord if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && containerNodeInventoryEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE @@ -235,7 +255,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) @NodeCache.mem.set_capacity(nodeMetricRecord["Host"], metricVal) end end - nodeMetricRecords.each do |metricRecord| + nodeMetricRecords.each do |metricRecord| kubePerfEventStream.add(emitTime, metricRecord) if metricRecord end if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && kubePerfEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE @@ -265,7 +285,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) if !insightsMetricsRecord.nil? && !insightsMetricsRecord.empty? nodeGPUInsightsMetricsRecords.push(insightsMetricsRecord) end - nodeGPUInsightsMetricsRecords.each do |insightsMetricsRecord| + nodeGPUInsightsMetricsRecords.each do |insightsMetricsRecord| insightsMetricsEventStream.add(emitTime, insightsMetricsRecord) if insightsMetricsRecord end if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && insightsMetricsEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE @@ -335,7 +355,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) $log.info("kubeNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") end - eventStream = nil + eventStream = nil end if containerNodeInventoryEventStream.count > 0 $log.info("in_kube_node::parse_and_emit_records: number of container node inventory records emitted #{containerNodeInventoryEventStream.count} @ #{Time.now.utc.iso8601}") @@ -507,7 +527,7 @@ def getNodeTelemetryProps(item) $log.warn "in_kube_nodes::getContainerNodeIngetNodeTelemetryPropsventoryRecord:Failed: #{errorStr}" end return properties - end + end end # Kube_Node_Input class NodeStatsCache # inner class for caching implementation (CPU and memory caching is handled the exact same way, so logic to do so is moved to a private inner class) @@ -578,5 +598,5 @@ def cpu() def mem() return @@memCache end - end + end end # module diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 5598602cd..3f5f4f1cc 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -11,7 +11,7 @@ class Kube_PodInventory_Input < Input @@MDMKubePodInventoryTag = "mdm.kubepodinventory" @@hostName = (OMS::Common.get_hostname) - + def initialize super @@ -27,6 +27,7 @@ def initialize require_relative "oms_common" require_relative "omslog" require_relative "constants" + require_relative "extension_utils" # refer tomlparser-agent-config for updating defaults # this configurable via configmap @@ -39,12 +40,12 @@ def initialize @winContainerCount = 0 @controllerData = {} @podInventoryE2EProcessingLatencyMs = 0 - @podsAPIE2ELatencyMs = 0 - + @podsAPIE2ELatencyMs = 0 + @kubeperfTag = "oneagent.containerInsights.LINUX_PERF_BLOB" @kubeservicesTag = "oneagent.containerInsights.KUBE_SERVICES_BLOB" @containerInventoryTag = "oneagent.containerInsights.CONTAINER_INVENTORY_BLOB" - @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" + @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" end config_param :run_interval, :time, :default => 60 @@ -55,7 +56,7 @@ def configure(conf) @inventoryToMdmConvertor = Inventory2MdmConvertor.new() end - def start + def start if @run_interval super if !ENV["PODS_CHUNK_SIZE"].nil? && !ENV["PODS_CHUNK_SIZE"].empty? && ENV["PODS_CHUNK_SIZE"].to_i > 0 @@ -107,7 +108,30 @@ def enumerate(podList = nil) batchTime = currentTime.utc.iso8601 serviceRecords = [] @podInventoryE2EProcessingLatencyMs = 0 - podInventoryStartTime = (Time.now.to_f * 1000).to_i + podInventoryStartTime = (Time.now.to_f * 1000).to_i + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_kube_podinventory::enumerate: AAD AUTH MSI MODE") + if @kubeperfTag.nil? || !@kubeperfTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @kubeperfTag = ExtensionUtils.getOutputStreamId(Constants::PERF_DATA_TYPE) + end + if @kubeservicesTag.nil? || !@kubeservicesTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @kubeservicesTag = ExtensionUtils.getOutputStreamId(Constants::KUBE_SERVICES_DATA_TYPE) + end + if @containerInventoryTag.nil? || !@containerInventoryTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @containerInventoryTag = ExtensionUtils.getOutputStreamId(Constants::CONTAINER_INVENTORY_DATA_TYPE) + end + if @insightsMetricsTag.nil? || !@insightsMetricsTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @insightsMetricsTag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) + end + if @tag.nil? 
|| !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_POD_INVENTORY_DATA_TYPE) + end + $log.info("in_kube_podinventory::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using kubeservices tag -#{@kubeservicesTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using containerinventory tag -#{@containerInventoryTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using kubepodinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") + end # Get services first so that we dont need to make a call for very chunk $log.info("in_kube_podinventory::enumerate : Getting services from Kube API @ #{Time.now.utc.iso8601}") @@ -197,8 +221,8 @@ def enumerate(podList = nil) end def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batchTime = Time.utc.iso8601) - currentTime = Time.now - emitTime = Fluent::Engine.now + currentTime = Time.now + emitTime = Fluent::Engine.now #batchTime = currentTime.utc.iso8601 eventStream = Fluent::MultiEventStream.new containerInventoryStream = Fluent::MultiEventStream.new @@ -214,8 +238,8 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc podInventoryRecords = getPodInventoryRecords(item, serviceRecords, batchTime) podInventoryRecords.each do |record| if !record.nil? - eventStream.add(emitTime, record) if record - @inventoryToMdmConvertor.process_pod_inventory_record(record) + eventStream.add(emitTime, record) if record + @inventoryToMdmConvertor.process_pod_inventory_record(record) end end # Setting this flag to true so that we can send ContainerInventory records for containers @@ -232,7 +256,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc # Send container inventory records for containers on windows nodes @winContainerCount += containerInventoryRecords.length containerInventoryRecords.each do |cirecord| - if !cirecord.nil? + if !cirecord.nil? 
containerInventoryStream.add(emitTime, cirecord) if cirecord end end @@ -255,7 +279,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "limits", "cpu", "cpuLimitNanoCores", batchTime)) containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "limits", "memory", "memoryLimitBytes", batchTime)) - containerMetricDataItems.each do |record| + containerMetricDataItems.each do |record| kubePerfEventStream.add(emitTime, record) if record end @@ -274,7 +298,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "limits", "nvidia.com/gpu", "containerGpuLimits", batchTime)) containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "requests", "amd.com/gpu", "containerGpuRequests", batchTime)) containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "limits", "amd.com/gpu", "containerGpuLimits", batchTime)) - containerGPUInsightsMetricsDataItems.each do |insightsMetricsRecord| + containerGPUInsightsMetricsDataItems.each do |insightsMetricsRecord| insightsMetricsEventStream.add(emitTime, insightsMetricsRecord) if insightsMetricsRecord end @@ -341,7 +365,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc if !kubeServiceRecord.nil? # adding before emit to reduce memory foot print kubeServiceRecord["ClusterId"] = KubernetesApiClient.getClusterId - kubeServiceRecord["ClusterName"] = KubernetesApiClient.getClusterName + kubeServiceRecord["ClusterName"] = KubernetesApiClient.getClusterName kubeServicesEventStream.add(emitTime, kubeServiceRecord) if kubeServiceRecord if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && kubeServicesEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE $log.info("in_kube_podinventory::parse_and_emit_records: number of service records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") @@ -648,6 +672,6 @@ def getServiceNameFromLabels(namespace, labels, serviceRecords) ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) end return serviceName - end + end end # Kube_Pod_Input end # module diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 40eebac8a..fccfd459d 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -20,6 +20,7 @@ def initialize require_relative "oms_common" require_relative "omslog" require_relative "constants" + require_relative "extension_utils" # Response size is around 1500 bytes per PV @PV_CHUNK_SIZE = "5000" @@ -33,7 +34,7 @@ def configure(conf) super end - def start + def start if @run_interval super @finished = false @@ -61,7 +62,13 @@ def enumerate telemetryFlush = false @pvTypeToCountHash = {} currentTime = Time.now - batchTime = currentTime.utc.iso8601 + batchTime = currentTime.utc.iso8601 + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_kube_pvinventory::enumerate: AAD AUTH MSI MODE") + if @tag.nil? 
|| !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_PV_INVENTORY_DATA_TYPE) + end + end continuationToken = nil $log.info("in_kube_pvinventory::enumerate : Getting PVs from Kube API @ #{Time.now.utc.iso8601}") @@ -93,7 +100,7 @@ def enumerate if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) telemetryFlush = true end - + # Flush AppInsights telemetry once all the processing is done if telemetryFlush == true telemetryProperties = {} @@ -110,8 +117,8 @@ def enumerate end # end enumerate def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) - currentTime = Time.now - emitTime = Fluent::Engine.now + currentTime = Time.now + emitTime = Fluent::Engine.now eventStream = Fluent::MultiEventStream.new @@istestvar = ENV["ISTEST"] begin @@ -152,8 +159,8 @@ def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) end records.each do |record| - if !record.nil? - eventStream.add(emitTime, record) + if !record.nil? + eventStream.add(emitTime, record) end end @@ -191,7 +198,7 @@ def getTypeInfo(item) begin if !item["spec"].nil? (Constants::PV_TYPES).each do |pvType| - + # PV is this type if !item["spec"][pvType].nil? @@ -252,6 +259,6 @@ def run_periodic @mutex.lock end @mutex.unlock - end + end end # Kube_PVInventory_Input end # module diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index 182c3ffc1..0b563a890 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -22,6 +22,7 @@ def initialize require_relative "omslog" require_relative "ApplicationInsightsUtility" require_relative "constants" + require_relative "extension_utils" # refer tomlparser-agent-config for defaults # this configurable via configmap @@ -44,7 +45,7 @@ def configure(conf) super end - def start + def start if @run_interval super if !ENV["DEPLOYMENTS_CHUNK_SIZE"].nil? && !ENV["DEPLOYMENTS_CHUNK_SIZE"].empty? && ENV["DEPLOYMENTS_CHUNK_SIZE"].to_i > 0 @@ -55,11 +56,11 @@ def start @DEPLOYMENTS_CHUNK_SIZE = 500 end $log.info("in_kubestate_deployments::start : DEPLOYMENTS_CHUNK_SIZE @ #{@DEPLOYMENTS_CHUNK_SIZE}") - + @finished = false @condition = ConditionVariable.new @mutex = Mutex.new - @thread = Thread.new(&method(:run_periodic)) + @thread = Thread.new(&method(:run_periodic)) end end @@ -81,8 +82,14 @@ def enumerate batchTime = currentTime.utc.iso8601 #set the running total for this batch to 0 - @deploymentsRunningTotal = 0 - + @deploymentsRunningTotal = 0 + + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_kubestate_deployments::enumerate: AAD AUTH MSI MODE") + if @tag.nil? 
|| !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) + end + end # Initializing continuation token to nil continuationToken = nil $log.info("in_kubestate_deployments::enumerate : Getting deployments from Kube API @ #{Time.now.utc.iso8601}") @@ -186,7 +193,7 @@ def parse_and_emit_records(deployments, batchTime = Time.utc.iso8601) end time = Fluent::Engine.now - metricItems.each do |insightsMetricsRecord| + metricItems.each do |insightsMetricsRecord| insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord end @@ -233,6 +240,6 @@ def run_periodic @mutex.lock end @mutex.unlock - end + end end end diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 8f60bfb72..178f7944f 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -18,7 +18,8 @@ def initialize require_relative "oms_common" require_relative "omslog" require_relative "ApplicationInsightsUtility" - require_relative "constants" + require_relative "constants" + require_relative "extension_utils" # refer tomlparser-agent-config for defaults # this configurable via configmap @@ -41,7 +42,7 @@ def configure(conf) super end - def start + def start if @run_interval super if !ENV["HPA_CHUNK_SIZE"].nil? && !ENV["HPA_CHUNK_SIZE"].empty? && ENV["HPA_CHUNK_SIZE"].to_i > 0 @@ -78,7 +79,14 @@ def enumerate batchTime = currentTime.utc.iso8601 @hpaCount = 0 - + + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_kubestate_hpa::enumerate: AAD AUTH MSI MODE") + if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) + end + $log.info("in_kubestate_hpa::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") + end # Initializing continuation token to nil continuationToken = nil $log.info("in_kubestate_hpa::enumerate : Getting HPAs from Kube API @ #{Time.now.utc.iso8601}") @@ -186,7 +194,7 @@ def parse_and_emit_records(hpas, batchTime = Time.utc.iso8601) end time = Fluent::Engine.now - metricItems.each do |insightsMetricsRecord| + metricItems.each do |insightsMetricsRecord| insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord end @@ -231,6 +239,6 @@ def run_periodic @mutex.lock end @mutex.unlock - end + end end end diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 9ab2474b1..dd462fdf2 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -20,6 +20,7 @@ def initialize require_relative "oms_common" require_relative "omslog" require_relative "constants" + require_relative "extension_utils" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" end @@ -58,6 +59,17 @@ def enumerate() timeDifference = (DateTime.now.to_time.to_i - @@winNodeQueryTimeTracker).abs timeDifferenceInMinutes = timeDifference / 60 @@istestvar = ENV["ISTEST"] + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_win_cadvisor_perf::enumerate: AAD AUTH MSI MODE") + if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::PERF_DATA_TYPE) + end + if @insightsMetricsTag.nil? 
|| !@insightsMetricsTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @insightsMetricsTag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) + end + $log.info("in_win_cadvisor_perf::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") + $log.info("in_win_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") + end #Resetting this cache so that it is populated with the current set of containers with every call CAdvisorMetricsAPIClient.resetWinContainerIdCache() diff --git a/source/plugins/ruby/out_mdm.rb b/source/plugins/ruby/out_mdm.rb index 8e80fb753..82d6e07db 100644 --- a/source/plugins/ruby/out_mdm.rb +++ b/source/plugins/ruby/out_mdm.rb @@ -21,6 +21,9 @@ def initialize require_relative "proxy_utils" @@token_resource_url = "https://monitoring.azure.com/" + # AAD auth supported only in public cloud and handle other clouds when enabled + # this is unified new token audience for LA AAD MSI auth & metrics + @@token_resource_audience = "https://monitor.azure.com/" @@grant_type = "client_credentials" @@azure_json_path = "/etc/kubernetes/host/azure.json" @@post_request_url_template = "https://%{aks_region}.monitoring.azure.com%{aks_resource_id}/metrics" @@ -28,6 +31,8 @@ def initialize # msiEndpoint is the well known endpoint for getting MSI authentications tokens @@msi_endpoint_template = "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&client_id=%{user_assigned_client_id}&resource=%{resource}" + # IMDS msiEndpoint for AAD MSI Auth is the proxy endpoint whcih serves the MSI auth tokens with resource claim + @@imds_msi_endpoint_template = "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=%{resource}" @@user_assigned_client_id = ENV["USER_ASSIGNED_IDENTITY_CLIENT_ID"] @@plugin_name = "AKSCustomMetricsMDM" @@ -46,6 +51,7 @@ def initialize @last_telemetry_sent_time = nil # Setting useMsi to false by default @useMsi = false + @isAADMSIAuth = false @metrics_flushed_count = 0 @cluster_identity = nil @@ -124,7 +130,14 @@ def start @parsed_token_uri = URI.parse(aad_token_url) else @useMsi = true - msi_endpoint = @@msi_endpoint_template % { user_assigned_client_id: @@user_assigned_client_id, resource: @@token_resource_url } + if !@@user_assigned_client_id.nil? && !@@user_assigned_client_id.empty? + msi_endpoint = @@msi_endpoint_template % { user_assigned_client_id: @@user_assigned_client_id, resource: @@token_resource_url } + else + # in case of aad msi auth user_assigned_client_id will be empty + @log.info "using aad msi auth" + @isAADMSIAuth = true + msi_endpoint = @@imds_msi_endpoint_template % { resource: @@token_resource_audience } + end @parsed_token_uri = URI.parse(msi_endpoint) end @@ -148,8 +161,14 @@ def get_access_token @log.info "Refreshing access token for out_mdm plugin.." 
if (!!@useMsi) - @log.info "Using msi to get the token to post MDM data" - ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMToken-MSI", {}) + properties = {} + if (!!@isAADMSIAuth) + @log.info "Using aad msi auth to get the token to post MDM data" + properties["aadAuthMSIMode"] = "true" + else + @log.info "Using msi to get the token to post MDM data" + end + ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMToken-MSI", properties) @log.info "Opening TCP connection" http_access_token = Net::HTTP.start(@parsed_token_uri.host, @parsed_token_uri.port, :use_ssl => false) # http_access_token.use_ssl = false @@ -320,7 +339,7 @@ def send_to_mdm(post_body) ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMSendSuccessful", {}) @last_telemetry_sent_time = Time.now end - rescue Net::HTTPClientException => e # see https://docs.ruby-lang.org/en/2.6.0/NEWS.html about deprecating HTTPServerException and adding HTTPClientException + rescue Net::HTTPClientException => e # see https://docs.ruby-lang.org/en/2.6.0/NEWS.html about deprecating HTTPServerException and adding HTTPClientException if !response.nil? && !response.body.nil? #body will have actual error @log.info "Failed to Post Metrics to MDM : #{e} Response.body: #{response.body}" else diff --git a/source/plugins/utils/extension.rb b/source/plugins/utils/extension.rb new file mode 100644 index 000000000..78236fe15 --- /dev/null +++ b/source/plugins/utils/extension.rb @@ -0,0 +1,77 @@ +require "socket" +require "msgpack" +require "securerandom" +require "singleton" +require_relative "omslog" +require_relative "constants" +require_relative "ApplicationInsightsUtility" + + +class Extension + include Singleton + + def initialize + @cache = {} + @cache_lock = Mutex.new + $log.info("Extension::initialize complete") + end + + def get_output_stream_id(datatypeId) + @cache_lock.synchronize { + if @cache.has_key?(datatypeId) + return @cache[datatypeId] + else + @cache = get_config() + return @cache[datatypeId] + end + } + end + + private + def get_config() + extConfig = Hash.new + $log.info("Extension::get_config start ...") + begin + clientSocket = UNIXSocket.open(Constants::ONEAGENT_FLUENT_SOCKET_NAME) + requestId = SecureRandom.uuid.to_s + requestBodyJSON = { "Request" => "AgentTaggedData", "RequestId" => requestId, "Tag" => Constants::CI_EXTENSION_NAME, "Version" => Constants::CI_EXTENSION_VERSION }.to_json + $log.info("Extension::get_config::sending request with request body: #{requestBodyJSON}") + requestBodyMsgPack = requestBodyJSON.to_msgpack + clientSocket.write(requestBodyMsgPack) + clientSocket.flush + $log.info("reading the response from fluent socket: #{Constants::ONEAGENT_FLUENT_SOCKET_NAME}") + resp = clientSocket.recv(Constants::CI_EXTENSION_CONFIG_MAX_BYTES) + if !resp.nil? && !resp.empty? + $log.info("Extension::get_config::successfully read the extension config from fluentsocket and number of bytes read is #{resp.length}") + respJSON = JSON.parse(resp) + taggedData = respJSON["TaggedData"] + if !taggedData.nil? && !taggedData.empty? + taggedAgentData = JSON.parse(taggedData) + extensionConfigurations = taggedAgentData["extensionConfigurations"] + if !extensionConfigurations.nil? && !extensionConfigurations.empty? + extensionConfigurations.each do |extensionConfig| + outputStreams = extensionConfig["outputStreams"] + if !outputStreams.nil? && !outputStreams.empty? 
+ outputStreams.each do |datatypeId, streamId| + $log.info("Extension::get_config datatypeId:#{datatypeId}, streamId: #{streamId}") + extConfig[datatypeId] = streamId + end + else + $log.warn("Extension::get_config::received outputStreams is either nil or empty") + end + end + else + $log.warn("Extension::get_config::received extensionConfigurations from fluentsocket is either nil or empty") + end + end + end + rescue => errorStr + $log.warn("Extension::get_config failed: #{errorStr}") + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + ensure + clientSocket.close unless clientSocket.nil? + end + $log.info("Extension::get_config complete ...") + return extConfig + end +end diff --git a/source/plugins/utils/extension_utils.rb b/source/plugins/utils/extension_utils.rb new file mode 100644 index 000000000..5d439c6b2 --- /dev/null +++ b/source/plugins/utils/extension_utils.rb @@ -0,0 +1,27 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +#!/usr/local/bin/ruby +# frozen_string_literal: true + +require_relative "extension" + +class ExtensionUtils + class << self + def getOutputStreamId(dataType) + outputStreamId = "" + begin + if !dataType.nil? && !dataType.empty? + outputStreamId = Extension.instance.get_output_stream_id(dataType) + $log.info("ExtensionUtils::getOutputStreamId: got streamid: #{outputStreamId} for datatype: #{dataType}") + else + $log.warn("ExtensionUtils::getOutputStreamId: dataType shouldnt be nil or empty") + end + rescue => errorStr + $log.warn("ExtensionUtils::getOutputStreamId: failed with an exception: #{errorStr}") + end + return outputStreamId + end + def isAADMSIAuthMode() + return !ENV["AAD_MSI_AUTH_MODE"].nil? && !ENV["AAD_MSI_AUTH_MODE"].empty? && ENV["AAD_MSI_AUTH_MODE"].downcase == "true" + end + end +end From 13eb3a640ac094888648048e07eb01eb76a1d286 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 20 Jul 2021 11:20:34 -0700 Subject: [PATCH 129/301] Gangams/remove chart version dependency (#589) * remove chart version dependency * remove unused code * fix resource type * fix * handle weird cli chars * update release process --- ReleaseProcess.md | 29 ++++++------ .../onboarding/managed/enable-monitoring.ps1 | 23 ++++------ .../onboarding/managed/enable-monitoring.sh | 42 +++++++++++------- .../onboarding/managed/upgrade-monitoring.sh | 44 ++++++++++++------- 4 files changed, 76 insertions(+), 62 deletions(-) diff --git a/ReleaseProcess.md b/ReleaseProcess.md index 8ec91546c..09de5e84f 100644 --- a/ReleaseProcess.md +++ b/ReleaseProcess.md @@ -39,48 +39,49 @@ Image automatically synched to MCR CN from Public cloud MCR. Make PR against [AKS-Engine](https://github.com/Azure/aks-engine). Refer PR https://github.com/Azure/aks-engine/pull/2318 -## Arc for Kubernetes +## Arc for Kubernetes -Ev2 pipeline used to deploy the chart of the Arc K8s Container Insights Extension as per Safe Deployment Process. +Ev2 pipeline used to deploy the chart of the Arc K8s Container Insights Extension as per Safe Deployment Process. Here is the high level process ``` 1. Specify chart version of the release candidate and trigger [container-insights-arc-k8s-extension-ci_prod-release](https://github-private.visualstudio.com/microsoft/_release?_a=releases&view=all) 2. Get the approval from one of team member for the release - 3. Once the approved, release should be triggered automatically + 3. Once the approved, release should be triggered automatically 4. use `cimon-arck8s-eastus2euap` for validating latest release in canary region 5. 
TBD - Notify vendor team for the validation on all Arc K8s supported platforms ``` ## Microsoft Charts Repo release for On-prem K8s +> Note: This chart repo being used in the ARO v4 onboarding script as well. -Since HELM charts repo being deprecated, Microsoft charts repo being used for HELM chart release of on-prem K8s clusters. -To make chart release PR, fork [Microsoft-charts-repo]([https://github.com/microsoft/charts/tree/gh-pages) and make the PR against `gh-pages` branch of the upstream repo. +Since HELM charts repo being deprecated, Microsoft charts repo being used for HELM chart release of on-prem K8s clusters. +To make chart release PR, fork [Microsoft-charts-repo]([https://github.com/microsoft/charts/tree/gh-pages) and make the PR against `gh-pages` branch of the upstream repo. Refer PR - https://github.com/microsoft/charts/pull/23 for example. Once the PR merged, latest version of HELM chart should be available in couple of mins in https://microsoft.github.io/charts/repo and https://artifacthub.io/. Instructions to create PR ``` -# 1. create helm package for the release candidate +# 1. create helm package for the release candidate git clone git@github.com:microsoft/Docker-Provider.git git checkout ci_prod cd ~/Docker-Provider/charts/azuremonitor-containers # this path based on where you have cloned the repo - helm package . + helm package . -# 2. clone your fork repo and checkout gh_pages branch # gh_pages branch used as release branch - cd ~ +# 2. clone your fork repo and checkout gh_pages branch # gh_pages branch used as release branch + cd ~ git clone cd ~/charts # assumed the root dir of the clone is charts git checkout gh_pages -# 3. copy release candidate helm package - cd ~/charts/repo/azuremonitor-containers +# 3. copy release candidate helm package + cd ~/charts/repo/azuremonitor-containers # update chart version value with the version of chart being released - cp ~/Docker-Provider/charts/azuremonitor-containers/azuremonitor-containers-.tgz . + cp ~/Docker-Provider/charts/azuremonitor-containers/azuremonitor-containers-.tgz . cd ~/charts/repo - # update repo index file + # update repo index file helm repo index . - + # 4. Review the changes and make PR. Please note, you may need to revert unrelated changes automatically added by `helm repo index .` command ``` diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index 828d061ac..e79ef2138 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -62,11 +62,10 @@ $isArcK8sCluster = $false $isAksCluster = $false $isUsingServicePrincipal = $false -# released chart version in mcr -$mcr = "mcr.microsoft.com" -$mcrChartVersion = "2.8.3" -$mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" -$helmLocalRepoName = "." 
+# microsoft helm chart repo +$microsoftHelmRepo="https://microsoft.github.io/charts/repo" +$microsoftHelmRepoName="microsoft" + $omsAgentDomainName="opinsights.azure.com" if ([string]::IsNullOrEmpty($azureCloudName) -eq $true) { @@ -547,16 +546,12 @@ Write-Host "Helm version" : $helmVersion Write-Host("Installing or upgrading if exists, Azure Monitor for containers HELM chart ...") try { - Write-Host("pull the chart from mcr.microsoft.com") - [System.Environment]::SetEnvironmentVariable("HELM_EXPERIMENTAL_OCI", 1, "Process") - - Write-Host("pull the chart from mcr.microsoft.com") - helm chart pull ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} - - Write-Host("export the chart from local cache to current directory") - helm chart export ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} --destination . + Write-Host("Add helm chart repo- ${microsoftHelmRepoName} with repo path: ${microsoftHelmRepo}") + helm repo add ${microsoftHelmRepoName} ${microsoftHelmRepo} + Write-Host("Updating the helm chart repo- ${microsoftHelmRepoName} to get latest chart versions") + helm repo update ${microsoftHelmRepoName} - $helmChartRepoPath = "${helmLocalRepoName}" + "/" + "${helmChartName}" + $helmChartRepoPath = "${microsoftHelmRepoName}" + "/" + "${helmChartName}" Write-Host("helmChartRepoPath is : ${helmChartRepoPath}") diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index f27f944fd..588d193a3 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -43,11 +43,9 @@ defaultAzureCloud="AzureCloud" # default domain will be for public cloud omsAgentDomainName="opinsights.azure.com" -# released chart version in mcr -mcrChartVersion="2.8.3" -mcr="mcr.microsoft.com" -mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" -helmLocalRepoName="." 
+# microsoft helm chart repo +microsoftHelmRepo="https://microsoft.github.io/charts/repo" +microsoftHelmRepoName="microsoft" helmChartName="azuremonitor-containers" # default release name used during onboarding @@ -435,9 +433,10 @@ create_default_log_analytics_workspace() { workspaceResourceGroup="DefaultResourceGroup-"$workspaceRegionCode isRGExists=$(az group exists -g $workspaceResourceGroup) + isRGExists=$(echo $isRGExists | tr -d '"\r\n') workspaceName="DefaultWorkspace-"$subscriptionId"-"$workspaceRegionCode - if $isRGExists; then + if [ "${isRGExists}" == "true" ]; then echo "using existing default resource group:"$workspaceResourceGroup else echo "creating resource group: $workspaceResourceGroup in region: $workspaceRegion" @@ -455,7 +454,7 @@ create_default_log_analytics_workspace() { fi workspaceResourceId=$(az resource show -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --query id -o json) - workspaceResourceId=$(echo $workspaceResourceId | tr -d '"') + workspaceResourceId=$(echo $workspaceResourceId | tr -d '"' | tr -d '"\r\n') echo "workspace resource Id: ${workspaceResourceId}" } @@ -495,10 +494,16 @@ install_helm_chart() { adminUserName=$(az aro list-credentials -g $clusterResourceGroup -n $clusterName --query 'kubeadminUsername' -o tsv) adminPassword=$(az aro list-credentials -g $clusterResourceGroup -n $clusterName --query 'kubeadminPassword' -o tsv) apiServer=$(az aro show -g $clusterResourceGroup -n $clusterName --query apiserverProfile.url -o tsv) + # certain az cli versions adds /r/n so trim them + adminUserName=$(echo $adminUserName | tr -d '"\r\n') + adminPassword=$(echo $adminPassword | tr -d '"\r\n') + apiServer=$(echo $apiServer | tr -d '"\r\n') echo "login to the cluster via oc login" oc login $apiServer -u $adminUserName -p $adminPassword - echo "creating project azure-monitor-for-containers" + echo "creating project: azure-monitor-for-containers" oc new-project $openshiftProjectName + echo "swicthing to project: azure-monitor-for-containers" + oc project $openshiftProjectName echo "getting config-context of aro v4 cluster" kubeconfigContext=$(oc config current-context) fi @@ -513,15 +518,7 @@ install_helm_chart() { clusterRegion=$(az resource show --ids ${clusterResourceId} --query location -o tsv) echo "cluster region is : ${clusterRegion}" - echo "pull the chart version ${mcrChartVersion} from ${mcr}/${mcrChartRepoPath}" - export HELM_EXPERIMENTAL_OCI=1 - helm chart pull $mcr/$mcrChartRepoPath:$mcrChartVersion - - echo "export the chart from local cache to current directory" - helm chart export $mcr/$mcrChartRepoPath:$mcrChartVersion --destination . - - helmChartRepoPath=$helmLocalRepoName/$helmChartName - + helmChartRepoPath=$microsoftHelmRepoName/$helmChartName echo "helm chart repo path: ${helmChartRepoPath}" if [ ! 
-z "$proxyEndpoint" ]; then @@ -581,6 +578,14 @@ enable_aks_monitoring_addon() { echo "status after enabling of aks monitoringa addon:$status" } +# add helm chart repo and update repo to get latest chart version +add_and_update_helm_chart_repo() { + echo "adding helm repo: ${microsoftHelmRepoName} with repo path: ${microsoftHelmRepo}" + helm repo add ${microsoftHelmRepoName} ${microsoftHelmRepo} + echo "updating helm repo: ${microsoftHelmRepoName} to get local charts updated with latest ones" + helm repo update +} + # parse and validate args parse_args $@ @@ -644,6 +649,9 @@ else attach_monitoring_tags fi +# add helm repo & update to get the latest chart version +add_and_update_helm_chart_repo + # install helm chart install_helm_chart diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 5456a7072..83643f3fa 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -19,14 +19,14 @@ set -e set -o pipefail -# released chart version for Azure Arc enabled Kubernetes public preview -mcrChartVersion="2.8.3" -mcr="mcr.microsoft.com" -mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" - +# microsoft helm chart repo +microsoftHelmRepo="https://microsoft.github.io/charts/repo" +microsoftHelmRepoName="microsoft" # default to public cloud since only supported cloud is azure public clod defaultAzureCloud="AzureCloud" -helmLocalRepoName="." +# microsoft helm chart repo +microsoftHelmRepo="https://microsoft.github.io/charts/repo" +microsoftHelmRepoName="microsoft" helmChartName="azuremonitor-containers" # default release name used during onboarding @@ -38,6 +38,9 @@ arcK8sResourceProvider="Microsoft.Kubernetes/connectedClusters" # default of resourceProvider is Azure Arc enabled Kubernetes and this will get updated based on the provider cluster resource resourceProvider="Microsoft.Kubernetes/connectedClusters" +# resource provider for azure redhat openshift v4 cluster +aroV4ResourceProvider="Microsoft.RedHatOpenShift/OpenShiftClusters" + # Azure Arc enabled Kubernetes cluster resource isArcK8sCluster=false @@ -235,10 +238,14 @@ upgrade_helm_chart_release() { adminUserName=$(az aro list-credentials -g $clusterResourceGroup -n $clusterName --query 'kubeadminUsername' -o tsv) adminPassword=$(az aro list-credentials -g $clusterResourceGroup -n $clusterName --query 'kubeadminPassword' -o tsv) apiServer=$(az aro show -g $clusterResourceGroup -n $clusterName --query apiserverProfile.url -o tsv) + # certain az cli versions adds /r/n so trim them + adminUserName=$(echo $adminUserName |tr -d '"\r\n') + adminPassword=$(echo $adminPassword |tr -d '"\r\n') + apiServer=$(echo $apiServer |tr -d '"\r\n') echo "login to the cluster via oc login" oc login $apiServer -u $adminUserName -p $adminPassword - echo "creating project azure-monitor-for-containers" - oc new-project $openshiftProjectName + echo "switching to project azure-monitor-for-containers" + oc project $openshiftProjectName echo "getting config-context of aro v4 cluster" kubeconfigContext=$(oc config current-context) fi @@ -249,15 +256,7 @@ upgrade_helm_chart_release() { echo "installing Azure Monitor for containers HELM chart on to the cluster with kubecontext:${kubeconfigContext} ..." 
fi - export HELM_EXPERIMENTAL_OCI=1 - - echo "pull the chart from ${mcr}/${mcrChartRepoPath}:${mcrChartVersion}" - helm chart pull ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} - - echo "export the chart from local cache to current directory" - helm chart export ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} --destination . - - helmChartRepoPath=$helmLocalRepoName/$helmChartName + helmChartRepoPath=$microsoftHelmRepoName/$helmChartName echo "upgrading the release: $releaseName to chart version : ${mcrChartVersion}" helm get values $releaseName -o yaml | helm upgrade --install $releaseName $helmChartRepoPath -f - @@ -296,6 +295,14 @@ validate_and_configure_supported_cloud() { fi } +# add helm chart repo and update repo to get latest chart version +add_and_update_helm_chart_repo() { + echo "adding helm repo: ${microsoftHelmRepoName} with repo path: ${microsoftHelmRepo}" + helm repo add ${microsoftHelmRepoName} ${microsoftHelmRepo} + echo "updating helm repo: ${microsoftHelmRepoName} to get local charts updated with latest ones" + helm repo update +} + # parse and validate args parse_args $@ @@ -322,6 +329,9 @@ fi # validate the cluster has monitoring tags validate_monitoring_tags +# add helm repo & update to get the latest chart version +add_and_update_helm_chart_repo + # upgrade helm chart release upgrade_helm_chart_release From 63f22d93aa509f270459764f306372226e813926 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 22 Jul 2021 18:23:13 -0700 Subject: [PATCH 130/301] Gangams/july 2021 release tasks 3 (#613) * use artifact and pipeline creds for image push * minor update * add vuln fix here so that pr can be merged --- ...l.all_tag.all_phase.all_config.ci_prod.yml | 3 +- .pipelines/pipeline.user.linux.yml | 1 + ...l.all_tag.all_phase.all_config.ci_prod.yml | 1 + .pipelines/pipeline.user.windows.yml | 1 + .pipelines/release-agent.sh | 74 +++++++++++++++++++ kubernetes/linux/setup.sh | 3 + 6 files changed, 82 insertions(+), 1 deletion(-) create mode 100644 .pipelines/release-agent.sh diff --git a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml index d47a60ffe..1e9909ee8 100644 --- a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml +++ b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml @@ -40,5 +40,6 @@ package: # to be named differently. Defaults to Dockerfile. # In effect, the -f option value passed to docker build will be repository_checkout_folder/src/DockerFinal/Foo.dockerfile. repository_name: 'cdpxlinux' # only supported ones are cdpx acr repos - tag: 'ciprod' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. + tag: 'ciprod' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. + export_to_artifact_path: 'agentimage.tar.gz' # path for exported image and use this instead of fixed tag diff --git a/.pipelines/pipeline.user.linux.yml b/.pipelines/pipeline.user.linux.yml index 565661d64..9977e7a1a 100644 --- a/.pipelines/pipeline.user.linux.yml +++ b/.pipelines/pipeline.user.linux.yml @@ -47,3 +47,4 @@ package: repository_name: 'cdpxlinux' # only supported ones are cdpx acr repos tag: 'cidev' # OPTIONAL: Defaults to latest. 
The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. + export_to_artifact_path: 'agentimage.tar.gz' # path for exported image and use this instead of fixed tag diff --git a/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml index e0286fbd6..8462f8e40 100644 --- a/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml +++ b/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml @@ -53,3 +53,4 @@ package: repository_name: 'cdpxwin1809' # only supported ones are cdpx acr repos tag: 'win-ciprod' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. + export_to_artifact_path: 'agentimage.tar.gz' # path for exported image and use this instead of fixed tag diff --git a/.pipelines/pipeline.user.windows.yml b/.pipelines/pipeline.user.windows.yml index 2b7a54ae9..1690ad700 100644 --- a/.pipelines/pipeline.user.windows.yml +++ b/.pipelines/pipeline.user.windows.yml @@ -53,3 +53,4 @@ package: repository_name: 'cdpxwin1809' # only supported ones are cdpx acr repos tag: 'win-cidev' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. + export_to_artifact_path: 'agentimage.tar.gz' # path for exported image and use this instead of fixed tag diff --git a/.pipelines/release-agent.sh b/.pipelines/release-agent.sh new file mode 100644 index 000000000..b34dd9995 --- /dev/null +++ b/.pipelines/release-agent.sh @@ -0,0 +1,74 @@ +#!/bin/bash + +# Note - This script used in the pipeline as inline script + +# These are plain pipeline variable which can be modified anyone in the team +# AGENT_RELEASE=cidev +# AGENT_IMAGE_TAG_SUFFIX=07222021 + +#Name of the ACR for ciprod & cidev images +ACR_NAME=containerinsightsprod.azurecr.io +AGENT_IMAGE_FULL_PATH=${ACR_NAME}/public/azuremonitor/containerinsights/${AGENT_RELEASE}:${AGENT_RELEASE}${AGENT_IMAGE_TAG_SUFFIX} +AGENT_IMAGE_TAR_FILE_NAME=agentimage.tar.gz + +if [ -z $AGENT_IMAGE_TAG_SUFFIX ]; then + echo "-e error value of AGENT_RELEASE variable shouldnt be empty" + exit 1 +fi + +if [ -z $AGENT_RELEASE ]; then + echo "-e error AGENT_RELEASE shouldnt be empty" + exit 1 +fi + +echo "ACR NAME - ${ACR_NAME}" +echo "AGENT RELEASE - ${AGENT_RELEASE}" +echo "AGENT IMAGE TAG SUFFIX - ${AGENT_IMAGE_TAG_SUFFIX}" +echo "AGENT IMAGE FULL PATH - ${AGENT_IMAGE_FULL_PATH}" +echo "AGENT IMAGE TAR FILE PATH - ${AGENT_IMAGE_TAR_FILE_NAME}" + +echo "loading linuxagent image tarball" +IMAGE_NAME=$(docker load -i ${AGENT_IMAGE_TAR_FILE_NAME}) +echo IMAGE_NAME: $IMAGE_NAME +if [ $? 
-ne 0 ]; then + echo "-e error, on loading linux agent tarball from ${AGENT_IMAGE_TAR_FILE_NAME}" + echo "** Please check if this caused due to build error **" + exit 1 +else + echo "successfully loaded linux agent image tarball" +fi +# IMAGE_ID=$(docker images $IMAGE_NAME | awk '{print $3 }' | tail -1) +# echo "Image Id is : ${IMAGE_ID}" +prefix="Loadedimage:" +IMAGE_NAME=$(echo $IMAGE_NAME | tr -d '"' | tr -d "[:space:]") +IMAGE_NAME=${IMAGE_NAME/#$prefix} +echo "*** trimmed image name-:${IMAGE_NAME}" +echo "tagging the image $IMAGE_NAME as ${AGENT_IMAGE_FULL_PATH}" +# docker tag $IMAGE_NAME ${AGENT_IMAGE_FULL_PATH} +docker tag $IMAGE_NAME $AGENT_IMAGE_FULL_PATH + +if [ $? -ne 0 ]; then + echo "-e error tagging the image $IMAGE_NAME as ${AGENT_IMAGE_FULL_PATH}" + exit 1 +else + echo "successfully tagged the image $IMAGE_NAME as ${AGENT_IMAGE_FULL_PATH}" +fi + +# used pipeline identity to push the image to ciprod acr +echo "logging to acr: ${ACR_NAME}" +az acr login --name ${ACR_NAME} +if [ $? -ne 0 ]; then + echo "-e error log into acr failed: ${ACR_NAME}" + exit 1 +else + echo "successfully logged into acr:${ACR_NAME}" +fi + +echo "pushing ${AGENT_IMAGE_FULL_PATH}" +docker push ${AGENT_IMAGE_FULL_PATH} +if [ $? -ne 0 ]; then + echo "-e error on pushing the image ${AGENT_IMAGE_FULL_PATH}" + exit 1 +else + echo "Successfully pushed the image ${AGENT_IMAGE_FULL_PATH}" +fi diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 933c14aed..623f33cea 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -20,6 +20,9 @@ cp -f $TMPDIR/envmdsd /etc/mdsd.d sudo apt-get update sudo apt-get install inotify-tools -y +#upgrade libsystemd0 to address CVE-2021-33910 +apt-get upgrade libsystemd0 -y + #used to parse response of kubelet apis #ref: https://packages.ubuntu.com/search?keywords=jq sudo apt-get install jq=1.5+dfsg-2 -y From 902c939562d4b573f00c7be5c8f1b5a126ca59bb Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Thu, 22 Jul 2021 21:10:30 -0700 Subject: [PATCH 131/301] remove un-used output plugin (#614) --- build/linux/installer/conf/telegraf-rs.conf | 20 -------------------- build/linux/installer/conf/telegraf.conf | 20 -------------------- 2 files changed, 40 deletions(-) diff --git a/build/linux/installer/conf/telegraf-rs.conf b/build/linux/installer/conf/telegraf-rs.conf index 0ca07f7e5..5de35d82c 100644 --- a/build/linux/installer/conf/telegraf-rs.conf +++ b/build/linux/installer/conf/telegraf-rs.conf @@ -124,26 +124,6 @@ namedrop = ["agent_telemetry", "file"] #tagdrop = ["AgentVersion","AKS_RESOURCE_ID", "ACS_RESOURCE_NAME", "Region","ClusterName","ClusterType", "Computer", "ControllerType"] -[[outputs.application_insights]] - ## Instrumentation key of the Application Insights resource. - instrumentation_key = "$TELEMETRY_APPLICATIONINSIGHTS_KEY" - - ## Timeout for closing (default: 5s). - # timeout = "5s" - - ## Enable additional diagnostic logging. - # enable_diagnostic_logging = false - - ## Context Tag Sources add Application Insights context tags to a tag value. 
- ## - ## For list of allowed context tag keys see: - ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go - # [outputs.application_insights.context_tag_sources] - # "ai.cloud.role" = "kubernetes_container_name" - # "ai.cloud.roleInstance" = "kubernetes_pod_name" - namepass = ["agent_telemetry"] - #tagdrop = ["nodeName"] - ############################################################################### # PROCESSOR PLUGINS # ############################################################################### diff --git a/build/linux/installer/conf/telegraf.conf b/build/linux/installer/conf/telegraf.conf index 8b6e2ad4b..0e4824e70 100644 --- a/build/linux/installer/conf/telegraf.conf +++ b/build/linux/installer/conf/telegraf.conf @@ -158,26 +158,6 @@ namepass = ["container.azm.ms/disk"] #fieldpass = ["used_percent"] -[[outputs.application_insights]] - ## Instrumentation key of the Application Insights resource. - instrumentation_key = "$TELEMETRY_APPLICATIONINSIGHTS_KEY" - - ## Timeout for closing (default: 5s). - # timeout = "5s" - - ## Enable additional diagnostic logging. - # enable_diagnostic_logging = false - - ## Context Tag Sources add Application Insights context tags to a tag value. - ## - ## For list of allowed context tag keys see: - ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go - # [outputs.application_insights.context_tag_sources] - # "ai.cloud.role" = "kubernetes_container_name" - # "ai.cloud.roleInstance" = "kubernetes_pod_name" - namepass = ["agent_telemetry"] - #tagdrop = ["nodeName"] - ############################################################################### # PROCESSOR PLUGINS # ############################################################################### From a76905a10afb0273f4ad9263e09fa3e71645d5fb Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 23 Jul 2021 09:55:40 -0700 Subject: [PATCH 132/301] fix telegraf telemetry and improve fluentd liveness (#611) * fix telegraf telemetry and improve fluentd liveness * address identified vuln with libsystemd0 * fix exported image file extension --- ...al.all_tag.all_phase.all_config.ci_prod.yml | 2 +- .pipelines/pipeline.user.windows.yml | 2 +- build/linux/installer/scripts/livenessprobe.sh | 18 +++++++++++++++++- source/plugins/go/src/oms.go | 2 ++ 4 files changed, 21 insertions(+), 3 deletions(-) diff --git a/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml index 8462f8e40..8ae069e90 100644 --- a/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml +++ b/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml @@ -53,4 +53,4 @@ package: repository_name: 'cdpxwin1809' # only supported ones are cdpx acr repos tag: 'win-ciprod' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. 
- export_to_artifact_path: 'agentimage.tar.gz' # path for exported image and use this instead of fixed tag + export_to_artifact_path: 'agentimage.tar.zip' # path for exported image and use this instead of fixed tag diff --git a/.pipelines/pipeline.user.windows.yml b/.pipelines/pipeline.user.windows.yml index 1690ad700..82dd30cd0 100644 --- a/.pipelines/pipeline.user.windows.yml +++ b/.pipelines/pipeline.user.windows.yml @@ -53,4 +53,4 @@ package: repository_name: 'cdpxwin1809' # only supported ones are cdpx acr repos tag: 'win-cidev' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. - export_to_artifact_path: 'agentimage.tar.gz' # path for exported image and use this instead of fixed tag + export_to_artifact_path: 'agentimage.tar.zip' # path for exported image and use this instead of fixed tag diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index 252f471e9..8ecb7fe44 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -11,13 +11,29 @@ fi #optionally test to exit non zero value if fluentd is not running #fluentd not used in sidecar container -if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then +if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then (ps -ef | grep "fluentd" | grep -v "grep") if [ $? -ne 0 ] then echo "fluentd is not running" > /dev/termination-log exit 1 fi + # fluentd launches by default supervisor and worker process + # so adding the liveness checks individually to handle scenario if any of the process dies + # supervisor process + (ps -ef | grep "fluentd" | grep "supervisor" | grep -v "grep") + if [ $? -ne 0 ] + then + echo "fluentd supervisor is not running" > /dev/termination-log + exit 1 + fi + # worker process + (ps -ef | grep "fluentd" | grep -v "supervisor" | grep -v "grep" ) + if [ $? -ne 0 ] + then + echo "fluentd worker is not running" > /dev/termination-log + exit 1 + fi fi #test to exit non zero value if fluentbit is not running diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 0761ef664..026d36d6c 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -959,6 +959,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int if er != nil { Log("Error::mdsd::Failed to write to mdsd %d records after %s. Will retry ... 
error : %s", len(msgPackEntries), elapsed, er.Error()) + UpdateNumTelegrafMetricsSentTelemetry(0, 1, 0) if MdsdInsightsMetricsMsgpUnixSocketClient != nil { MdsdInsightsMetricsMsgpUnixSocketClient.Close() MdsdInsightsMetricsMsgpUnixSocketClient = nil @@ -970,6 +971,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int return output.FLB_RETRY } else { numTelegrafMetricsRecords := len(msgPackEntries) + UpdateNumTelegrafMetricsSentTelemetry(numTelegrafMetricsRecords, 0, 0) Log("Success::mdsd::Successfully flushed %d telegraf metrics records that was %d bytes to mdsd in %s ", numTelegrafMetricsRecords, bts, elapsed) } } From 52612b59d70629b94eb25f28149bf896d6b1e913 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 23 Jul 2021 11:54:11 -0700 Subject: [PATCH 133/301] Gangams/july 2021 release tasks 2 (#612) * tail rs mdsd err logs * configure mdsd log rotation * log rotation for mdsd log files --- .../linux/installer/conf/td-agent-bit-rs.conf | 13 +++++++ kubernetes/linux/Dockerfile | 2 +- kubernetes/linux/logrotate.conf | 39 +++++++++++++++++++ kubernetes/linux/main.sh | 6 +++ kubernetes/linux/setup.sh | 3 ++ 5 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 kubernetes/linux/logrotate.conf diff --git a/build/linux/installer/conf/td-agent-bit-rs.conf b/build/linux/installer/conf/td-agent-bit-rs.conf index 9613c270d..da3738da7 100644 --- a/build/linux/installer/conf/td-agent-bit-rs.conf +++ b/build/linux/installer/conf/td-agent-bit-rs.conf @@ -23,6 +23,19 @@ Skip_Long_Lines On Ignore_Older 2m +[INPUT] + Name tail + Tag oms.container.log.flbplugin.mdsd.* + Path /var/opt/microsoft/linuxmonagent/log/mdsd.err + Read_from_Head true + DB /var/opt/microsoft/docker-cimprov/state/mdsd-ai.db + DB.Sync Off + Parser docker + Mem_Buf_Limit 1m + Path_Key filepath + Skip_Long_Lines On + Ignore_Older 2m + [INPUT] Name tcp Tag oms.container.perf.telegraf.* diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 1ae7bef61..b47841757 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -17,7 +17,7 @@ ENV KUBE_CLIENT_BACKOFF_BASE 1 ENV KUBE_CLIENT_BACKOFF_DURATION 0 ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python-ctypes init-system-helpers net-tools rsyslog cron vim dmidecode apt-transport-https gnupg && rm -rf /var/lib/apt/lists/* -COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs defaultpromenvvariables-sidecar mdsd.xml envmdsd $tmpdir/ +COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs defaultpromenvvariables-sidecar mdsd.xml envmdsd logrotate.conf $tmpdir/ WORKDIR ${tmpdir} # copy docker provider shell bundle to use the agent image diff --git a/kubernetes/linux/logrotate.conf b/kubernetes/linux/logrotate.conf new file mode 100644 index 000000000..921371fd0 --- /dev/null +++ b/kubernetes/linux/logrotate.conf @@ -0,0 +1,39 @@ +/var/opt/microsoft/linuxmonagent/log/mdsd.err { + copytruncate + rotate 7 + missingok + notifempty + delaycompress + compress + size 10M +} + +/var/opt/microsoft/linuxmonagent/log/mdsd.warn { + copytruncate + rotate 7 + missingok + notifempty + delaycompress + compress + size 10M +} + +/var/opt/microsoft/linuxmonagent/log/mdsd.info { + copytruncate + rotate 7 + missingok + notifempty + delaycompress + compress + size 10M +} + +/var/opt/microsoft/linuxmonagent/log/mdsd.qos { + copytruncate + rotate 7 + missingok + notifempty + delaycompress + compress 
+ size 10M +} diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 428e6f35a..4579787b3 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -521,6 +521,12 @@ else mdsd ${MDSD_AAD_MSI_AUTH_ARGS} -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & fi +# Set up a cron job for logrotation +if [ ! -f /etc/cron.d/ci-agent ]; then + echo "setting up cronjob for ci agent log rotation" + echo "*/5 * * * * root /usr/sbin/logrotate -s /var/lib/logrotate/ci-agent-status /etc/logrotate.d/ci-agent >/dev/null 2>&1" > /etc/cron.d/ci-agent +fi + # no dependency on fluentd for prometheus side car container if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then if [ ! -e "/etc/config/kube.conf" ]; then diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 623f33cea..51e5f9efb 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -16,6 +16,9 @@ wget https://github.com/microsoft/Docker-Provider/releases/download/06242021-one cp -f $TMPDIR/mdsd.xml /etc/mdsd.d cp -f $TMPDIR/envmdsd /etc/mdsd.d +#log rotate conf for mdsd and can be extended for other log files as well +cp -f $TMPDIR/logrotate.conf /etc/logrotate.d/ci-agent + #download inotify tools for watching configmap changes sudo apt-get update sudo apt-get install inotify-tools -y From 5b5d048fdb662ed8427b7825c159feafba3cbcac Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Fri, 13 Aug 2021 08:57:29 -0700 Subject: [PATCH 134/301] Fix out_oms.go dependency vulnerabilities (#623) --- .github/workflows/pr-checker.yml | 2 +- source/plugins/go/src/go.mod | 28 +- source/plugins/go/src/go.sum | 490 +++++++++++++++++++++++++++---- source/plugins/go/src/oms.go | 4 +- 4 files changed, 437 insertions(+), 87 deletions(-) diff --git a/.github/workflows/pr-checker.yml b/.github/workflows/pr-checker.yml index f3bdb27e8..bae117dbe 100644 --- a/.github/workflows/pr-checker.yml +++ b/.github/workflows/pr-checker.yml @@ -56,7 +56,7 @@ jobs: format: 'table' severity: 'CRITICAL,HIGH' vuln-type: 'os,library' - skip-dirs: 'opt/telegraf' + skip-dirs: 'opt/telegraf,usr/sbin/telegraf' exit-code: '1' timeout: '5m0s' WINDOWS-build: diff --git a/source/plugins/go/src/go.mod b/source/plugins/go/src/go.mod index db29a0553..4ead145ac 100644 --- a/source/plugins/go/src/go.mod +++ b/source/plugins/go/src/go.mod @@ -3,33 +3,17 @@ module Docker-Provider/source/plugins/go/src go 1.14 require ( - code.cloudfoundry.org/clock v1.0.1-0.20200131002207-86534f4ca3a5 // indirect github.com/Azure/azure-kusto-go v0.3.2 github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 + github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect + github.com/dnaeon/go-vcr v1.2.0 // indirect github.com/fluent/fluent-bit-go v0.0.0-20171103221316-c4a158a6e3a7 - github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 // indirect - github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e // indirect - github.com/golang/glog v0.0.0-20141105023935-44145f04b68c // indirect - github.com/google/btree v0.0.0-20160524151835-7d79101e329e // indirect - github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367 // indirect - github.com/google/uuid v1.1.1 - github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d // indirect - github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7 // indirect - github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3 // 
indirect + github.com/google/uuid v1.1.2 github.com/microsoft/ApplicationInsights-Go v0.4.3 - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da // indirect - github.com/peterbourgon/diskv v2.0.1+incompatible // indirect - github.com/philhofer/fwd v1.0.0 // indirect - github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect + github.com/philhofer/fwd v1.1.1 // indirect github.com/tinylib/msgp v1.1.2 github.com/ugorji/go v1.1.2-0.20180813092308-00b869d2f4a5 - golang.org/x/net v0.0.0-20200421231249-e086a090c8fd // indirect - golang.org/x/time v0.0.0-20161028155119-f51c12702a4d // indirect - gopkg.in/inf.v0 v0.9.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3 - k8s.io/api v0.0.0-20180628040859-072894a440bd // indirect - k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d - k8s.io/client-go v8.0.0+incompatible - golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f + k8s.io/apimachinery v0.21.0 + k8s.io/client-go v0.21.0 ) diff --git a/source/plugins/go/src/go.sum b/source/plugins/go/src/go.sum index 7e8b3d765..7f93bb260 100644 --- a/source/plugins/go/src/go.sum +++ b/source/plugins/go/src/go.sum @@ -1,10 +1,28 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c h1:5eeuG0BHx1+DHeT3AP+ISKZ2ht1UjGhm581ljqYpVeQ= code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= -code.cloudfoundry.org/clock 
v1.0.1-0.20200131002207-86534f4ca3a5 h1:LTlZ2AD8IV/d1JRzB+HHfZfF1M+K8lyOlN28zDEpw7U= -code.cloudfoundry.org/clock v1.0.1-0.20200131002207-86534f4ca3a5/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= -github.com/Azure/azure-kusto-go v0.1.3 h1:0u+YqfIvwj5PHd+moXwtlxVePt8xTLU1ixM8Q6PjJ3o= -github.com/Azure/azure-kusto-go v0.1.3/go.mod h1:55hwXJ3PaahmWZFP7VC4+PlgsSUuetSA30rFtYFabfc= -github.com/Azure/azure-kusto-go v0.1.4-0.20200427191510-041d4ed55f86 h1:vyhCediIKg1gZ9H/kMcutU8F8BFNhxLk76Gti8UAOzo= -github.com/Azure/azure-kusto-go v0.1.4-0.20200427191510-041d4ed55f86/go.mod h1:55hwXJ3PaahmWZFP7VC4+PlgsSUuetSA30rFtYFabfc= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-kusto-go v0.3.2 h1:XpS9co6GvEDl2oICF9HsjEsQVwEpRK6wbNWb9Z+uqsY= github.com/Azure/azure-kusto-go v0.3.2/go.mod h1:wd50n4qlsSxh+G4f80t+Fnl2ShK9AcXD+lMOstiKuYo= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= @@ -16,18 +34,21 @@ github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFE github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd h1:b3wyxBl3vvr15tUAziPBPK354y+LSdfPCpex5oBttHo= github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= -github.com/Azure/go-autorest v1.1.1 h1:4G9tVCqooRY3vDTB2bA1Z01PlSALtnUbji0AfzthUSs= -github.com/Azure/go-autorest v14.1.1+incompatible h1:m2F62e1Zk5DV3HENGdH/wEuzvJZIynHG4fHF7oiQwgE= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= github.com/Azure/go-autorest/autorest v0.10.0 h1:mvdtztBqcL8se7MdrUweNieTNi4kfNG6GOJuurQJpuY= github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest v0.10.2 h1:NuSF3gXetiHyUbVdneJMEVyPUYAe5wh+aN08JYAf1tI= +github.com/Azure/go-autorest/autorest v0.11.12 h1:gI8ytXbxMfI+IVbI9mP2JGCTXIuhHLgRlvQ9X4PsnHE= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= 
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= @@ -35,128 +56,471 @@ github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9 github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/Microsoft/ApplicationInsights-Go v0.4.2 h1:HIZoGXMiKNwAtMAgCSSX35j9mP+DjGF9ezfBvxMDLLg= -github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcVn3eVVDGDHfXM2zVILF7bMmsg= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= 
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fluent/fluent-bit-go v0.0.0-20171103221316-c4a158a6e3a7 h1:mck6KdLX2FTh2/ZD27dK69ehWDZR4hCk+nLf+HvAbDk= github.com/fluent/fluent-bit-go v0.0.0-20171103221316-c4a158a6e3a7/go.mod h1:JVF1Nl3QOPpKTR8xDjhkm0xINYUX0z4XdJvOpIUF+Eo= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 h1:ZktWZesgun21uEDrwW7iEV1zPCGQldM2atlJZ3TdvVM= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e h1:ago6fNuQ6IhszPsXkeU7qRCyfsIX7L67WDybsAPkLl8= -github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e/go.mod 
h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20141105023935-44145f04b68c h1:CbdkBQ1/PiAo0FYJhQGwASD8wrgNvTdf01g6+O9tNuA= -github.com/golang/glog v0.0.0-20141105023935-44145f04b68c/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/btree v0.0.0-20160524151835-7d79101e329e h1:JHB7F/4TJCrYBW8+GZO8VkWDj1jxcWuCl6uxKODiyi4= -github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367 
h1:ScAXWS+TR6MZKex+7Z8rneuSJH+FSDqd6ocQyl+ZHo4= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7 h1:6TSoaYExHper8PYsJu23GWVNOyYRCSnIFyxKgLSZ54w= -github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3 h1:/UewZcckqhvnnS0C6r3Sher2hSEbVmM6Ogpcjen08+Y= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/microsoft/ApplicationInsights-Go v0.4.2 h1:LCv4NtCpXpsUF6ZUzZdpVG2x4RwebY7tiJUb25uYXiM= -github.com/microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:DupRHRNoeuH4j8Yv3nux9/IXo3HZ0kO5A1ykNK4vR2E= github.com/microsoft/ApplicationInsights-Go v0.4.3 h1:gBuy5rM3o6Zo69QTkq1Ens8wx6sVf+mpgMjjfayiRcw= github.com/microsoft/ApplicationInsights-Go v0.4.3/go.mod h1:ih0t3h84PdzV1qGeUs89o9wL8eCuwf24M7TZp/nyqXk= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da h1:ZQGIPjr1iTtUPXZFk8WShqb5G+Qg65VHFLtSvmHh+Mw= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 
h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= +github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM= -github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc h1:LUUe4cdABGrIJAhl1P1ZpWY76AwukVszFdwkVFVLwIk= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ= github.com/tinylib/msgp v1.1.2/go.mod 
h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/ugorji/go v1.1.2-0.20180813092308-00b869d2f4a5 h1:JRe7Bc0YQq+x7Bm3p/LIBIb4aopsdr3H0KRKRI8g6oY= github.com/ugorji/go v1.1.2-0.20180813092308-00b869d2f4a5/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= -golang.org/x/crypto v0.0.0-20180222182404-49796115aa4b h1:/GxqO8gbyb+sNnviFY2IIMrtm8vGg6NEJDft68wJY/g= -golang.org/x/crypto v0.0.0-20180222182404-49796115aa4b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f h1:aZp0e2vLN4MToVqnjNEYEtrEA8RH8U8FN1CU7JgqsPU= -golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/net v0.0.0-20170809000501-1c05540f6879 h1:0rFa7EaCGdQPmZVbo9F7MNF65b8dyzS6EUnXjs9Cllk= -golang.org/x/net v0.0.0-20170809000501-1c05540f6879/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp 
v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd h1:QPwSajcTUrFriMF1nJ3XzgoqakqQEsnZf9LdXdi2nkI= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20171031081856-95c657629925 h1:nCH33NboKIsT4HoXBsXTWX8ul303HxWgkc5s2Ezwacg= -golang.org/x/sys v0.0.0-20171031081856-95c657629925/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073 h1:8qxJSnu+7dRq6upnbntrmriWByIakBuct5OM/MdQC1M= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/text v0.0.0-20170810154203-b19bf474d317 h1:WKW+OPdYPlvOTVGHuMfjnIC6yY2SI93yFB0pZ7giBmQ= -golang.org/x/text v0.0.0-20170810154203-b19bf474d317/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= +golang.org/x/term 
v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= -golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= -gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3 h1:AFxeG48hTWHhDTQDk/m2gorfVHUEa9vo3tp3D7TzwjI= gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170721113624-670d4cfef054 h1:ROF+R/wHHruzF40n5DfPv2jwm7rCJwvs8fz+RTZWjLE= -gopkg.in/yaml.v2 v2.0.0-20170721113624-670d4cfef054/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/api v0.0.0-20180628040859-072894a440bd h1:HzgYeLDS1jLxw8DGr68KJh9cdQ5iZJizG0HZWstIhfQ= -k8s.io/api v0.0.0-20180628040859-072894a440bd/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d h1:MZjlsu9igBoVPZkXpIGoxI6EonqNsXXZU7hhvfQLkd4= -k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/client-go v8.0.0+incompatible h1:tTI4hRmb1DRMl4fG6Vclfdi6nTM82oIrTT7HfitmxC4= -k8s.io/client-go v8.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.21.0 h1:gu5iGF4V6tfVCQ/R+8Hc0h7H1JuEhzyEi9S4R5LM8+Y= +k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= +k8s.io/apimachinery v0.21.0 h1:3Fx+41if+IRavNcKOz09FwEXDBG6ORh6iMsTSelhkMA= +k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag= +k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 026d36d6c..a2937073b 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -417,7 +417,9 @@ func updateContainerImageNameMaps() { listOptions := metav1.ListOptions{} listOptions.FieldSelector = fmt.Sprintf("spec.nodeName=%s", Computer) - pods, err := ClientSet.CoreV1().Pods("").List(listOptions) + + // Context was added as a parameter, but we want the same behavior as before: see https://pkg.go.dev/context#TODO + pods, err := ClientSet.CoreV1().Pods("").List(context.TODO(), listOptions) if err != nil { message := fmt.Sprintf("Error getting pods %s\nIt is ok to log here and continue, because the logs will be missing image and Name, but the logs will still have the containerID", err.Error()) From 2a0f4ecb0be63af173c7072c9f0543a380a065bd Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 13 Aug 2021 09:58:59 -0700 Subject: [PATCH 135/301] revert libsystemd0 update (#616) --- kubernetes/linux/setup.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 
51e5f9efb..b7cddffbc 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -23,9 +23,6 @@ cp -f $TMPDIR/logrotate.conf /etc/logrotate.d/ci-agent sudo apt-get update sudo apt-get install inotify-tools -y -#upgrade libsystemd0 to address CVE-2021-33910 -apt-get upgrade libsystemd0 -y - #used to parse response of kubelet apis #ref: https://packages.ubuntu.com/search?keywords=jq sudo apt-get install jq=1.5+dfsg-2 -y From 45f35aeb44f16f9f5de1e83c541f1a1ffd1a42e5 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 13 Aug 2021 13:36:03 -0700 Subject: [PATCH 136/301] updates for ci-prod release instructions (#619) --- ReleaseProcess.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/ReleaseProcess.md b/ReleaseProcess.md index 09de5e84f..7bd858561 100644 --- a/ReleaseProcess.md +++ b/ReleaseProcess.md @@ -13,14 +13,12 @@ Here are the high-level instructions to get the CIPROD`

` image for 2. Make PR to ci_dev branch and once the PR approved, merge the changes to ci_dev 3. Latest bits of ci_dev automatically deployed to CIDEV cluster in build subscription so just validated E2E to make sure everthing works 4. If everything validated in DEV, make merge PR from ci_dev and ci_prod and merge once this reviewed by dev team -6. Update following pipeline variables under ReleaseCandiate with version of chart and image tag - - CIHELMCHARTVERSION # For example, 2.7.4 - - CIImageTagSuffix # ciprod08072020 or ciprod08072020-1 etc. -7. Merge ci_dev and ci_prod branch which will trigger automatic deployment of latest bits to CIPROD cluster with CIPROD`
` image to test and scale cluters, AKS, AKS-Engine - > Note: production image automatically pushed to CIPROD Public cloud ACR which will inturn replicated to Public cloud MCR. +5. Once the PR to ci_prod approved, please go-ahead and merge, and wait for ci_prod build successfully completed +6. Once the merged PR build successfully completed, update the value of AGENT_IMAGE_TAG_SUFFIX pipeline variable by editing the Release [ci-prod-release](https://github-private.visualstudio.com/microsoft/_release?_a=releases&view=mine&definitionId=38) + > Note - value format of AGENT_IMAGE_TAG_SUFFIX pipeline should be in `
` for our releases +7. Create a release by selecting the targetted build version of the _docker-provider_Official-ci_prod release 8. Validate all the scenarios against clusters in build subscription and scale clusters - # 2. Perf and scale testing Deploy latest omsagent yaml with release candidate agent image in to supported k8s versions and validate all the critical scenarios. In perticular, throughly validate the updates going as part of this release and also make sure no regressions. If this passes, deploy onto scale cluster and validate perf and scale aspects. Scale cluster in AME cloud and co-ordinate with agent team who has access to this cluster to deploy the release candiate onto this cluster. From 10b2ea63d84acf3415fae8d290bd43b1f002f764 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 13 Aug 2021 14:05:08 -0700 Subject: [PATCH 137/301] cherry pick changes from ci_prod (#622) --- ReleaseNotes.md | 13 +++ kubernetes/linux/Dockerfile | 2 +- kubernetes/linux/setup.sh | 11 ++- kubernetes/omsagent.yaml | 20 ++--- source/plugins/go/src/oms.go | 168 +++++++++++++++++------------------ 5 files changed, 115 insertions(+), 99 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 0c51b737c..dc42e7d51 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,19 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 08/05/2021 - +##### Version microsoft/oms:ciprod08052021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021 (linux) +##### Code change log +- Linux Agent + - Fix for CPU spike which occurrs at around 6.30am UTC on every day because of unattended package upgrades + - Update MDSD build which has fixes for the following issues + - Undeterministic Core dump issue because of the non 200 status code and runtime exception stack unwindings + - Reduce the verbosity of the error logs for OMS & ODS code paths. 
+ - Increase Timeout for OMS Homing service API calls from 30s to 60s + - Fix for https://github.com/Azure/AKS/issues/2457 + - In replicaset, tailing of the mdsd.err log file to agent telemetry + + ### 07/13/2021 - ##### Version microsoft/oms:win-ciprod06112021-2 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021-2 (windows) ##### Code change log diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index b47841757..07af7f4a7 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod06112021 +ARG IMAGE_TAG=ciprod08052021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index b7cddffbc..df32afc7e 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -9,8 +9,8 @@ sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ dpkg-reconfigure --frontend=noninteractive locales && \ update-locale LANG=en_US.UTF-8 -#install oneagent - Official bits (06/24/2021) -wget https://github.com/microsoft/Docker-Provider/releases/download/06242021-oneagent/azure-mdsd_1.10.3-build.master.241_x86_64.deb +#install oneagent - Official bits (08/04/2021) +wget https://github.com/microsoft/Docker-Provider/releases/download/08042021-oneagent/azure-mdsd_1.10.1-build.master.251_x86_64.deb /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb cp -f $TMPDIR/mdsd.xml /etc/mdsd.d @@ -47,8 +47,8 @@ sudo apt-get update sudo apt-get install td-agent-bit=1.6.8 -y # install ruby2.6 -sudo apt-get install software-properties-common -y -sudo apt-add-repository ppa:brightbox/ruby-ng -y +sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F5DA5F09C3173AA6 +sudo echo "deb http://ppa.launchpad.net/brightbox/ruby-ng/ubuntu bionic main" >> /etc/apt/sources.list sudo apt-get update sudo apt-get install ruby2.6 ruby2.6-dev gcc make -y # fluentd v1 gem @@ -62,6 +62,9 @@ rm -f $TMPDIR/azure-mdsd*.deb rm -f $TMPDIR/mdsd.xml rm -f $TMPDIR/envmdsd +# remove build dependencies +sudo apt-get remove ruby2.6-dev gcc make -y + # Remove settings for cron.daily that conflict with the node's cron.daily. Since both are trying to rotate the same files # in /var/log at the same time, the rotation doesn't happen correctly and then the *.1 file is forever logged to. 
rm /etc/logrotate.d/alternatives /etc/logrotate.d/apt /etc/logrotate.d/azure-mdsd /etc/logrotate.d/rsyslog diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 855f3a8e1..49d4586c1 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -362,13 +362,13 @@ spec: schema-versions: "v1" spec: serviceAccountName: omsagent - dnsConfig: + dnsConfig: options: - name: ndots - value: "3" + value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021" imagePullPolicy: IfNotPresent resources: limits: @@ -384,7 +384,7 @@ spec: - name: AKS_REGION value: "VALUE_AKS_RESOURCE_REGION_VALUE" # this used for e2e test and setting this just emits some additional log statements which used for the e2e tests - - name: ISTEST + - name: ISTEST value: "true" #Uncomment below two lines for ACS clusters and set the cluster names manually. Also comment out the above two lines for ACS clusters #- name: ACS_RESOURCE_NAME @@ -446,7 +446,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021" imagePullPolicy: IfNotPresent resources: limits: @@ -589,7 +589,7 @@ spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021" imagePullPolicy: IfNotPresent resources: limits: @@ -604,8 +604,8 @@ spec: - name: AKS_REGION value: "VALUE_AKS_RESOURCE_REGION_VALUE" # this used for e2e test and setting this just emits some additional log statements which used for the e2e tests - - name: ISTEST - value: "true" + - name: ISTEST + value: "true" # Uncomment below two lines for ACS clusters and set the cluster names manually. 
Also comment out the above two lines for ACS clusters #- name: ACS_RESOURCE_NAME # value: "my_acs_cluster_name" @@ -754,10 +754,10 @@ spec: schema-versions: "v1" spec: serviceAccountName: omsagent - dnsConfig: + dnsConfig: options: - name: ndots - value: "3" + value: "3" containers: - name: omsagent-win image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021-2" diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index a2937073b..91a5b4b40 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -165,17 +165,17 @@ var ( // ADX tenantID AdxTenantID string //ADX client secret - AdxClientSecret string + AdxClientSecret string // container log or container log v2 tag name for oneagent route - MdsdContainerLogTagName string + MdsdContainerLogTagName string // kubemonagent events tag name for oneagent route MdsdKubeMonAgentEventsTagName string // InsightsMetrics tag name for oneagent route - MdsdInsightsMetricsTagName string + MdsdInsightsMetricsTagName string // flag to check if its Windows OS IsWindows bool - // container type - ContainerType string + // container type + ContainerType string // flag to check whether LA AAD MSI Auth Enabled or not IsAADMSIAuthMode bool ) @@ -206,7 +206,7 @@ var ( // IngestionAuthTokenUpdateMutex read and write mutex access for ODSIngestionAuthToken IngestionAuthTokenUpdateMutex = &sync.Mutex{} // ODSIngestionAuthToken for windows agent AAD MSI Auth - ODSIngestionAuthToken string + ODSIngestionAuthToken string ) var ( @@ -355,12 +355,12 @@ const ( ) // DataType to be used as enum per data type socket client creation -type DataType int +type DataType int const ( // DataType to be used as enum per data type socket client creation ContainerLogV2 DataType = iota - KubeMonAgentEvents - InsightsMetrics + KubeMonAgentEvents + InsightsMetrics ) func createLogger() *log.Logger { @@ -610,7 +610,7 @@ func flushKubeMonAgentEventRecords() { Message: k, Tags: fmt.Sprintf("%s", tagJson), } - laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) + laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) var stringMap map[string]string jsonBytes, err := json.Marshal(&laKubeMonAgentEventsRecord) if err != nil { @@ -623,10 +623,10 @@ func flushKubeMonAgentEventRecords() { Log(message) SendException(message) } else { - msgPackEntry := MsgPackEntry{ + msgPackEntry := MsgPackEntry{ Record: stringMap, } - msgPackEntries = append(msgPackEntries, msgPackEntry) + msgPackEntries = append(msgPackEntries, msgPackEntry) } } } @@ -649,23 +649,23 @@ func flushKubeMonAgentEventRecords() { Message: k, Tags: fmt.Sprintf("%s", tagJson), } - laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) + laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) var stringMap map[string]string jsonBytes, err := json.Marshal(&laKubeMonAgentEventsRecord) if err != nil { message := fmt.Sprintf("Error while Marshalling laKubeMonAgentEventsRecord to json bytes: %s", err.Error()) Log(message) SendException(message) - } else { - if err := json.Unmarshal(jsonBytes, &stringMap); err != nil { + } else { + if err := json.Unmarshal(jsonBytes, &stringMap); err != nil { message := fmt.Sprintf("Error while UnMarhalling json bytes to stringmap: %s", err.Error()) Log(message) SendException(message) } else { - msgPackEntry := MsgPackEntry{ + msgPackEntry := MsgPackEntry{ Record: stringMap, - } - 
msgPackEntries = append(msgPackEntries, msgPackEntry) + } + msgPackEntries = append(msgPackEntries, msgPackEntry) } } } @@ -698,66 +698,66 @@ func flushKubeMonAgentEventRecords() { Message: "No errors", Tags: fmt.Sprintf("%s", tagJson), } - laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) + laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) var stringMap map[string]string jsonBytes, err := json.Marshal(&laKubeMonAgentEventsRecord) - if err != nil { + if err != nil { message := fmt.Sprintf("Error while Marshalling laKubeMonAgentEventsRecord to json bytes: %s", err.Error()) Log(message) SendException(message) } else { - if err := json.Unmarshal(jsonBytes, &stringMap); err != nil { + if err := json.Unmarshal(jsonBytes, &stringMap); err != nil { message := fmt.Sprintf("Error while UnMarshalling json bytes to stringmap: %s", err.Error()) Log(message) SendException(message) - } else { - msgPackEntry := MsgPackEntry{ + } else { + msgPackEntry := MsgPackEntry{ Record: stringMap, } - msgPackEntries = append(msgPackEntries, msgPackEntry) + msgPackEntries = append(msgPackEntries, msgPackEntry) } } } } - if (IsWindows == false && len(msgPackEntries) > 0) { //for linux, mdsd route + if (IsWindows == false && len(msgPackEntries) > 0) { //for linux, mdsd route if IsAADMSIAuthMode == true && strings.HasPrefix(MdsdKubeMonAgentEventsTagName, MdsdOutputStreamIdTagPrefix) == false { Log("Info::mdsd::obtaining output stream id for data type: %s", KubeMonAgentEventDataType) MdsdKubeMonAgentEventsTagName = extension.GetInstance(FLBLogger, ContainerType).GetOutputStreamId(KubeMonAgentEventDataType) - } + } Log("Info::mdsd:: using mdsdsource name for KubeMonAgentEvents: %s", MdsdKubeMonAgentEventsTagName) - msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdKubeMonAgentEventsTagName, msgPackEntries) + msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdKubeMonAgentEventsTagName, msgPackEntries) if MdsdKubeMonMsgpUnixSocketClient == nil { Log("Error::mdsd::mdsd connection for KubeMonAgentEvents does not exist. re-connecting ...") CreateMDSDClient(KubeMonAgentEvents, ContainerType) if MdsdKubeMonMsgpUnixSocketClient == nil { - Log("Error::mdsd::Unable to create mdsd client for KubeMonAgentEvents. Please check error log.") + Log("Error::mdsd::Unable to create mdsd client for KubeMonAgentEvents. Please check error log.") ContainerLogTelemetryMutex.Lock() defer ContainerLogTelemetryMutex.Unlock() - KubeMonEventsMDSDClientCreateErrors += 1 - } + KubeMonEventsMDSDClientCreateErrors += 1 + } } - if MdsdKubeMonMsgpUnixSocketClient != nil { + if MdsdKubeMonMsgpUnixSocketClient != nil { deadline := 10 * time.Second - MdsdKubeMonMsgpUnixSocketClient.SetWriteDeadline(time.Now().Add(deadline)) //this is based of clock time, so cannot reuse + MdsdKubeMonMsgpUnixSocketClient.SetWriteDeadline(time.Now().Add(deadline)) //this is based of clock time, so cannot reuse bts, er := MdsdKubeMonMsgpUnixSocketClient.Write(msgpBytes) - elapsed = time.Since(start) + elapsed = time.Since(start) if er != nil { message := fmt.Sprintf("Error::mdsd::Failed to write to kubemonagent mdsd %d records after %s. Will retry ... 
error : %s", len(msgPackEntries), elapsed, er.Error()) Log(message) if MdsdKubeMonMsgpUnixSocketClient != nil { MdsdKubeMonMsgpUnixSocketClient.Close() MdsdKubeMonMsgpUnixSocketClient = nil - } + } SendException(message) } else { numRecords := len(msgPackEntries) Log("FlushKubeMonAgentEventRecords::Info::Successfully flushed %d records that was %d bytes in %s", numRecords, bts, elapsed) // Send telemetry to AppInsights resource SendEvent(KubeMonAgentEventsFlushedEvent, telemetryDimensions) - } + } } else { - Log("Error::mdsd::Unable to create mdsd client for KubeMonAgentEvents. Please check error log.") - } + Log("Error::mdsd::Unable to create mdsd client for KubeMonAgentEvents. Please check error log.") + } } else if len(laKubeMonAgentEventsRecords) > 0 { //for windows, ODS direct kubeMonAgentEventEntry := KubeMonAgentEventBlob{ DataType: KubeMonAgentEventDataType, @@ -784,10 +784,10 @@ func flushKubeMonAgentEventRecords() { if IsAADMSIAuthMode == true { IngestionAuthTokenUpdateMutex.Lock() ingestionAuthToken := ODSIngestionAuthToken - IngestionAuthTokenUpdateMutex.Unlock() - if ingestionAuthToken == "" { - Log("Error::ODS Ingestion Auth Token is empty. Please check error log.") - } + IngestionAuthTokenUpdateMutex.Unlock() + if ingestionAuthToken == "" { + Log("Error::ODS Ingestion Auth Token is empty. Please check error log.") + } req.Header.Set("Authorization", "Bearer "+ingestionAuthToken) } @@ -900,15 +900,15 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int message := fmt.Sprintf("PostTelegrafMetricsToLA::Info:derived %v metrics from %v timeseries", len(laMetrics), len(telegrafRecords)) Log(message) } - + if IsWindows == false { //for linux, mdsd route - var msgPackEntries []MsgPackEntry + var msgPackEntries []MsgPackEntry var i int start := time.Now() var elapsed time.Duration - for i = 0; i < len(laMetrics); i++ { - var interfaceMap map[string]interface{} + for i = 0; i < len(laMetrics); i++ { + var interfaceMap map[string]interface{} stringMap := make(map[string]string) jsonBytes, err := json.Marshal(*laMetrics[i]) if err != nil { @@ -917,35 +917,35 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int SendException(message) return output.FLB_OK } else { - if err := json.Unmarshal(jsonBytes, &interfaceMap); err != nil { + if err := json.Unmarshal(jsonBytes, &interfaceMap); err != nil { message := fmt.Sprintf("Error while UnMarshalling json bytes to interfaceMap: %s", err.Error()) Log(message) SendException(message) return output.FLB_OK - } else { + } else { for key, value := range interfaceMap { strKey := fmt.Sprintf("%v", key) strValue := fmt.Sprintf("%v", value) stringMap[strKey] = strValue - } - msgPackEntry := MsgPackEntry{ + } + msgPackEntry := MsgPackEntry{ Record: stringMap, } - msgPackEntries = append(msgPackEntries, msgPackEntry) - } + msgPackEntries = append(msgPackEntries, msgPackEntry) + } } } - if (len(msgPackEntries) > 0) { + if (len(msgPackEntries) > 0) { if IsAADMSIAuthMode == true && (strings.HasPrefix(MdsdInsightsMetricsTagName, MdsdOutputStreamIdTagPrefix) == false) { Log("Info::mdsd::obtaining output stream id for InsightsMetricsDataType since Log Analytics AAD MSI Auth Enabled") MdsdInsightsMetricsTagName = extension.GetInstance(FLBLogger, ContainerType).GetOutputStreamId(InsightsMetricsDataType) - } - msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdInsightsMetricsTagName, msgPackEntries) + } + msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdInsightsMetricsTagName, msgPackEntries) if 
MdsdInsightsMetricsMsgpUnixSocketClient == nil { Log("Error::mdsd::mdsd connection does not exist. re-connecting ...") CreateMDSDClient(InsightsMetrics, ContainerType) if MdsdInsightsMetricsMsgpUnixSocketClient == nil { - Log("Error::mdsd::Unable to create mdsd client for insights metrics. Please check error log.") + Log("Error::mdsd::Unable to create mdsd client for insights metrics. Please check error log.") ContainerLogTelemetryMutex.Lock() defer ContainerLogTelemetryMutex.Unlock() InsightsMetricsMDSDClientCreateErrors += 1 @@ -954,7 +954,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int } deadline := 10 * time.Second - MdsdInsightsMetricsMsgpUnixSocketClient.SetWriteDeadline(time.Now().Add(deadline)) //this is based of clock time, so cannot reuse + MdsdInsightsMetricsMsgpUnixSocketClient.SetWriteDeadline(time.Now().Add(deadline)) //this is based of clock time, so cannot reuse bts, er := MdsdInsightsMetricsMsgpUnixSocketClient.Write(msgpBytes) elapsed = time.Since(start) @@ -969,7 +969,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int ContainerLogTelemetryMutex.Lock() defer ContainerLogTelemetryMutex.Unlock() - InsightsMetricsMDSDClientCreateErrors += 1 + InsightsMetricsMDSDClientCreateErrors += 1 return output.FLB_RETRY } else { numTelegrafMetricsRecords := len(msgPackEntries) @@ -977,7 +977,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int Log("Success::mdsd::Successfully flushed %d telegraf metrics records that was %d bytes to mdsd in %s ", numTelegrafMetricsRecords, bts, elapsed) } } - + } else { // for windows, ODS direct var metrics []laTelegrafMetric @@ -1019,9 +1019,9 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int if IsAADMSIAuthMode == true { IngestionAuthTokenUpdateMutex.Lock() ingestionAuthToken := ODSIngestionAuthToken - IngestionAuthTokenUpdateMutex.Unlock() - if ingestionAuthToken == "" { - message := "Error::ODS Ingestion Auth Token is empty. Please check error log." + IngestionAuthTokenUpdateMutex.Unlock() + if ingestionAuthToken == "" { + message := "Error::ODS Ingestion Auth Token is empty. Please check error log." 
Log(message) return output.FLB_RETRY } @@ -1232,7 +1232,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { numContainerLogRecords := 0 if len(msgPackEntries) > 0 && ContainerLogsRouteV2 == true { - //flush to mdsd + //flush to mdsd if IsAADMSIAuthMode == true && strings.HasPrefix(MdsdContainerLogTagName, MdsdOutputStreamIdTagPrefix) == false { Log("Info::mdsd::obtaining output stream id") if ContainerLogSchemaV2 == true { @@ -1242,7 +1242,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { } Log("Info::mdsd:: using mdsdsource name: %s", MdsdContainerLogTagName) } - + fluentForward := MsgPackForward{ Tag: MdsdContainerLogTagName, Entries: msgPackEntries, @@ -1359,7 +1359,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { numContainerLogRecords = len(dataItemsADX) Log("Success::ADX::Successfully wrote %d container log records to ADX in %s", numContainerLogRecords, elapsed) - } else { //ODS + } else if ((ContainerLogSchemaV2 == true && len(dataItemsLAv2) > 0) || len(dataItemsLAv1) > 0) { //ODS var logEntry interface{} recordType := "" loglinesCount := 0 @@ -1401,19 +1401,19 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { if ResourceCentric == true { req.Header.Set("x-ms-AzureResourceId", ResourceID) } - + if IsAADMSIAuthMode == true { IngestionAuthTokenUpdateMutex.Lock() ingestionAuthToken := ODSIngestionAuthToken IngestionAuthTokenUpdateMutex.Unlock() - if ingestionAuthToken == "" { - Log("Error::ODS Ingestion Auth Token is empty. Please check error log.") + if ingestionAuthToken == "" { + Log("Error::ODS Ingestion Auth Token is empty. Please check error log.") return output.FLB_RETRY } // add authorization header to the req req.Header.Set("Authorization", "Bearer "+ingestionAuthToken) - } - + } + resp, err := HTTPClient.Do(req) elapsed = time.Since(start) @@ -1422,7 +1422,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { Log(message) // Commenting this out for now. TODO - Add better telemetry for ods errors using aggregation //SendException(message) - + Log("Failed to flush %d records after %s", loglinesCount, elapsed) return output.FLB_RETRY @@ -1510,7 +1510,7 @@ func GetContainerIDK8sNamespacePodNameFromFileName(filename string) (string, str } // InitializePlugin reads and populates plugin configuration -func InitializePlugin(pluginConfPath string, agentVersion string) { +func InitializePlugin(pluginConfPath string, agentVersion string) { go func() { isTest := os.Getenv("ISTEST") if strings.Compare(strings.ToLower(strings.TrimSpace(isTest)), "true") == 0 { @@ -1550,10 +1550,10 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { } ContainerType = os.Getenv(ContainerTypeEnv) - Log("Container Type %s", ContainerType) + Log("Container Type %s", ContainerType) osType := os.Getenv("OS_TYPE") - IsWindows = false + IsWindows = false // Linux if strings.Compare(strings.ToLower(osType), "windows") != 0 { Log("Reading configuration for Linux from %s", pluginConfPath) @@ -1572,7 +1572,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { SendException(message) time.Sleep(30 * time.Second) log.Fatalln(message) - } + } OMSEndpoint = "https://" + WorkspaceID + ".ods." 
+ LogAnalyticsWorkspaceDomain + "/OperationalData.svc/PostJsonDataItems" // Populate Computer field containerHostName, err1 := ioutil.ReadFile(pluginConfig["container_host_file_path"]) @@ -1602,7 +1602,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { } } else { // windows - IsWindows = true + IsWindows = true Computer = os.Getenv("HOSTNAME") WorkspaceID = os.Getenv("WSID") logAnalyticsDomain := os.Getenv("DOMAIN") @@ -1614,7 +1614,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { IsAADMSIAuthMode = false if strings.Compare(strings.ToLower(os.Getenv(AADMSIAuthMode)), "true") == 0 { IsAADMSIAuthMode = true - Log("AAD MSI Auth Mode Configured") + Log("AAD MSI Auth Mode Configured") } ResourceID = os.Getenv(envAKSResourceID) @@ -1689,13 +1689,13 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { Log(message) } - PluginConfiguration = pluginConfig + PluginConfiguration = pluginConfig ContainerLogsRoute := strings.TrimSpace(strings.ToLower(os.Getenv("AZMON_CONTAINER_LOGS_ROUTE"))) Log("AZMON_CONTAINER_LOGS_ROUTE:%s", ContainerLogsRoute) - ContainerLogsRouteV2 = false - ContainerLogsRouteADX = false + ContainerLogsRouteV2 = false + ContainerLogsRouteADX = false if strings.Compare(ContainerLogsRoute, ContainerLogsADXRoute) == 0 { //check if adx clusteruri, clientid & secret are set @@ -1728,14 +1728,14 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { Log("Routing container logs thru %s route...", ContainerLogsADXRoute) fmt.Fprintf(os.Stdout, "Routing container logs thru %s route...\n", ContainerLogsADXRoute) } - } else if strings.Compare(strings.ToLower(osType), "windows") != 0 { //for linux, oneagent will be default route + } else if strings.Compare(strings.ToLower(osType), "windows") != 0 { //for linux, oneagent will be default route ContainerLogsRouteV2 = true //default is mdsd route - if strings.Compare(ContainerLogsRoute, ContainerLogsV1Route) == 0 { + if strings.Compare(ContainerLogsRoute, ContainerLogsV1Route) == 0 { ContainerLogsRouteV2 = false //fallback option when hiddensetting set } Log("Routing container logs thru %s route...", ContainerLogsRoute) fmt.Fprintf(os.Stdout, "Routing container logs thru %s route... 
\n", ContainerLogsRoute) - } + } if ContainerLogsRouteV2 == true { CreateMDSDClient(ContainerLogV2, ContainerType) @@ -1748,7 +1748,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { if IsWindows == false { // mdsd linux specific Log("Creating MDSD clients for KubeMonAgentEvents & InsightsMetrics") - CreateMDSDClient(KubeMonAgentEvents, ContainerType) + CreateMDSDClient(KubeMonAgentEvents, ContainerType) CreateMDSDClient(InsightsMetrics, ContainerType) } @@ -1787,7 +1787,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { } MdsdInsightsMetricsTagName = MdsdInsightsMetricsSourceName - MdsdKubeMonAgentEventsTagName = MdsdKubeMonAgentEventsSourceName + MdsdKubeMonAgentEventsTagName = MdsdKubeMonAgentEventsSourceName Log("ContainerLogsRouteADX: %v, IsWindows: %v, IsAADMSIAuthMode = %v \n", ContainerLogsRouteADX, IsWindows, IsAADMSIAuthMode) if !ContainerLogsRouteADX && IsWindows && IsAADMSIAuthMode { Log("defaultIngestionAuthTokenRefreshIntervalSeconds = %d \n", defaultIngestionAuthTokenRefreshIntervalSeconds) From ad31c55dbc49bc49bfe3cb18bb1d44fbda974947 Mon Sep 17 00:00:00 2001 From: Vladimir Date: Sat, 14 Aug 2021 01:18:23 +0100 Subject: [PATCH 138/301] Support az login for passwords starting with dash ('-') (#626) Co-authored-by: Vladimir Babichev --- scripts/onboarding/managed/disable-monitoring.sh | 2 +- scripts/onboarding/managed/enable-monitoring.sh | 2 +- scripts/onboarding/managed/upgrade-monitoring.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/onboarding/managed/disable-monitoring.sh b/scripts/onboarding/managed/disable-monitoring.sh index 29b755331..40b0793bc 100644 --- a/scripts/onboarding/managed/disable-monitoring.sh +++ b/scripts/onboarding/managed/disable-monitoring.sh @@ -116,7 +116,7 @@ remove_monitoring_tags() if [ "$isUsingServicePrincipal" = true ] ; then echo "login to the azure using provided service principal creds" - az login --service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId + az login --service-principal --username="$servicePrincipalClientId" --password="$servicePrincipalClientSecret" --tenant="$servicePrincipalTenantId" else echo "login to the azure interactively" az login --use-device-code diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index 588d193a3..5fc241517 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -547,7 +547,7 @@ install_helm_chart() { login_to_azure() { if [ "$isUsingServicePrincipal" = true ]; then echo "login to the azure using provided service principal creds" - az login --service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId + az login --service-principal --username="$servicePrincipalClientId" --password="$servicePrincipalClientSecret" --tenant="$servicePrincipalTenantId" else echo "login to the azure interactively" az login --use-device-code diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 83643f3fa..edd48c938 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -266,7 +266,7 @@ upgrade_helm_chart_release() { login_to_azure() { if [ "$isUsingServicePrincipal" = true ]; then echo "login to the azure using provided service principal creds" - az login 
--service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId + az login --service-principal --username="$servicePrincipalClientId" --password="$servicePrincipalClientSecret" --tenant="$servicePrincipalTenantId" else echo "login to the azure interactively" az login --use-device-code From 57beb59f38de4626b6f635a430b1e1bfa5d656ff Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 17 Aug 2021 15:15:03 -0700 Subject: [PATCH 139/301] Gangams/add telemetry fbit settings (#628) * add telemetry to track fbit settings * add telemetry to track fbit settings --- source/plugins/go/src/telemetry.go | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index debe003e4..31818dbb3 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -145,8 +145,8 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { ContainerLogTelemetryMutex.Unlock() if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 { + telemetryDimensions := make(map[string]string) if strings.Compare(strings.ToLower(os.Getenv("CONTAINER_TYPE")), "prometheussidecar") == 0 { - telemetryDimensions := make(map[string]string) telemetryDimensions["CustomPromMonitorPods"] = promMonitorPods if promMonitorPodsNamespaceLength > 0 { telemetryDimensions["CustomPromMonitorPodsNamespaceLength"] = strconv.Itoa(promMonitorPodsNamespaceLength) @@ -168,7 +168,23 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { SendEvent(eventNameCustomPrometheusSidecarHeartbeat, telemetryDimensions) } else { - SendEvent(eventNameDaemonSetHeartbeat, make(map[string]string)) + fbitFlushIntervalSecs := os.Getenv("FBIT_SERVICE_FLUSH_INTERVAL") + if fbitFlushIntervalSecs != "" { + telemetryDimensions["FbitServiceFlushIntervalSecs"] = fbitFlushIntervalSecs + } + fbitTailBufferChunkSizeMBs := os.Getenv("FBIT_TAIL_BUFFER_CHUNK_SIZE") + if fbitTailBufferChunkSizeMBs != "" { + telemetryDimensions["FbitBufferChunkSizeMBs"] = fbitTailBufferChunkSizeMBs + } + fbitTailBufferMaxSizeMBs := os.Getenv("FBIT_TAIL_BUFFER_MAX_SIZE") + if fbitTailBufferMaxSizeMBs != "" { + telemetryDimensions["FbitBufferMaxSizeMBs"] = fbitTailBufferMaxSizeMBs + } + fbitTailMemBufLimitMBs := os.Getenv("FBIT_TAIL_MEM_BUF_LIMIT") + if fbitTailMemBufLimitMBs != "" { + telemetryDimensions["FbitMemBufLimitSizeMBs"] = fbitTailMemBufLimitMBs + } + SendEvent(eventNameDaemonSetHeartbeat, telemetryDimensions) flushRateMetric := appinsights.NewMetricTelemetry(metricNameAvgFlushRate, flushRate) TelemetryClient.Track(flushRateMetric) logRateMetric := appinsights.NewMetricTelemetry(metricNameAvgLogGenerationRate, logRate) From cf4775a802ca8c7c3aac451274459236e2b79c47 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 19 Aug 2021 15:05:53 -0700 Subject: [PATCH 140/301] check onboarding status (#629) --- kubernetes/linux/main.sh | 47 +++++++++++++++++++++++++++++++++++++++ kubernetes/linux/setup.sh | 2 +- 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 4579787b3..4986e3113 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -38,6 +38,51 @@ waitforlisteneronTCPport() { fi } +checkAgentOnboardingStatus() { + local sleepdurationsecs=1 + local totalsleptsecs=0 + local isaadmsiauthmode=$1 + local waittimesecs=$2 + local numeric='^[0-9]+$' 
+ + if [ -z "$1" ] || [ -z "$2" ]; then + echo "${FUNCNAME[0]} called with incorrect arguments<$1 , $2>. Required arguments <#isaadmsiauthmode, #wait-time-in-seconds>" + return -1 + else + + if [[ $waittimesecs =~ $numeric ]]; then + successMessage="Onboarding success" + failureMessage="Failed to register certificate with OMS Homing service, giving up" + if [ "${isaadmsiauthmode}" == "true" ]; then + successMessage="Loaded data sources" + failureMessage="Failed to load data sources into config" + fi + while true + do + if [ $totalsleptsecs -gt $waittimesecs ]; then + echo "${FUNCNAME[0]} giving up checking agent onboarding status after $totalsleptsecs secs" + return 1 + fi + + if grep "$successMessage" "${MDSD_LOG}/mdsd.info"; then + echo "Onboarding success" + return 0 + elif grep "$failureMessage" "${MDSD_LOG}/mdsd.err"; then + echo "Onboarding Failure: Reason: Failed to onboard the agent" + echo "Onboarding Failure: Please verify log analytics workspace configuration such as existence of the workspace, workspace key and workspace enabled for public ingestion" + return 1 + fi + sleep $sleepdurationsecs + totalsleptsecs=$(($totalsleptsecs+1)) + done + else + echo "${FUNCNAME[0]} called with non-numeric arguments<$2>. Required arguments <#wait-time-in-seconds>" + return -1 + fi + fi +} + + #using /var/opt/microsoft/docker-cimprov/state instead of /var/opt/microsoft/omsagent/state since the latter gets deleted during onboarding mkdir -p /var/opt/microsoft/docker-cimprov/state @@ -672,6 +717,8 @@ service rsyslog stop echo "getting rsyslog status..." service rsyslog status +checkAgentOnboardingStatus $AAD_MSI_AUTH_MODE 30 + shutdown() { pkill -f mdsd } diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index df32afc7e..c14007d35 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -10,7 +10,7 @@ sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ update-locale LANG=en_US.UTF-8 #install oneagent - Official bits (08/04/2021) -wget https://github.com/microsoft/Docker-Provider/releases/download/08042021-oneagent/azure-mdsd_1.10.1-build.master.251_x86_64.deb +wget https://github.com/microsoft/Docker-Provider/releases/download/06242021-oneagent/azure-mdsd_1.10.3-build.master.257_x86_64.deb /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb cp -f $TMPDIR/mdsd.xml /etc/mdsd.d From da55fe53612aa9900331b6a0c798ea7f46d1fbf1 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 19 Aug 2021 15:06:32 -0700 Subject: [PATCH 141/301] Gangams/arc k8s conformance test updates (#617) * conf test updates * clean up * wip * update with mcr cidev image * handle log path * cleanup * clean up * wip * working * update for mcr image * minor * image update * handle latency of connected cluster resource creation * update conftest image --- README.md | 2 +- test/e2e/conformance.yaml | 15 ++ test/e2e/e2e-tests.yaml | 21 +- test/e2e/src/common/constants.py | 6 +- test/e2e/src/core/Dockerfile | 17 +- test/e2e/src/core/conftest.py | 38 +-- test/e2e/src/core/e2e_tests.sh | 200 ++++++++++++++- test/e2e/src/core/setup_failure_handler.py | 18 ++ test/e2e/src/tests/test_ds_workflows.py | 28 ++- test/e2e/src/tests/test_e2e_workflows.py | 231 +++++++++--------- .../tests/test_node_metrics_e2e_workflow.py | 66 ++--- .../tests/test_pod_metrics_e2e_workflow.py | 15 +- test/e2e/src/tests/test_resource_status.py | 13 +- test/e2e/src/tests/test_rs_workflows.py | 18 +- 14 files changed, 500 insertions(+), 188 deletions(-) create mode 100644 test/e2e/conformance.yaml 
create mode 100644 test/e2e/src/core/setup_failure_handler.py diff --git a/README.md b/README.md index 73bf858cd..e3ceedc8e 100644 --- a/README.md +++ b/README.md @@ -326,7 +326,7 @@ For DEV and PROD branches, automatically deployed latest yaml with latest agent docker build -f ./core/Dockerfile -t /: . docker push /: ``` -3. update existing agentest image tag in e2e-tests.yaml with newly built image tag with MCR repo +3. update existing agentest image tag in e2e-tests.yaml & conformance.yaml with newly built image tag with MCR repo # Scenario Tests Clusters are used in release pipeline already has the yamls under test\scenario deployed. Make sure to validate these scenarios. diff --git a/test/e2e/conformance.yaml b/test/e2e/conformance.yaml new file mode 100644 index 000000000..ff790e690 --- /dev/null +++ b/test/e2e/conformance.yaml @@ -0,0 +1,15 @@ +sonobuoy-config: + driver: Job + plugin-name: azure-arc-ci-conformance + result-format: junit +spec: + image: mcr.microsoft.com/azuremonitor/containerinsights/cidev:ciconftest08142021 + imagePullPolicy: Always + name: plugin + resources: {} + volumes: + - name: results + emptyDir: {} + volumeMounts: + - mountPath: /tmp/results + name: results diff --git a/test/e2e/e2e-tests.yaml b/test/e2e/e2e-tests.yaml index 06dfa1fb0..25817be12 100644 --- a/test/e2e/e2e-tests.yaml +++ b/test/e2e/e2e-tests.yaml @@ -68,7 +68,7 @@ data: containers: [] restartPolicy: Never serviceAccountName: sonobuoy-serviceaccount - nodeSelector: + nodeSelector: kubernetes.io/os: linux tolerations: - effect: NoSchedule @@ -84,8 +84,11 @@ data: result-format: junit spec: env: + # this should be false if the test environment is non ARC K8s for example AKS + - name: IS_NON_ARC_K8S_TEST_ENVIRONMENT + value: "true" # Update values of CLIENT_ID, CLIENT_SECRET of the service principal which has permission to query LA ad Metrics API - # Update value of TENANT_ID corresponding your Azure Service principal + # Update value of TENANT_ID corresponding your Azure Service principal - name: CLIENT_ID value: "SP_CLIENT_ID_VALUE" - name: CLIENT_SECRET @@ -93,15 +96,15 @@ data: - name: TENANT_ID value: "SP_TENANT_ID_VALUE" - name: DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES - value: "10" + value: "10" - name: DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES - value: "10" + value: "10" - name: AGENT_POD_EXPECTED_RESTART_COUNT - value: "0" + value: "0" - name: AZURE_CLOUD - value: "AZURE_PUBLIC_CLOUD" - # image tag should be updated if new tests being added after this image - image: mcr.microsoft.com/azuremonitor/containerinsights/cidev:ciagenttest02152021 + value: "AZURE_PUBLIC_CLOUD" + # image tag should be updated if new tests being added after this image + image: mcr.microsoft.com/azuremonitor/containerinsights/cidev:ciconftest08142021 imagePullPolicy: IfNotPresent name: plugin resources: {} @@ -144,7 +147,7 @@ spec: name: output-volume restartPolicy: Never serviceAccountName: sonobuoy-serviceaccount - nodeSelector: + nodeSelector: kubernetes.io/os: linux tolerations: - key: "kubernetes.io/e2e-evict-taint-key" diff --git a/test/e2e/src/common/constants.py b/test/e2e/src/common/constants.py index 770964cb5..392b10554 100644 --- a/test/e2e/src/common/constants.py +++ b/test/e2e/src/common/constants.py @@ -40,6 +40,8 @@ TIMEOUT = 300 +# WAIT TIME BEFORE READING THE AGENT LOGS +AGENT_WAIT_TIME_SECS = "180" # Azure Monitor for Container Extension related AGENT_RESOURCES_NAMESPACE = 'kube-system' AGENT_DEPLOYMENT_NAME = 'omsagent-rs' @@ -47,7 +49,9 @@ AGENT_WIN_DAEMONSET_NAME = 'omsagent-win' 
AGENT_DEPLOYMENT_PODS_LABEL_SELECTOR = 'rsName=omsagent-rs' -AGENT_DAEMON_SET_PODS_LABEL_SELECTOR = 'component=oms-agent' +AGENT_DAEMON_SET_PODS_LABEL_SELECTOR = 'dsName=omsagent-ds' +AGENT_DAEMON_SET_PODS_LABEL_SELECTOR_NON_ARC = 'component=oms-agent' +AGENT_FLUENTD_LOG_PATH = '/var/opt/microsoft/docker-cimprov/log/fluentd.log' AGENT_OMSAGENT_LOG_PATH = '/var/opt/microsoft/omsagent/log/omsagent.log' AGENT_REPLICASET_WORKFLOWS = ["kubePodInventoryEmitStreamSuccess", "kubeNodeInventoryEmitStreamSuccess"] diff --git a/test/e2e/src/core/Dockerfile b/test/e2e/src/core/Dockerfile index 9f85bdf4c..cd85aee40 100644 --- a/test/e2e/src/core/Dockerfile +++ b/test/e2e/src/core/Dockerfile @@ -1,11 +1,26 @@ FROM python:3.6 -RUN pip install --trusted-host pypi.org --trusted-host files.pythonhosted.org pytest pytest-xdist filelock requests kubernetes adal msrestazure +RUN pip install --trusted-host pypi.org --trusted-host files.pythonhosted.org pytest pytest-xdist filelock requests kubernetes adal msrestazure RUN curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash \ && helm version +RUN apt-get update && apt-get -y upgrade && \ + apt-get -f -y install curl apt-transport-https lsb-release gnupg python3-pip python-pip && \ + curl -sL https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > /etc/apt/trusted.gpg.d/microsoft.asc.gpg && \ + CLI_REPO=$(lsb_release -cs) && \ + echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ ${CLI_REPO} main" \ + > /etc/apt/sources.list.d/azure-cli.list && \ + apt-get update && \ + apt-get install -y azure-cli && \ + rm -rf /var/lib/apt/lists/* + +RUN python3 -m pip install junit_xml + +COPY --from=lachlanevenson/k8s-kubectl:v1.20.5 /usr/local/bin/kubectl /usr/local/bin/kubectl + COPY ./core/e2e_tests.sh / +COPY ./core/setup_failure_handler.py / COPY ./core/pytest.ini /e2etests/ COPY ./core/conftest.py /e2etests/ COPY ./core/helper.py /e2etests/ diff --git a/test/e2e/src/core/conftest.py b/test/e2e/src/core/conftest.py index e659d5189..02f644a18 100644 --- a/test/e2e/src/core/conftest.py +++ b/test/e2e/src/core/conftest.py @@ -22,42 +22,48 @@ def env_dict(): create_results_dir('/tmp/results') # Setting some environment variables - env_dict['SETUP_LOG_FILE'] = '/tmp/results/setup' + env_dict['SETUP_LOG_FILE'] = '/tmp/results/setup' env_dict['TEST_AGENT_LOG_FILE'] = '/tmp/results/containerinsights' env_dict['NUM_TESTS_COMPLETED'] = 0 - + print("Starting setup...") append_result_output("Starting setup...\n", env_dict['SETUP_LOG_FILE']) - + # Collecting environment variables env_dict['TENANT_ID'] = os.getenv('TENANT_ID') env_dict['CLIENT_ID'] = os.getenv('CLIENT_ID') env_dict['CLIENT_SECRET'] = os.getenv('CLIENT_SECRET') - + env_dict['IS_NON_ARC_K8S_TEST_ENVIRONMENT'] = os.getenv('IS_NON_ARC_K8S_TEST_ENVIRONMENT') + # released agent for Arc K8s still uses omsagent and when we rollout the agent with mdsd + # this shouldnt set after agent rollout with mdsd + env_dict['USING_OMSAGENT_BASE_AGENT'] = os.getenv('USING_OMSAGENT_BASE_AGENT') + + waitTimeInterval = int(os.getenv('AGENT_WAIT_TIME_SECS')) if os.getenv('AGENT_WAIT_TIME_SECS') else constants.AGENT_WAIT_TIME_SECS + env_dict['AGENT_WAIT_TIME_SECS'] = waitTimeInterval # get default query time interval for log analytics queries queryTimeInterval = int(os.getenv('DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES')) if os.getenv('DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES') else constants.DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES # add minute suffix since this format required for LA queries 
env_dict['DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES'] = str(queryTimeInterval) + "m" - + # get default query time interval for metrics queries env_dict['DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES'] = int(os.getenv('DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES')) if os.getenv('DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES') else constants.DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES - - - # expected agent pod restart count + + + # expected agent pod restart count env_dict['AGENT_POD_EXPECTED_RESTART_COUNT'] = int(os.getenv('AGENT_POD_EXPECTED_RESTART_COUNT')) if os.getenv('AGENT_POD_EXPECTED_RESTART_COUNT') else constants.AGENT_POD_EXPECTED_RESTART_COUNT # default to azure public cloud if AZURE_CLOUD not specified env_dict['AZURE_ENDPOINTS'] = constants.AZURE_CLOUD_DICT.get(os.getenv('AZURE_CLOUD')) if os.getenv('AZURE_CLOUD') else constants.AZURE_PUBLIC_CLOUD_ENDPOINTS - + if not env_dict.get('TENANT_ID'): pytest.fail('ERROR: variable TENANT_ID is required.') - + if not env_dict.get('CLIENT_ID'): pytest.fail('ERROR: variable CLIENT_ID is required.') - + if not env_dict.get('CLIENT_SECRET'): pytest.fail('ERROR: variable CLIENT_SECRET is required.') - + print("Setup Complete.") append_result_output("Setup Complete.\n", env_dict['SETUP_LOG_FILE']) @@ -66,22 +72,22 @@ def env_dict(): else: with Path.open(my_file, "rb") as f: env_dict = pickle.load(f) - + yield env_dict - + my_file = Path("env.pkl") with FileLock(str(my_file) + ".lock"): with Path.open(my_file, "rb") as f: env_dict = pickle.load(f) env_dict['NUM_TESTS_COMPLETED'] = 1 + env_dict.get('NUM_TESTS_COMPLETED') - if env_dict['NUM_TESTS_COMPLETED'] == int(os.getenv('NUM_TESTS')): + if env_dict['NUM_TESTS_COMPLETED'] == int(os.getenv('NUM_TESTS')): # Checking if cleanup is required. if os.getenv('SKIP_CLEANUP'): return print('Starting cleanup...') append_result_output("Starting Cleanup...\n", env_dict['SETUP_LOG_FILE']) - + print("Cleanup Complete.") append_result_output("Cleanup Complete.\n", env_dict['SETUP_LOG_FILE']) return diff --git a/test/e2e/src/core/e2e_tests.sh b/test/e2e/src/core/e2e_tests.sh index 3bfafdce9..dd9d93073 100644 --- a/test/e2e/src/core/e2e_tests.sh +++ b/test/e2e/src/core/e2e_tests.sh @@ -1,7 +1,158 @@ -#!/bin/sh +#!/bin/bash +set -x results_dir="${RESULTS_DIR:-/tmp/results}" +waitForResourcesReady() { + ready=false + max_retries=60 + sleep_seconds=10 + NAMESPACE=$1 + RESOURCETYPE=$2 + RESOURCE=$3 + # if resource not specified, set to --all + if [ -z $RESOURCE ]; then + RESOURCE="--all" + fi + for i in $(seq 1 $max_retries) + do + if [[ ! $(kubectl wait --for=condition=Ready ${RESOURCETYPE} ${RESOURCE} --namespace ${NAMESPACE}) ]]; then + echo "waiting for the resource:${RESOURCE} of the type:${RESOURCETYPE} in namespace:${NAMESPACE} to be ready state, iteration:${i}" + sleep ${sleep_seconds} + else + echo "resource:${RESOURCE} of the type:${RESOURCETYPE} in namespace:${NAMESPACE} in ready state" + ready=true + break + fi + done + + echo "waitForResourcesReady state: $ready" +} + + +waitForArcK8sClusterCreated() { + connectivityState=false + max_retries=60 + sleep_seconds=10 + for i in $(seq 1 $max_retries) + do + echo "iteration: ${i}, clustername: ${CLUSTER_NAME}, resourcegroup: ${RESOURCE_GROUP}" + clusterState=$(az connectedk8s show --name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --query connectivityStatus -o json) + clusterState=$(echo $clusterState | tr -d '"' | tr -d '"\r\n') + echo "cluster current state: ${clusterState}" + if [ ! 
-z "$clusterState" ]; then + if [[ ("${clusterState}" == "Connected") || ("${clusterState}" == "Connecting") ]]; then + connectivityState=true + break + fi + fi + sleep ${sleep_seconds} + done + echo "Arc K8s cluster connectivityState: $connectivityState" +} + +waitForCIExtensionInstalled() { + installedState=false + max_retries=60 + sleep_seconds=10 + for i in $(seq 1 $max_retries) + do + echo "iteration: ${i}, clustername: ${CLUSTER_NAME}, resourcegroup: ${RESOURCE_GROUP}" + installState=$(az k8s-extension show --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --cluster-type connectedClusters --name azuremonitor-containers --query installState -o json) + installState=$(echo $installState | tr -d '"' | tr -d '"\r\n') + echo "extension install state: ${installState}" + if [ ! -z "$installState" ]; then + if [ "${installState}" == "Installed" ]; then + installedState=true + break + fi + fi + sleep ${sleep_seconds} + done + echo "container insights extension installedState: $installedState" +} + +validateCommonParameters() { + if [ -z $TENANT_ID ]; then + echo "ERROR: parameter TENANT_ID is required." > ${results_dir}/error + python3 setup_failure_handler.py + fi + if [ -z $CLIENT_ID ]; then + echo "ERROR: parameter CLIENT_ID is required." > ${results_dir}/error + python3 setup_failure_handler.py + fi + + if [ -z $CLIENT_SECRET ]; then + echo "ERROR: parameter CLIENT_SECRET is required." > ${results_dir}/error + python3 setup_failure_handler.py + fi +} + +validateArcConfTestParameters() { + if [ -z $SUBSCRIPTION_ID ]; then + echo "ERROR: parameter SUBSCRIPTION_ID is required." > ${results_dir}/error + python3 setup_failure_handler.py + fi + + if [ -z $RESOURCE_GROUP ]]; then + echo "ERROR: parameter RESOURCE_GROUP is required." > ${results_dir}/error + python3 setup_failure_handler.py + fi + + if [ -z $CLUSTER_NAME ]; then + echo "ERROR: parameter CLUSTER_NAME is required." > ${results_dir}/error + python3 setup_failure_handler.py + fi +} + +addArcConnectedK8sExtension() { + echo "adding Arc K8s connectedk8s extension" + az extension add --name connectedk8s 2> ${results_dir}/error || python3 setup_failure_handler.py +} + +addArcK8sCLIExtension() { + echo "adding Arc K8s k8s-extension extension" + az extension add --name k8s-extension +} + +createArcCIExtension() { + echo "creating extension type: Microsoft.AzureMonitor.Containers" + basicparameters="--cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --cluster-type connectedClusters --extension-type Microsoft.AzureMonitor.Containers --scope cluster --name azuremonitor-containers" + if [ ! -z "$CI_ARC_RELEASE_TRAIN" ]; then + basicparameters="$basicparameters --release-train $CI_ARC_RELEASE_TRAIN" + fi + if [ ! 
-z "$CI_ARC_VERSION" ]; then + basicparameters="$basicparameters --version $CI_ARC_VERSION" + fi + + az k8s-extension create $basicparameters --configuration-settings omsagent.ISTEST=true +} + +showArcCIExtension() { + echo "arc ci extension status" + az k8s-extension show --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --cluster-type connectedClusters --name azuremonitor-containers +} + +deleteArcCIExtension() { + az k8s-extension delete --name azuremonitor-containers \ + --cluster-type connectedClusters \ + --cluster-name $CLUSTER_NAME \ + --resource-group $RESOURCE_GROUP --yes +} + +login_to_azure() { + # Login with service principal + echo "login to azure using the SP creds" + az login --service-principal \ + -u ${CLIENT_ID} \ + -p ${CLIENT_SECRET} \ + --tenant ${TENANT_ID} 2> ${results_dir}/error || python3 setup_failure_handler.py + + echo "setting subscription: ${SUBSCRIPTION_ID} as default subscription" + az account set -s $SUBSCRIPTION_ID +} + + # saveResults prepares the results for handoff to the Sonobuoy worker. # See: https://github.com/vmware-tanzu/sonobuoy/blob/master/docs/plugins.md saveResults() { @@ -17,6 +168,50 @@ saveResults() { # Ensure that we tell the Sonobuoy worker we are done regardless of results. trap saveResults EXIT +# validate common params +validateCommonParameters + +IS_ARC_K8S_ENV="true" +if [ -z $IS_NON_ARC_K8S_TEST_ENVIRONMENT ]; then + echo "arc k8s environment" +else + if [ "$IS_NON_ARC_K8S_TEST_ENVIRONMENT" = "true" ]; then + IS_ARC_K8S_ENV="false" + echo "non arc k8s environment" + fi +fi + +if [ "$IS_ARC_K8S_ENV" = "false" ]; then + echo "skipping installing of ARC K8s container insights extension since the test environment is non-arc K8s" +else + # validate params + validateArcConfTestParameters + + # login to azure + login_to_azure + + # add arc k8s connectedk8s extension + addArcConnectedK8sExtension + + # wait for arc k8s pods to be ready state + waitForResourcesReady azure-arc pods + + # wait for Arc K8s cluster to be created + waitForArcK8sClusterCreated + + # add CLI extension + addArcK8sCLIExtension + + # add ARC K8s container insights extension + createArcCIExtension + + # show the ci extension status + showArcCIExtension + + #wait for extension state to be installed + waitForCIExtensionInstalled +fi + # The variable 'TEST_LIST' should be provided if we want to run specific tests. 
If not provided, all tests are run NUM_PROCESS=$(pytest /e2etests/ --collect-only -k "$TEST_NAME_LIST" -m "$TEST_MARKER_LIST" | grep " Date: Fri, 20 Aug 2021 09:43:42 -0700 Subject: [PATCH 142/301] upgrade golang version for windows in pipeline build and locally (#630) --- ....windows.official.all_tag.all_phase.all_config.ci_prod.yml | 2 +- .pipelines/pipeline.user.windows.yml | 2 +- scripts/build/windows/install-build-pre-requisites.ps1 | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml index 8ae069e90..0dc0a47c5 100644 --- a/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml +++ b/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml @@ -5,7 +5,7 @@ environment: version: '2019' runtime: provider: 'appcontainer' - image: 'cdpxwin1809.azurecr.io/user/azure-monitor/container-insights:6.0' + image: 'cdpxwin1809.azurecr.io/user/azure-monitor/container-insights:latest' source_mode: 'map' version: diff --git a/.pipelines/pipeline.user.windows.yml b/.pipelines/pipeline.user.windows.yml index 82dd30cd0..e9d0105ab 100644 --- a/.pipelines/pipeline.user.windows.yml +++ b/.pipelines/pipeline.user.windows.yml @@ -5,7 +5,7 @@ environment: version: '2019' runtime: provider: 'appcontainer' - image: 'cdpxwin1809.azurecr.io/user/azure-monitor/container-insights:6.0' + image: 'cdpxwin1809.azurecr.io/user/azure-monitor/container-insights:latest' source_mode: 'map' version: diff --git a/scripts/build/windows/install-build-pre-requisites.ps1 b/scripts/build/windows/install-build-pre-requisites.ps1 index 3bb56ac2a..7f1c9b54f 100755 --- a/scripts/build/windows/install-build-pre-requisites.ps1 +++ b/scripts/build/windows/install-build-pre-requisites.ps1 @@ -13,8 +13,8 @@ function Install-Go { exit } - $url = "https://dl.google.com/go/go1.14.1.windows-amd64.msi" - $output = Join-Path -Path $tempGo -ChildPath "go1.14.1.windows-amd64.msi" + $url = "https://dl.google.com/go/go1.15.14.windows-amd64.msi" + $output = Join-Path -Path $tempGo -ChildPath "go1.15.14.windows-amd64.msi" Write-Host("downloading go msi into directory path : " + $output + " ...") Invoke-WebRequest -Uri $url -OutFile $output -ErrorAction Stop Write-Host("downloading of go msi into directory path : " + $output + " completed") From 3a02a4f89fd8ff8bd47a1de0ab7bbd6f86a65f71 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Tue, 24 Aug 2021 17:37:38 -0700 Subject: [PATCH 143/301] Updating a link in Readme.md (#632) The link to the build pipelines now goes directly to our build pipelines (instead of to all github-private pipelines) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e3ceedc8e..3cd466bb9 100644 --- a/README.md +++ b/README.md @@ -259,7 +259,7 @@ docker push /: # Azure DevOps Build Pipeline -Navigate to https://github-private.visualstudio.com/microsoft/_build?view=pipelines to see Linux and Windows Agent build pipelines. These pipelines are configured with CI triggers for ci_dev and ci_prod. +Navigate to https://github-private.visualstudio.com/microsoft/_build?definitionScope=%5CCDPX%5Cdocker-provider to see Linux and Windows Agent build pipelines. These pipelines are configured with CI triggers for ci_dev and ci_prod. 
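As a rough illustration of that retag-and-push step (the registry names, repository paths, and tags below are placeholders for illustration only, not the actual CDPX or MCR repositories), the flow for a pulled CDPX image might look like:

```
# sketch only: <cdpx-acr> and <target-acr> are hypothetical registry names
az acr login --name <cdpx-acr>
docker pull <cdpx-acr>.azurecr.io/official/azure-monitor/container-insights:<build-tag>
az acr login --name <target-acr>
docker tag <cdpx-acr>.azurecr.io/official/azure-monitor/container-insights:<build-tag> <target-acr>.azurecr.io/azuremonitor/containerinsights/ciprod:<image-tag>
docker push <target-acr>.azurecr.io/azuremonitor/containerinsights/ciprod:<image-tag>
```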
Docker Images will be pushed to CDPX ACR repos and these needs to retagged and pushed to corresponding ACR or docker hub. Only onboarded Azure AD AppId has permission to pull the images from CDPx ACRs. From e56c74ba66744f87e0b630d7f6bbb5ba3a56428c Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Wed, 25 Aug 2021 15:30:11 -0700 Subject: [PATCH 144/301] Updating omsagent yaml to have parity with omsagent yaml file in AKS RP (#615) --- kubernetes/omsagent.yaml | 124 +++++++++++++++++++++------------------ 1 file changed, 67 insertions(+), 57 deletions(-) diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 49d4586c1..d84e46701 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -400,6 +400,8 @@ spec: value: "" - name: AZMON_CONTAINERLOGS_ONEAGENT_REGIONS value: "koreacentral,norwayeast,eastus2" + - name: USING_AAD_MSI_AUTH + value: "false" securityContext: privileged: true ports: @@ -445,59 +447,65 @@ spec: periodSeconds: 60 timeoutSeconds: 15 #Only in sidecar scraping mode - - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021" - imagePullPolicy: IfNotPresent - resources: - limits: - cpu: 500m - memory: 1Gi - requests: - cpu: 75m - memory: 225Mi - env: - # azure devops pipeline uses AKS_RESOURCE_ID and AKS_REGION hence ensure to uncomment these - - name: AKS_RESOURCE_ID - value: "VALUE_AKS_RESOURCE_ID_VALUE" - - name: AKS_REGION - value: "VALUE_AKS_RESOURCE_REGION_VALUE" - #Uncomment below two lines for ACS clusters and set the cluster names manually. Also comment out the above two lines for ACS clusters - #- name: ACS_RESOURCE_NAME - # value: "my_acs_cluster_name" - - name: CONTAINER_TYPE - value: "PrometheusSidecar" - - name: CONTROLLER_TYPE - value: "DaemonSet" - - name: NODE_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - # Update this with the user assigned msi client id for omsagent - - name: USER_ASSIGNED_IDENTITY_CLIENT_ID - value: "" - securityContext: - privileged: true - volumeMounts: - - mountPath: /etc/kubernetes/host - name: azure-json-path - - mountPath: /etc/omsagent-secret - name: omsagent-secret - readOnly: true - - mountPath: /etc/config/settings - name: settings-vol-config - readOnly: true - - mountPath: /etc/config/osm-settings - name: osm-settings-vol-config - readOnly: true - livenessProbe: - exec: - command: - - /bin/bash - - -c - - /opt/livenessprobe.sh - initialDelaySeconds: 60 - periodSeconds: 60 - timeoutSeconds: 15 + # - name: omsagent-prometheus + # image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021" + # imagePullPolicy: IfNotPresent + # resources: + # limits: + # cpu: 500m + # memory: 1Gi + # requests: + # cpu: 75m + # memory: 225Mi + # env: + # # azure devops pipeline uses AKS_RESOURCE_ID and AKS_REGION hence ensure to uncomment these + # - name: AKS_CLUSTER_NAME + # value: "VALUE_AKS_CLUSTER_NAME" + # - name: AKS_RESOURCE_ID + # value: "VALUE_AKS_RESOURCE_ID_VALUE" + # - name: AKS_REGION + # value: "VALUE_AKS_RESOURCE_REGION_VALUE" + # - name: AKS_NODE_RESOURCE_GROUP + # value: "VALUE_AKS_NODE_RESOURCE_GROUP" + # #Uncomment below two lines for ACS clusters and set the cluster names manually. 
Also comment out the above two lines for ACS clusters + # #- name: ACS_RESOURCE_NAME + # # value: "my_acs_cluster_name" + # - name: CONTAINER_TYPE + # value: "PrometheusSidecar" + # - name: CONTROLLER_TYPE + # value: "DaemonSet" + # - name: NODE_IP + # valueFrom: + # fieldRef: + # fieldPath: status.hostIP + # # Update this with the user assigned msi client id for omsagent + # - name: USER_ASSIGNED_IDENTITY_CLIENT_ID + # value: "" + # - name: USING_AAD_MSI_AUTH + # value: "false" + # securityContext: + # privileged: true + # volumeMounts: + # - mountPath: /etc/kubernetes/host + # name: azure-json-path + # - mountPath: /etc/omsagent-secret + # name: omsagent-secret + # readOnly: true + # - mountPath: /etc/config/settings + # name: settings-vol-config + # readOnly: true + # - mountPath: /etc/config/osm-settings + # name: osm-settings-vol-config + # readOnly: true + # livenessProbe: + # exec: + # command: + # - /bin/bash + # - -c + # - /opt/livenessprobe.sh + # initialDelaySeconds: 60 + # periodSeconds: 60 + # timeoutSeconds: 15 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -620,7 +628,9 @@ spec: value: "" # Add the below environment variable to true only in sidecar enabled regions, else set it to false - name: SIDECAR_SCRAPING_ENABLED - value: "true" + value: "false" + - name: USING_AAD_MSI_AUTH + value: "false" securityContext: privileged: true ports: @@ -789,13 +799,13 @@ spec: fieldRef: fieldPath: status.hostIP - name: SIDECAR_SCRAPING_ENABLED - value: "true" + value: "false" # Update this with the user assigned msi client id for omsagent - name: USER_ASSIGNED_IDENTITY_CLIENT_ID value: "" # Add this only for clouds that require cert bootstrapping - - name: REQUIRES_CERT_BOOTSTRAP - value: "true" + # - name: REQUIRES_CERT_BOOTSTRAP + # value: "true" volumeMounts: - mountPath: C:\ProgramData\docker\containers name: docker-windows-containers From d2817cb644027e0038e58b8dadd7710e9dfedbf4 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Fri, 27 Aug 2021 01:37:36 -0700 Subject: [PATCH 145/301] Unit test tooling (#625) Added tooling and examples for unit tests --- .github/workflows/run_unit_tests.yml | 30 + .gitignore | 4 + Dev Guide.md | 125 ++ .../installer/datafiles/base_container.data | 8 +- build/windows/Makefile.ps1 | 8 +- kubernetes/windows/Dockerfile | 1 - kubernetes/windows/Dockerfile-dev-image | 1 - source/plugins/go/src/extension/extension.go | 54 +- .../go/src/extension/extension_test.go | 74 + .../plugins/go/src/extension/socket_writer.go | 59 +- source/plugins/go/src/go.mod | 1 + source/plugins/go/src/go.sum | 1 + source/plugins/go/src/utils.go | 120 +- source/plugins/go/src/utils_test.go | 79 + .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 4 +- source/plugins/ruby/KubernetesApiClient.rb | 41 +- source/plugins/ruby/constants.rb | 4 +- source/plugins/{utils => ruby}/extension.rb | 0 .../{utils => ruby}/extension_utils.rb | 0 source/plugins/ruby/in_kube_nodes.rb | 195 +- source/plugins/ruby/in_kube_nodes_test.rb | 171 ++ source/plugins/{utils => ruby}/oms_common.rb | 0 source/plugins/{utils => ruby}/omslog.rb | 0 .../kube-nodes-malformed.txt | 1674 +++++++++++++++++ .../canned-api-responses/kube-nodes.txt | 851 +++++++++ test/unit-tests/run_go_tests.sh | 12 + test/unit-tests/run_ruby_tests.sh | 13 + test/unit-tests/test_driver.rb | 13 + 28 files changed, 3331 insertions(+), 212 deletions(-) create mode 100644 .github/workflows/run_unit_tests.yml create mode 100644 Dev Guide.md create mode 100644 source/plugins/go/src/extension/extension_test.go create 
mode 100644 source/plugins/go/src/utils_test.go rename source/plugins/{utils => ruby}/extension.rb (100%) rename source/plugins/{utils => ruby}/extension_utils.rb (100%) create mode 100644 source/plugins/ruby/in_kube_nodes_test.rb rename source/plugins/{utils => ruby}/oms_common.rb (100%) rename source/plugins/{utils => ruby}/omslog.rb (100%) create mode 100644 test/unit-tests/canned-api-responses/kube-nodes-malformed.txt create mode 100644 test/unit-tests/canned-api-responses/kube-nodes.txt create mode 100755 test/unit-tests/run_go_tests.sh create mode 100755 test/unit-tests/run_ruby_tests.sh create mode 100644 test/unit-tests/test_driver.rb diff --git a/.github/workflows/run_unit_tests.yml b/.github/workflows/run_unit_tests.yml new file mode 100644 index 000000000..29f5afc7a --- /dev/null +++ b/.github/workflows/run_unit_tests.yml @@ -0,0 +1,30 @@ +name: Run Unit Tests +on: + pull_request: + types: [opened, synchronize, reopened] + branches: + - ci_dev + - ci_prod +jobs: + Golang-Tests: + runs-on: ubuntu-latest + steps: + - name: Check out repository code + uses: actions/checkout@v2 + - name: Run unit tests + run: | + cd ${{ github.workspace }} + ./test/unit-tests/run_go_tests.sh + Ruby-Tests: + runs-on: ubuntu-latest + steps: + - name: Check out repository code + uses: actions/checkout@v2 + - name: install fluent + run: | + sudo gem install fluentd -v "1.12.2" --no-document + sudo fluentd --setup ./fluent + - name: Run unit tests + run: | + cd ${{ github.workspace }} + ./test/unit-tests/run_ruby_tests.sh diff --git a/.gitignore b/.gitignore index 2e2978e91..b0467519c 100644 --- a/.gitignore +++ b/.gitignore @@ -26,3 +26,7 @@ intermediate kubernetes/linux/Linux_ULINUX_1.0_x64_64_Release # ignore generated .h files for go source/plugins/go/src/*.h +*_mock.go +*_log.txt +*.log +*.byebug_history diff --git a/Dev Guide.md b/Dev Guide.md new file mode 100644 index 000000000..7057a4afe --- /dev/null +++ b/Dev Guide.md @@ -0,0 +1,125 @@ +# Dev Guide + +More advanced information needed to develop or build the docker provider will live here + + + +## Testing +Last updated 8/18/2021 + +To run all unit tests run the commands `test/unit-tests/run_go_tests.sh` and `test/unit-tests/run_ruby_tests.sh` + +#### Conventions: +1. Unit tests should go in their own file, but in the same folder as the source code their testing. For example, the tests for `in_kube_nodes.rb` are in `in_kube_nodes_test.rb`. Both files are in the folder `source/plugin/ruby`. + +### Ruby +Sample tests are provided in [in_kube_nodes_test.rb](source/plugin/ruby/in_kube_nodes_test.rb). They are meant to demo the tooling used for unit tests (as opposed to being comprehensive tests). Basic techniques like mocking are demonstrated there. + +#### Conventions: +1. When modifying a fluentd plugin for unit testing, any mocked classes (like KubernetesApiClient, applicationInsightsUtility, env, etc.) should be passed in as optional arguments of initialize. For example: +``` + def initialize + super +``` +would be turned into +``` + def initialize (kubernetesApiClient=nil, applicationInsightsUtility=nil, extensionUtils=nil, env=nil) + super() +``` + +2. Having end-to-end tests of all fluentd plugins is a longshot. We care more about unit testing smaller blocks of functionality (like all the helper functions in KubeNodeInventory.rb). Unit tests for fluentd plugins are not expected. + +### Golang + +Since golang is statically compiled, mocking requires a lot more work than in ruby. 
Sample tests are provided in [utils_test.go](source/plugin/go/src/utils_test.go) and [extension_test.go](source/plugin/go/src/extension/extension_test.go). Again, they are meant to demo the tooling used for unit tests (as opposed to being comprehensive tests). Basic techniques like mocking are demonstrated there.
+
+#### Mocking:
+Mocks are generated with gomock (mockgen).
+* Mock files should be called *_mock.go (socket_writer.go => socket_writer_mock.go)
+* Mocks should not be checked in to git. (they have been added to the .gitignore)
+* The command to generate mock files should go in a `//go:generate` comment at the top of the mocked file (see [socket_writer.go](source/plugin/go/src/extension/socket_writer.go) for an example). This way mocks can be generated by the unit test script.
+* Mocks also go in the same folder as the mocked files. This is unfortunate, but necessary to avoid circular package dependencies (anyone else feel free to figure out how to move mocks to a separate folder)
+
+Using mocks is also a little tricky. In order to mock functions in a package with gomock, they must be converted to receiver methods of a struct. This way the struct can be swapped out at runtime to change which implementations of a method are called. See the example below:
+
+```
+// declare all functions to be mocked in this interface
+type registrationPreCheckerInterface interface {
+    FUT(string) bool
+}
+
+// Create a struct which implements the above interface
+type regPreCheck struct{}
+
+func (r regPreCheck) FUT(email string) bool {
+    fmt.Println("real FUT() called")
+    return true
+}
+
+// Create a global variable and assign it to the struct
+var regPreCondVar registrationPreCheckerInterface
+
+func init() {
+    regPreCondVar = regPreCheck{}
+}
+```
+
+Now any code wishing to call FUT() will call `regPreCondVar.FUT("")`
+
+A unit test can substitute its own implementation of FUT() like so
+
+```
+// This will hold the mock of FUT we want to substitute
+var FUTMock func(email string) bool
+
+// create a new struct which implements the earlier interface
+type regPreCheckMock struct{}
+
+func (u regPreCheckMock) FUT(email string) bool {
+    return FUTMock(email)
+}
+```
+
+Everything is set up. Now a unit test can substitute in a mock like so:
+
+```
+func someUnitTest() {
+    // This will call the actual implementation of FUT()
+    regPreCondVar.FUT("")
+
+    // Now the test creates another struct to substitute. After this line all calls to FUT() will be diverted
+    regPreCondVar = regPreCheckMock{}
+
+    // substitute another function to run instead of FUT()
+    FUTMock = func(email string) bool {
+        fmt.Println("FUT 1 called")
+        return false
+    }
+    // This will call the function defined right above
+    regPreCondVar.FUT("")
+
+    // We can substitute another implementation
+    FUTMock = func(email string) bool {
+        fmt.Println("FUT 2 called")
+        return false
+    }
+    regPreCondVar.FUT("")
+
+    // put the old behavior back
+    regPreCondVar = regPreCheck{}
+    // this will call the actual implementation of FUT()
+    regPreCondVar.FUT("")
+
+}
+```
+
+A concrete example of this can be found in [socket_writer.go](source/plugin/go/src/extension/socket_writer.go) and [extension_test.go](source/plugin/go/src/extension/extension_test.go). Again, if anybody has a better way feel free to update this guide.
+
+
+
+A simpler way to test a specific function is to write wrapper functions. Test code calls the inner function (ReadFileContentsImpl) and product code calls the wrapper function (ReadFileContents).
The wrapper function provides any outside state which a unit test would want to control (like a function to read a file). This option makes product code more verbose, but probably easier to read too. Either way is acceptable. +``` +func ReadFileContents(fullPathToFileName string) (string, error) { + return ReadFileContentsImpl(fullPathToFileName, ioutil.ReadFile) +} +``` diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index b71cafd49..d104a5084 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -148,10 +148,10 @@ MAINTAINER: 'Microsoft Corporation' /etc/fluent/plugin/MdmMetricsGenerator.rb; source/plugins/ruby/MdmMetricsGenerator.rb; 644; root; root /etc/fluent/plugin/MdmAlertTemplates.rb; source/plugins/ruby/MdmAlertTemplates.rb; 644; root; root -/etc/fluent/plugin/omslog.rb; source/plugins/utils/omslog.rb; 644; root; root -/etc/fluent/plugin/oms_common.rb; source/plugins/utils/oms_common.rb; 644; root; root -/etc/fluent/plugin/extension.rb; source/plugins/utils/extension.rb; 644; root; root -/etc/fluent/plugin/extension_utils.rb; source/plugins/utils/extension_utils.rb; 644; root; root +/etc/fluent/plugin/omslog.rb; source/plugins/ruby/omslog.rb; 644; root; root +/etc/fluent/plugin/oms_common.rb; source/plugins/ruby/oms_common.rb; 644; root; root +/etc/fluent/plugin/extension.rb; source/plugins/ruby/extension.rb; 644; root; root +/etc/fluent/plugin/extension_utils.rb; source/plugins/ruby/extension_utils.rb; 644; root; root /etc/fluent/kube.conf; build/linux/installer/conf/kube.conf; 644; root; root diff --git a/build/windows/Makefile.ps1 b/build/windows/Makefile.ps1 index 737abc92a..b9bd1f3e4 100644 --- a/build/windows/Makefile.ps1 +++ b/build/windows/Makefile.ps1 @@ -183,11 +183,7 @@ Write-Host("successfully copied installer files conf and scripts from :" + $inst $rubyplugindir = Join-Path -Path $rootdir -ChildPath "source\plugins\ruby" Write-Host("copying ruby source files from :" + $rubyplugindir + " to :" + $publishdir + " ...") Copy-Item -Path $rubyplugindir -Destination $publishdir -Recurse -Force +Get-ChildItem $Path | Where{$_.Name -Match ".*_test\.rb"} | Remove-Item Write-Host("successfully copied ruby source files from :" + $rubyplugindir + " to :" + $publishdir + " ") -ForegroundColor Green -$utilsplugindir = Join-Path -Path $rootdir -ChildPath "source\plugins\utils" -Write-Host("copying ruby util files from :" + $utilsplugindir + " to :" + $publishdir + " ...") -Copy-Item -Path $utilsplugindir -Destination $publishdir -Recurse -Force -Write-Host("successfully copied ruby util files from :" + $utilsplugindir + " to :" + $publishdir + " ") -ForegroundColor Green - -Set-Location $currentdir \ No newline at end of file +Set-Location $currentdir diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 0ba64cd75..290deef40 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -71,7 +71,6 @@ COPY ./omsagentwindows/installer/scripts/rubyKeepCertificateAlive/*.rb /etc/flue #Copy fluentd ruby plugins COPY ./omsagentwindows/ruby/ /etc/fluent/plugin/ -COPY ./omsagentwindows/utils/*.rb /etc/fluent/plugin/ ENV AGENT_VERSION ${IMAGE_TAG} ENV OS_TYPE "windows" diff --git a/kubernetes/windows/Dockerfile-dev-image b/kubernetes/windows/Dockerfile-dev-image index 6764ef8c4..35aa83bd9 100644 --- a/kubernetes/windows/Dockerfile-dev-image +++ b/kubernetes/windows/Dockerfile-dev-image @@ -33,7 +33,6 @@ 
COPY ./omsagentwindows/installer/scripts/rubyKeepCertificateAlive/*.rb /etc/flue #Copy fluentd ruby plugins COPY ./omsagentwindows/ruby/ /etc/fluent/plugin/ -COPY ./omsagentwindows/utils/*.rb /etc/fluent/plugin/ ENV AGENT_VERSION ${IMAGE_TAG} ENV OS_TYPE "windows" diff --git a/source/plugins/go/src/extension/extension.go b/source/plugins/go/src/extension/extension.go index c68140ded..4d78380bc 100644 --- a/source/plugins/go/src/extension/extension.go +++ b/source/plugins/go/src/extension/extension.go @@ -1,12 +1,13 @@ package extension -import ( +import ( "encoding/json" "fmt" - "log" + "log" + "strings" "sync" - "strings" - uuid "github.com/google/uuid" + + uuid "github.com/google/uuid" "github.com/ugorji/go/codec" ) @@ -14,31 +15,31 @@ type Extension struct { datatypeStreamIdMap map[string]string } -var singleton *Extension +var singleton *Extension var once sync.Once var extensionconfiglock sync.Mutex var logger *log.Logger -var containerType string +var containerType string -func GetInstance(flbLogger *log.Logger, containerType string) *Extension { - once.Do(func() { - singleton = &Extension{make(map[string]string)} +func GetInstance(flbLogger *log.Logger, containertype string) *Extension { + once.Do(func() { + singleton = &Extension{make(map[string]string)} flbLogger.Println("Extension Instance created") - }) + }) logger = flbLogger - containerType = containerType - return singleton + containerType = containertype + return singleton } func (e *Extension) GetOutputStreamId(datatype string) string { extensionconfiglock.Lock() - defer extensionconfiglock.Unlock() + defer extensionconfiglock.Unlock() if len(e.datatypeStreamIdMap) > 0 && e.datatypeStreamIdMap[datatype] != "" { message := fmt.Sprintf("OutputstreamId: %s for the datatype: %s", e.datatypeStreamIdMap[datatype], datatype) logger.Printf(message) return e.datatypeStreamIdMap[datatype] } - var err error + var err error e.datatypeStreamIdMap, err = getDataTypeToStreamIdMapping() if err != nil { message := fmt.Sprintf("Error getting datatype to streamid mapping: %s", err.Error()) @@ -54,29 +55,30 @@ func getDataTypeToStreamIdMapping() (map[string]string, error) { taggedData := map[string]interface{}{"Request": "AgentTaggedData", "RequestId": guid.String(), "Tag": "ContainerInsights", "Version": "1"} jsonBytes, err := json.Marshal(taggedData) + // TODO: this error is unhandled var data []byte - enc := codec.NewEncoderBytes(&data, new(codec.MsgpackHandle)) + enc := codec.NewEncoderBytes(&data, new(codec.MsgpackHandle)) if err := enc.Encode(string(jsonBytes)); err != nil { return datatypeOutputStreamMap, err } - - fs := &FluentSocketWriter{ } + + fs := &FluentSocket{} fs.sockAddress = "/var/run/mdsd/default_fluent.socket" if containerType != "" && strings.Compare(strings.ToLower(containerType), "prometheussidecar") == 0 { fs.sockAddress = fmt.Sprintf("/var/run/mdsd-%s/default_fluent.socket", containerType) - } - responseBytes, err := fs.WriteAndRead(data) - defer fs.disConnect() + } + responseBytes, err := FluentSocketWriter.writeAndRead(fs, data) + defer FluentSocketWriter.disconnect(fs) logger.Printf("Info::mdsd::Making call to FluentSocket: %s to write and read the config data", fs.sockAddress) if err != nil { return datatypeOutputStreamMap, err } - response := string(responseBytes) + response := string(responseBytes) // TODO: why is this converted to a string then back into a []byte? 
var responseObjet AgentTaggedDataResponse err = json.Unmarshal([]byte(response), &responseObjet) - if err != nil { + if err != nil { logger.Printf("Error::mdsd::Failed to unmarshal config data. Error message: %s", string(err.Error())) return datatypeOutputStreamMap, err } @@ -84,16 +86,16 @@ func getDataTypeToStreamIdMapping() (map[string]string, error) { var extensionData TaggedData json.Unmarshal([]byte(responseObjet.TaggedData), &extensionData) - extensionConfigs := extensionData.ExtensionConfigs - logger.Printf("Info::mdsd::build the datatype and streamid map -- start") + extensionConfigs := extensionData.ExtensionConfigs + logger.Printf("Info::mdsd::build the datatype and streamid map -- start") for _, extensionConfig := range extensionConfigs { outputStreams := extensionConfig.OutputStreams for dataType, outputStreamID := range outputStreams { logger.Printf("Info::mdsd::datatype: %s, outputstreamId: %s", dataType, outputStreamID) datatypeOutputStreamMap[dataType] = outputStreamID.(string) - } + } } - logger.Printf("Info::mdsd::build the datatype and streamid map -- end") + logger.Printf("Info::mdsd::build the datatype and streamid map -- end") logger.Printf("extensionconfig::getDataTypeToStreamIdMapping:: getting extension config from fluent socket-end") diff --git a/source/plugins/go/src/extension/extension_test.go b/source/plugins/go/src/extension/extension_test.go new file mode 100644 index 000000000..c3b5ef472 --- /dev/null +++ b/source/plugins/go/src/extension/extension_test.go @@ -0,0 +1,74 @@ +package extension + +import ( + "fmt" + "log" + "os" + reflect "reflect" + "testing" + + "github.com/golang/mock/gomock" +) + +type FluentSocketWriterMock struct{} + +func Test_getDataTypeToStreamIdMapping(t *testing.T) { + + type test_struct struct { + testName string + mdsdResponse string + fluentSocket FluentSocket + output map[string]string + err error + } + + // This is a pretty useless unit test, but it demonstrates the concept (putting together a real test + // would require some large json structs). If getDataTypeToStreamIdMapping() is ever updated, that + // would be a good opertunity to add some real test cases. + tests := []test_struct{ + { + "basic test", + "{}", + FluentSocket{}, + map[string]string{}, + nil, + }, + } + + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mock := NewMockIFluentSocketWriter(mockCtrl) + sock := &FluentSocket{} + sock.sockAddress = "/var/run/mdsd/default_fluent.socket" + mock.EXPECT().writeAndRead(sock, gomock.Any()).Return([]byte(tt.mdsdResponse), nil).Times(1) + mock.EXPECT().disconnect(sock).Return(nil).Times(1) + + // This is where calls to the normal socket writer calls are redirected to the mock. + ActualFluentSocketWriter := FluentSocketWriter // save the old struct so that we can put it back later + FluentSocketWriter = mock + + logfile, err := os.Create("logFile.txt") + if err != nil { + fmt.Println(err.Error()) + } + + // use an actual logger here. Using a real logger then cleaning up the log file later is easier than mocking the logger. 
+ GetInstance(log.New(logfile, "", 0), "ContainerType") + defer os.Remove("logFile.txt") + + got, reterr := getDataTypeToStreamIdMapping() + if reterr != nil { + t.Errorf("got error") + t.Errorf(err.Error()) + } + if !reflect.DeepEqual(got, tt.output) { + t.Errorf("getDataTypeToStreamIdMapping() = %v, want %v", got, tt.output) + } + + // stop redirecting method calls to the mock + FluentSocketWriter = ActualFluentSocketWriter + }) + } +} diff --git a/source/plugins/go/src/extension/socket_writer.go b/source/plugins/go/src/extension/socket_writer.go index 1b16b319c..bfd35f5e6 100644 --- a/source/plugins/go/src/extension/socket_writer.go +++ b/source/plugins/go/src/extension/socket_writer.go @@ -4,20 +4,45 @@ import ( "net" ) +//go:generate mockgen -destination=socket_writer_mock.go -package=extension Docker-Provider/source/plugins/go/src/extension IFluentSocketWriter + //MaxRetries for trying to write data to the socket const MaxRetries = 5 //ReadBufferSize for reading data from sockets //Current CI extension config size is ~5KB and going with 20KB to handle any future scenarios -const ReadBufferSize = 20480 +const ReadBufferSize = 20480 //FluentSocketWriter writes data to AMA's default fluent socket -type FluentSocketWriter struct { - socket net.Conn - sockAddress string +type FluentSocket struct { + socket net.Conn + sockAddress string +} + +// begin mocking boilerplate +type IFluentSocketWriter interface { + connect(fluentSocket *FluentSocket) error + disconnect(fluentSocket *FluentSocket) error + writeWithRetries(fluentSocket *FluentSocket, data []byte) (int, error) + read(fluentSocket *FluentSocket) ([]byte, error) + write(fluentSocket *FluentSocket, payload []byte) (int, error) + writeAndRead(fluentSocket *FluentSocket, payload []byte) ([]byte, error) +} + +type FluentSocketWriterImpl struct{} + +// Methods in this file can by mocked by replacing FluentSocketWriter with a different struct. 
The methods +// in this file are all tied to the FluentSocketWriterImpl struct, but other structs could implement +// IFluentSocketWriter and be used instead +var FluentSocketWriter IFluentSocketWriter + +func init() { + FluentSocketWriter = FluentSocketWriterImpl{} } -func (fs *FluentSocketWriter) connect() error { +// end mocking boilerplate + +func (FluentSocketWriterImpl) connect(fs *FluentSocket) error { c, err := net.Dial("unix", fs.sockAddress) if err != nil { return err @@ -26,15 +51,15 @@ func (fs *FluentSocketWriter) connect() error { return nil } -func (fs *FluentSocketWriter) disConnect() error { - if (fs.socket != nil) { - fs.socket.Close() +func (FluentSocketWriterImpl) disconnect(fs *FluentSocket) error { + if fs.socket != nil { + fs.socket.Close() fs.socket = nil } return nil } -func (fs *FluentSocketWriter) writeWithRetries(data []byte) (int, error) { +func (FluentSocketWriterImpl) writeWithRetries(fs *FluentSocket, data []byte) (int, error) { var ( err error n int @@ -54,7 +79,7 @@ func (fs *FluentSocketWriter) writeWithRetries(data []byte) (int, error) { return 0, err } -func (fs *FluentSocketWriter) read() ([]byte, error) { +func (FluentSocketWriterImpl) read(fs *FluentSocket) ([]byte, error) { buf := make([]byte, ReadBufferSize) n, err := fs.socket.Read(buf) if err != nil { @@ -64,22 +89,22 @@ func (fs *FluentSocketWriter) read() ([]byte, error) { } -func (fs *FluentSocketWriter) Write(payload []byte) (int, error) { +func (FluentSocketWriterImpl) write(fs *FluentSocket, payload []byte) (int, error) { if fs.socket == nil { // previous write failed with permanent error and socket was closed. - if err := fs.connect(); err != nil { + if err := FluentSocketWriter.connect(fs); err != nil { return 0, err } } - return fs.writeWithRetries(payload) + return FluentSocketWriter.writeWithRetries(fs, payload) } -//WriteAndRead writes data to the socket and sends the response back -func (fs *FluentSocketWriter) WriteAndRead(payload []byte) ([]byte, error) { - _, err := fs.Write(payload) +//writeAndRead writes data to the socket and sends the response back +func (FluentSocketWriterImpl) writeAndRead(fs *FluentSocket, payload []byte) ([]byte, error) { + _, err := FluentSocketWriter.write(fs, payload) if err != nil { return nil, err } - return fs.read() + return FluentSocketWriter.read(fs) } diff --git a/source/plugins/go/src/go.mod b/source/plugins/go/src/go.mod index 4ead145ac..58e668597 100644 --- a/source/plugins/go/src/go.mod +++ b/source/plugins/go/src/go.mod @@ -8,6 +8,7 @@ require ( github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/dnaeon/go-vcr v1.2.0 // indirect github.com/fluent/fluent-bit-go v0.0.0-20171103221316-c4a158a6e3a7 + github.com/golang/mock v1.4.1 github.com/google/uuid v1.1.2 github.com/microsoft/ApplicationInsights-Go v0.4.3 github.com/philhofer/fwd v1.1.1 // indirect diff --git a/source/plugins/go/src/go.sum b/source/plugins/go/src/go.sum index 7f93bb260..ad9e40089 100644 --- a/source/plugins/go/src/go.sum +++ b/source/plugins/go/src/go.sum @@ -130,6 +130,7 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1 
h1:ocYkMQY5RrXTYgXl7ICpV0IXwlEQGwKIsery4gyXa1U= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= diff --git a/source/plugins/go/src/utils.go b/source/plugins/go/src/utils.go index 02d30607e..6b3036f85 100644 --- a/source/plugins/go/src/utils.go +++ b/source/plugins/go/src/utils.go @@ -12,8 +12,8 @@ import ( "net/url" "os" "strings" - "time" - + "time" + "github.com/Azure/azure-kusto-go/kusto" "github.com/Azure/azure-kusto-go/kusto/ingest" "github.com/Azure/go-autorest/autorest/azure/auth" @@ -87,7 +87,7 @@ func CreateHTTPClient() { } tlsConfig.BuildNameToCertificate() - transport = &http.Transport{TLSClientConfig: tlsConfig} + transport = &http.Transport{TLSClientConfig: tlsConfig} } // set the proxy if the proxy configured if ProxyEndpoint != "" { @@ -105,7 +105,7 @@ func CreateHTTPClient() { HTTPClient = http.Client{ Transport: transport, Timeout: 30 * time.Second, - } + } Log("Successfully created HTTP Client") } @@ -123,57 +123,57 @@ func ToString(s interface{}) string { //mdsdSocketClient to write msgp messages func CreateMDSDClient(dataType DataType, containerType string) { - mdsdfluentSocket := "/var/run/mdsd/default_fluent.socket" + mdsdfluentSocket := "/var/run/mdsd/default_fluent.socket" if containerType != "" && strings.Compare(strings.ToLower(containerType), "prometheussidecar") == 0 { - mdsdfluentSocket = fmt.Sprintf("/var/run/mdsd-%s/default_fluent.socket", containerType) - } + mdsdfluentSocket = fmt.Sprintf("/var/run/mdsd-%s/default_fluent.socket", containerType) + } switch dataType { - case ContainerLogV2: - if MdsdMsgpUnixSocketClient != nil { - MdsdMsgpUnixSocketClient.Close() - MdsdMsgpUnixSocketClient = nil - } - /*conn, err := fluent.New(fluent.Config{FluentNetwork:"unix", - FluentSocketPath:"/var/run/mdsd/default_fluent.socket", - WriteTimeout: 5 * time.Second, - RequestAck: true}) */ - conn, err := net.DialTimeout("unix", - mdsdfluentSocket, 10*time.Second) - if err != nil { - Log("Error::mdsd::Unable to open MDSD msgp socket connection for ContainerLogV2 %s", err.Error()) - //log.Fatalf("Unable to open MDSD msgp socket connection %s", err.Error()) - } else { - Log("Successfully created MDSD msgp socket connection for ContainerLogV2: %s", mdsdfluentSocket) - MdsdMsgpUnixSocketClient = conn - } - case KubeMonAgentEvents: - if MdsdKubeMonMsgpUnixSocketClient != nil { - MdsdKubeMonMsgpUnixSocketClient.Close() - MdsdKubeMonMsgpUnixSocketClient = nil - } - conn, err := net.DialTimeout("unix", - mdsdfluentSocket, 10*time.Second) - if err != nil { - Log("Error::mdsd::Unable to open MDSD msgp socket connection for KubeMon events %s", err.Error()) - //log.Fatalf("Unable to open MDSD msgp socket connection %s", err.Error()) - } else { - Log("Successfully created MDSD msgp socket connection for KubeMon events:%s", mdsdfluentSocket) - MdsdKubeMonMsgpUnixSocketClient = conn - } - case InsightsMetrics: - if MdsdInsightsMetricsMsgpUnixSocketClient != nil { - MdsdInsightsMetricsMsgpUnixSocketClient.Close() - MdsdInsightsMetricsMsgpUnixSocketClient = nil - } - conn, err := net.DialTimeout("unix", - mdsdfluentSocket, 10*time.Second) - if err != nil { - Log("Error::mdsd::Unable to open MDSD msgp socket connection for insights metrics %s", err.Error()) - //log.Fatalf("Unable to open MDSD msgp socket connection %s", err.Error()) - } else 
{ - Log("Successfully created MDSD msgp socket connection for Insights metrics %s", mdsdfluentSocket) - MdsdInsightsMetricsMsgpUnixSocketClient = conn - } + case ContainerLogV2: + if MdsdMsgpUnixSocketClient != nil { + MdsdMsgpUnixSocketClient.Close() + MdsdMsgpUnixSocketClient = nil + } + /*conn, err := fluent.New(fluent.Config{FluentNetwork:"unix", + FluentSocketPath:"/var/run/mdsd/default_fluent.socket", + WriteTimeout: 5 * time.Second, + RequestAck: true}) */ + conn, err := net.DialTimeout("unix", + mdsdfluentSocket, 10*time.Second) + if err != nil { + Log("Error::mdsd::Unable to open MDSD msgp socket connection for ContainerLogV2 %s", err.Error()) + //log.Fatalf("Unable to open MDSD msgp socket connection %s", err.Error()) + } else { + Log("Successfully created MDSD msgp socket connection for ContainerLogV2: %s", mdsdfluentSocket) + MdsdMsgpUnixSocketClient = conn + } + case KubeMonAgentEvents: + if MdsdKubeMonMsgpUnixSocketClient != nil { + MdsdKubeMonMsgpUnixSocketClient.Close() + MdsdKubeMonMsgpUnixSocketClient = nil + } + conn, err := net.DialTimeout("unix", + mdsdfluentSocket, 10*time.Second) + if err != nil { + Log("Error::mdsd::Unable to open MDSD msgp socket connection for KubeMon events %s", err.Error()) + //log.Fatalf("Unable to open MDSD msgp socket connection %s", err.Error()) + } else { + Log("Successfully created MDSD msgp socket connection for KubeMon events:%s", mdsdfluentSocket) + MdsdKubeMonMsgpUnixSocketClient = conn + } + case InsightsMetrics: + if MdsdInsightsMetricsMsgpUnixSocketClient != nil { + MdsdInsightsMetricsMsgpUnixSocketClient.Close() + MdsdInsightsMetricsMsgpUnixSocketClient = nil + } + conn, err := net.DialTimeout("unix", + mdsdfluentSocket, 10*time.Second) + if err != nil { + Log("Error::mdsd::Unable to open MDSD msgp socket connection for insights metrics %s", err.Error()) + //log.Fatalf("Unable to open MDSD msgp socket connection %s", err.Error()) + } else { + Log("Successfully created MDSD msgp socket connection for Insights metrics %s", mdsdfluentSocket) + MdsdInsightsMetricsMsgpUnixSocketClient = conn + } } } @@ -202,11 +202,15 @@ func CreateADXClient() { } func ReadFileContents(fullPathToFileName string) (string, error) { + return ReadFileContentsImpl(fullPathToFileName, ioutil.ReadFile) +} + +func ReadFileContentsImpl(fullPathToFileName string, readfilefunc func(string) ([]byte, error)) (string, error) { fullPathToFileName = strings.TrimSpace(fullPathToFileName) if len(fullPathToFileName) == 0 { return "", errors.New("ReadFileContents::filename is empty") } - content, err := ioutil.ReadFile(fullPathToFileName) //no need to close + content, err := readfilefunc(fullPathToFileName) //no need to close if err != nil { return "", errors.New("ReadFileContents::Unable to open file " + fullPathToFileName) } else { @@ -228,7 +232,7 @@ func isValidUrl(uri string) bool { func convertMsgPackEntriesToMsgpBytes(fluentForwardTag string, msgPackEntries []MsgPackEntry) []byte { var msgpBytes []byte - + fluentForward := MsgPackForward{ Tag: fluentForwardTag, Entries: msgPackEntries, @@ -239,7 +243,7 @@ func convertMsgPackEntriesToMsgpBytes(fluentForwardTag string, msgPackEntries [] msgpSize += 1 + msgp.Int64Size + msgp.GuessSize(fluentForward.Entries[i].Record) } - //allocate buffer for msgp message + //allocate buffer for msgp message msgpBytes = msgp.Require(nil, msgpSize) //construct the stream @@ -252,6 +256,6 @@ func convertMsgPackEntriesToMsgpBytes(fluentForwardTag string, msgPackEntries [] msgpBytes = msgp.AppendInt64(msgpBytes, batchTime) msgpBytes = 
msgp.AppendMapStrStr(msgpBytes, fluentForward.Entries[entry].Record) } - - return msgpBytes + + return msgpBytes } diff --git a/source/plugins/go/src/utils_test.go b/source/plugins/go/src/utils_test.go new file mode 100644 index 000000000..ab61ce751 --- /dev/null +++ b/source/plugins/go/src/utils_test.go @@ -0,0 +1,79 @@ +package main + +import ( + "errors" + "testing" +) + +func Test_isValidUrl(t *testing.T) { + type test_struct struct { + isValid bool + url string + } + + tests := []test_struct{ + {true, "https://www.microsoft.com"}, + {true, "http://abc.xyz"}, + {true, "https://www.microsoft.com/tests"}, + {false, "()"}, + {false, "https//www.microsoft.com"}, + {false, "https:/www.microsoft.com"}, + {false, "https:/www.microsoft.com*"}, + {false, ""}, + } + + for _, tt := range tests { + t.Run(tt.url, func(t *testing.T) { + got := isValidUrl(tt.url) + if got != tt.isValid { + t.Errorf("isValidUrl(%s) = %t, want %t", tt.url, got, tt.isValid) + } + }) + } +} + +func Test_ReadFileContents(t *testing.T) { + type mock_struct struct { + expectedFilePath string + fileContents []byte + err error + } + type test_struct struct { + testname string + calledFilePath string + subcall_spec mock_struct + output string + err bool + } + + tests := []test_struct{ + {"normal", "foobar.txt", mock_struct{"foobar.txt", []byte("asdf"), nil}, "asdf", false}, + {"extra whitespace", "foobar.txt ", mock_struct{"foobar.txt", []byte("asdf \t"), nil}, "asdf", false}, + {"empty filename", "", mock_struct{"", []byte(""), nil}, "", true}, + {"file doesn't exist", "asdf.txt", mock_struct{"asdf", []byte(""), errors.New("this error doesn't matter much")}, "", true}, + } + + for _, tt := range tests { + t.Run(string(tt.testname), func(t *testing.T) { + + readfileFunc := func(filename string) ([]byte, error) { + if filename == tt.subcall_spec.expectedFilePath { + return tt.subcall_spec.fileContents, nil + } + return []byte(""), errors.New("file not found") + } + + got, err := ReadFileContentsImpl(tt.calledFilePath, readfileFunc) + + if got != tt.output || !(tt.err == (err != nil)) { + t.Errorf("ReadFileContents(%v) = (%v, %v), want (%v, %v)", tt.calledFilePath, got, err, tt.output, tt.err) + if got != tt.output { + t.Errorf("output strings are not equal") + } + if tt.err == (err != nil) { + t.Errorf("errors are not equal") + } + } + }) + } +} diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index da6e94f5f..017bfb08d 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -40,9 +40,9 @@ class CAdvisorMetricsAPIClient @os_type = ENV["OS_TYPE"] if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 - @LogPath = "/etc/omsagentwindows/kubernetes_perf_log.txt" + @LogPath = Constants::WINDOWS_LOG_PATH + "kubernetes_perf_log.txt" else - @LogPath = "/var/opt/microsoft/docker-cimprov/log/kubernetes_perf_log.txt" + @LogPath = Constants::LINUX_LOG_PATH + "kubernetes_perf_log.txt" end @Log = Logger.new(@LogPath, 2, 10 * 1048576) #keep last 2 files, max log file size = 10M # @@rxBytesLast = nil diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 4b50e20d8..8925248d7 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -25,11 +25,12 @@ class KubernetesApiClient #@@IsValidRunningNode = nil #@@IsLinuxCluster = nil @@KubeSystemNamespace = "kube-system" + @os_type = ENV["OS_TYPE"] if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 - @LogPath = "/etc/omsagentwindows/kubernetes_client_log.txt" + @LogPath = Constants::WINDOWS_LOG_PATH + "kubernetes_client_log.txt" else - @LogPath = "/var/opt/microsoft/docker-cimprov/log/kubernetes_client_log.txt" + @LogPath = Constants::LINUX_LOG_PATH + "kubernetes_client_log.txt" end @Log = Logger.new(@LogPath, 2, 10 * 1048576) #keep last 2 files, max log file size = 10M @@TokenFileName = "/var/run/secrets/kubernetes.io/serviceaccount/token" @@ -87,42 +88,42 @@ def getTokenStr end end - def getClusterRegion - if ENV["AKS_REGION"] - return ENV["AKS_REGION"] + def getClusterRegion(env=ENV) + if env["AKS_REGION"] + return env["AKS_REGION"] else @Log.warn ("Kubernetes environment variable not set AKS_REGION. Unable to get cluster region.") return nil end end - def getResourceUri(resource, api_group) + def getResourceUri(resource, api_group, env=ENV) begin - if ENV["KUBERNETES_SERVICE_HOST"] && ENV["KUBERNETES_PORT_443_TCP_PORT"] + if env["KUBERNETES_SERVICE_HOST"] && env["KUBERNETES_PORT_443_TCP_PORT"] if api_group.nil? - return "https://#{ENV["KUBERNETES_SERVICE_HOST"]}:#{ENV["KUBERNETES_PORT_443_TCP_PORT"]}/api/" + @@ApiVersion + "/" + resource + return "https://#{env["KUBERNETES_SERVICE_HOST"]}:#{env["KUBERNETES_PORT_443_TCP_PORT"]}/api/" + @@ApiVersion + "/" + resource elsif api_group == @@ApiGroupApps - return "https://#{ENV["KUBERNETES_SERVICE_HOST"]}:#{ENV["KUBERNETES_PORT_443_TCP_PORT"]}/apis/apps/" + @@ApiVersionApps + "/" + resource + return "https://#{env["KUBERNETES_SERVICE_HOST"]}:#{env["KUBERNETES_PORT_443_TCP_PORT"]}/apis/apps/" + @@ApiVersionApps + "/" + resource elsif api_group == @@ApiGroupHPA - return "https://#{ENV["KUBERNETES_SERVICE_HOST"]}:#{ENV["KUBERNETES_PORT_443_TCP_PORT"]}/apis/" + @@ApiGroupHPA + "/" + @@ApiVersionHPA + "/" + resource + return "https://#{env["KUBERNETES_SERVICE_HOST"]}:#{env["KUBERNETES_PORT_443_TCP_PORT"]}/apis/" + @@ApiGroupHPA + "/" + @@ApiVersionHPA + "/" + resource end else - @Log.warn ("Kubernetes environment variable not set KUBERNETES_SERVICE_HOST: #{ENV["KUBERNETES_SERVICE_HOST"]} KUBERNETES_PORT_443_TCP_PORT: #{ENV["KUBERNETES_PORT_443_TCP_PORT"]}. Unable to form resourceUri") + @Log.warn ("Kubernetes environment variable not set KUBERNETES_SERVICE_HOST: #{env["KUBERNETES_SERVICE_HOST"]} KUBERNETES_PORT_443_TCP_PORT: #{env["KUBERNETES_PORT_443_TCP_PORT"]}. Unable to form resourceUri") return nil end end end - def getClusterName + def getClusterName(env=ENV) return @@ClusterName if !@@ClusterName.nil? 
@@ClusterName = "None" begin #try getting resource ID for aks - cluster = ENV["AKS_RESOURCE_ID"] + cluster = env["AKS_RESOURCE_ID"] if cluster && !cluster.nil? && !cluster.empty? @@ClusterName = cluster.split("/").last else - cluster = ENV["ACS_RESOURCE_NAME"] + cluster = env["ACS_RESOURCE_NAME"] if cluster && !cluster.nil? && !cluster.empty? @@ClusterName = cluster else @@ -147,7 +148,7 @@ def getClusterName return @@ClusterName end - def getClusterId + def getClusterId(env=ENV) return @@ClusterId if !@@ClusterId.nil? #By default initialize ClusterId to ClusterName. # In ACS/On-prem, we need to figure out how we can generate ClusterId @@ -155,7 +156,7 @@ def getClusterId # e.g. md5 digest is 128 bits = 32 character in hex. Get first 16 and get a guid, and the next 16 to get resource id @@ClusterId = getClusterName begin - cluster = ENV["AKS_RESOURCE_ID"] + cluster = env["AKS_RESOURCE_ID"] if cluster && !cluster.nil? && !cluster.empty? @@ClusterId = cluster end @@ -777,13 +778,13 @@ def getResourcesAndContinuationToken(uri, api_group: nil) return continuationToken, resourceInventory end #getResourcesAndContinuationToken - def getKubeAPIServerUrl + def getKubeAPIServerUrl(env=ENV) apiServerUrl = nil begin - if ENV["KUBERNETES_SERVICE_HOST"] && ENV["KUBERNETES_PORT_443_TCP_PORT"] - apiServerUrl = "https://#{ENV["KUBERNETES_SERVICE_HOST"]}:#{ENV["KUBERNETES_PORT_443_TCP_PORT"]}" + if env["KUBERNETES_SERVICE_HOST"] && env["KUBERNETES_PORT_443_TCP_PORT"] + apiServerUrl = "https://#{env["KUBERNETES_SERVICE_HOST"]}:#{env["KUBERNETES_PORT_443_TCP_PORT"]}" else - @Log.warn "Kubernetes environment variable not set KUBERNETES_SERVICE_HOST: #{ENV["KUBERNETES_SERVICE_HOST"]} KUBERNETES_PORT_443_TCP_PORT: #{ENV["KUBERNETES_PORT_443_TCP_PORT"]}. Unable to form resourceUri" + @Log.warn "Kubernetes environment variable not set KUBERNETES_SERVICE_HOST: #{env["KUBERNETES_SERVICE_HOST"]} KUBERNETES_PORT_443_TCP_PORT: #{env["KUBERNETES_PORT_443_TCP_PORT"]}. Unable to form resourceUri" end rescue => errorStr @Log.warn "KubernetesApiClient::getKubeAPIServerUrl:Failed #{errorStr}" diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index 40fa80c14..69da56488 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -129,5 +129,7 @@ class Constants ONEAGENT_FLUENT_SOCKET_NAME = "/var/run/mdsd/default_fluent.socket" #Tag prefix for output stream EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX = "dcr-" - + + LINUX_LOG_PATH = $in_unit_test.nil? ? "/var/opt/microsoft/docker-cimprov/log/" : "./" + WINDOWS_LOG_PATH = $in_unit_test.nil? ? 
"/etc/omsagentwindows/" : "./" end diff --git a/source/plugins/utils/extension.rb b/source/plugins/ruby/extension.rb similarity index 100% rename from source/plugins/utils/extension.rb rename to source/plugins/ruby/extension.rb diff --git a/source/plugins/utils/extension_utils.rb b/source/plugins/ruby/extension_utils.rb similarity index 100% rename from source/plugins/utils/extension_utils.rb rename to source/plugins/ruby/extension_utils.rb diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index bc62756a1..a32a32769 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -7,26 +7,13 @@ module Fluent::Plugin class Kube_nodeInventory_Input < Input Fluent::Plugin.register_input("kube_nodes", self) - @@configMapMountPath = "/etc/config/settings/log-data-collection-settings" - @@promConfigMountPath = "/etc/config/settings/prometheus-data-collection-settings" - @@osmConfigMountPath = "/etc/config/osm-settings/osm-metric-collection-configuration" - @@AzStackCloudFileName = "/etc/kubernetes/host/azurestackcloud.json" - - - @@rsPromInterval = ENV["TELEMETRY_RS_PROM_INTERVAL"] - @@rsPromFieldPassCount = ENV["TELEMETRY_RS_PROM_FIELDPASS_LENGTH"] - @@rsPromFieldDropCount = ENV["TELEMETRY_RS_PROM_FIELDDROP_LENGTH"] - @@rsPromK8sServiceCount = ENV["TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH"] - @@rsPromUrlCount = ENV["TELEMETRY_RS_PROM_URLS_LENGTH"] - @@rsPromMonitorPods = ENV["TELEMETRY_RS_PROM_MONITOR_PODS"] - @@rsPromMonitorPodsNamespaceLength = ENV["TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH"] - @@rsPromMonitorPodsLabelSelectorLength = ENV["TELEMETRY_RS_PROM_LABEL_SELECTOR_LENGTH"] - @@rsPromMonitorPodsFieldSelectorLength = ENV["TELEMETRY_RS_PROM_FIELD_SELECTOR_LENGTH"] - @@collectAllKubeEvents = ENV["AZMON_CLUSTER_COLLECT_ALL_KUBE_EVENTS"] - @@osmNamespaceCount = ENV["TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT"] - - def initialize - super + def initialize (kubernetesApiClient=nil, + applicationInsightsUtility=nil, + extensionUtils=nil, + env=nil, + telemetry_flush_interval=nil) + super() + require "yaml" require "yajl/json_gem" require "yajl" @@ -38,6 +25,31 @@ def initialize require_relative "omslog" require_relative "extension_utils" + @kubernetesApiClient = kubernetesApiClient == nil ? KubernetesApiClient : kubernetesApiClient + @applicationInsightsUtility = applicationInsightsUtility == nil ? ApplicationInsightsUtility : applicationInsightsUtility + @extensionUtils = extensionUtils == nil ? ExtensionUtils : extensionUtils + @env = env == nil ? ENV : env + @TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = telemetry_flush_interval == nil ? 
Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES : telemetry_flush_interval + + # these defines were previously at class scope Moving them into the constructor so that they can be set by unit tests + @@configMapMountPath = "/etc/config/settings/log-data-collection-settings" + @@promConfigMountPath = "/etc/config/settings/prometheus-data-collection-settings" + @@osmConfigMountPath = "/etc/config/osm-settings/osm-metric-collection-configuration" + @@AzStackCloudFileName = "/etc/kubernetes/host/azurestackcloud.json" + + + @@rsPromInterval = @env["TELEMETRY_RS_PROM_INTERVAL"] + @@rsPromFieldPassCount = @env["TELEMETRY_RS_PROM_FIELDPASS_LENGTH"] + @@rsPromFieldDropCount = @env["TELEMETRY_RS_PROM_FIELDDROP_LENGTH"] + @@rsPromK8sServiceCount = @env["TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH"] + @@rsPromUrlCount = @env["TELEMETRY_RS_PROM_URLS_LENGTH"] + @@rsPromMonitorPods = @env["TELEMETRY_RS_PROM_MONITOR_PODS"] + @@rsPromMonitorPodsNamespaceLength = @env["TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH"] + @@rsPromMonitorPodsLabelSelectorLength = @env["TELEMETRY_RS_PROM_LABEL_SELECTOR_LENGTH"] + @@rsPromMonitorPodsFieldSelectorLength = @env["TELEMETRY_RS_PROM_FIELD_SELECTOR_LENGTH"] + @@collectAllKubeEvents = @env["AZMON_CLUSTER_COLLECT_ALL_KUBE_EVENTS"] + @@osmNamespaceCount = @env["TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT"] + @ContainerNodeInventoryTag = "oneagent.containerInsights.CONTAINER_NODE_INVENTORY_BLOB" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" @MDMKubeNodeInventoryTag = "mdm.kubenodeinventory" @@ -64,8 +76,8 @@ def configure(conf) def start if @run_interval super - if !ENV["NODES_CHUNK_SIZE"].nil? && !ENV["NODES_CHUNK_SIZE"].empty? && ENV["NODES_CHUNK_SIZE"].to_i > 0 - @NODES_CHUNK_SIZE = ENV["NODES_CHUNK_SIZE"].to_i + if !@env["NODES_CHUNK_SIZE"].nil? && !@env["NODES_CHUNK_SIZE"].empty? && @env["NODES_CHUNK_SIZE"].to_i > 0 + @NODES_CHUNK_SIZE = @env["NODES_CHUNK_SIZE"].to_i else # this shouldnt happen just setting default here as safe guard $log.warn("in_kube_nodes::start: setting to default value since got NODES_CHUNK_SIZE nil or empty") @@ -73,8 +85,8 @@ def start end $log.info("in_kube_nodes::start : NODES_CHUNK_SIZE @ #{@NODES_CHUNK_SIZE}") - if !ENV["NODES_EMIT_STREAM_BATCH_SIZE"].nil? && !ENV["NODES_EMIT_STREAM_BATCH_SIZE"].empty? && ENV["NODES_EMIT_STREAM_BATCH_SIZE"].to_i > 0 - @NODES_EMIT_STREAM_BATCH_SIZE = ENV["NODES_EMIT_STREAM_BATCH_SIZE"].to_i + if !@env["NODES_EMIT_STREAM_BATCH_SIZE"].nil? && !@env["NODES_EMIT_STREAM_BATCH_SIZE"].empty? && @env["NODES_EMIT_STREAM_BATCH_SIZE"].to_i > 0 + @NODES_EMIT_STREAM_BATCH_SIZE = @env["NODES_EMIT_STREAM_BATCH_SIZE"].to_i else # this shouldnt happen just setting default here as safe guard $log.warn("in_kube_nodes::start: setting to default value since got NODES_EMIT_STREAM_BATCH_SIZE nil or empty") @@ -112,19 +124,19 @@ def enumerate @nodeInventoryE2EProcessingLatencyMs = 0 nodeInventoryStartTime = (Time.now.to_f * 1000).to_i - if ExtensionUtils.isAADMSIAuthMode() + if @extensionUtils.isAADMSIAuthMode() $log.info("in_kube_nodes::enumerate: AAD AUTH MSI MODE") if @kubeperfTag.nil? || !@kubeperfTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) - @kubeperfTag = ExtensionUtils.getOutputStreamId(Constants::PERF_DATA_TYPE) + @kubeperfTag = @extensionUtils.getOutputStreamId(Constants::PERF_DATA_TYPE) end if @insightsMetricsTag.nil? 
|| !@insightsMetricsTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) - @insightsMetricsTag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) + @insightsMetricsTag = @extensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) end if @ContainerNodeInventoryTag.nil? || !@ContainerNodeInventoryTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) - @ContainerNodeInventoryTag = ExtensionUtils.getOutputStreamId(Constants::CONTAINER_NODE_INVENTORY_DATA_TYPE) + @ContainerNodeInventoryTag = @extensionUtils.getOutputStreamId(Constants::CONTAINER_NODE_INVENTORY_DATA_TYPE) end if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) - @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_NODE_INVENTORY_DATA_TYPE) + @tag = @extensionUtils.getOutputStreamId(Constants::KUBE_NODE_INVENTORY_DATA_TYPE) end $log.info("in_kube_nodes::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_nodes::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") @@ -136,8 +148,9 @@ def enumerate # Initializing continuation token to nil continuationToken = nil $log.info("in_kube_nodes::enumerate : Getting nodes from Kube API @ #{Time.now.utc.iso8601}") + # KubernetesApiClient.getNodesResourceUri is a pure function, so call it from the actual module instead of from the mock resourceUri = KubernetesApiClient.getNodesResourceUri("nodes?limit=#{@NODES_CHUNK_SIZE}") - continuationToken, nodeInventory = KubernetesApiClient.getResourcesAndContinuationToken(resourceUri) + continuationToken, nodeInventory = @kubernetesApiClient.getResourcesAndContinuationToken(resourceUri) $log.info("in_kube_nodes::enumerate : Done getting nodes from Kube API @ #{Time.now.utc.iso8601}") nodesAPIChunkEndTime = (Time.now.to_f * 1000).to_i @nodesAPIE2ELatencyMs = (nodesAPIChunkEndTime - nodesAPIChunkStartTime) @@ -151,7 +164,7 @@ def enumerate #If we receive a continuation token, make calls, process and flush data until we have processed all data while (!continuationToken.nil? && !continuationToken.empty?) nodesAPIChunkStartTime = (Time.now.to_f * 1000).to_i - continuationToken, nodeInventory = KubernetesApiClient.getResourcesAndContinuationToken(resourceUri + "&continue=#{continuationToken}") + continuationToken, nodeInventory = @kubernetesApiClient.getResourcesAndContinuationToken(resourceUri + "&continue=#{continuationToken}") nodesAPIChunkEndTime = (Time.now.to_f * 1000).to_i @nodesAPIE2ELatencyMs = @nodesAPIE2ELatencyMs + (nodesAPIChunkEndTime - nodesAPIChunkStartTime) if (!nodeInventory.nil? && !nodeInventory.empty? && nodeInventory.key?("items") && !nodeInventory["items"].nil? && !nodeInventory["items"].empty?) 
@@ -165,9 +178,9 @@ def enumerate @nodeInventoryE2EProcessingLatencyMs = ((Time.now.to_f * 1000).to_i - nodeInventoryStartTime) timeDifference = (DateTime.now.to_time.to_i - @@nodeInventoryLatencyTelemetryTimeTracker).abs timeDifferenceInMinutes = timeDifference / 60 - if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) - ApplicationInsightsUtility.sendMetricTelemetry("NodeInventoryE2EProcessingLatencyMs", @nodeInventoryE2EProcessingLatencyMs, {}) - ApplicationInsightsUtility.sendMetricTelemetry("NodesAPIE2ELatencyMs", @nodesAPIE2ELatencyMs, {}) + if (timeDifferenceInMinutes >= @TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + @applicationInsightsUtility.sendMetricTelemetry("NodeInventoryE2EProcessingLatencyMs", @nodeInventoryE2EProcessingLatencyMs, {}) + @applicationInsightsUtility.sendMetricTelemetry("NodesAPIE2ELatencyMs", @nodesAPIE2ELatencyMs, {}) @@nodeInventoryLatencyTelemetryTimeTracker = DateTime.now.to_time.to_i end # Setting this to nil so that we dont hold memory until GC kicks in @@ -175,7 +188,7 @@ def enumerate rescue => errorStr $log.warn "in_kube_nodes::enumerate:Failed in enumerate: #{errorStr}" $log.debug_backtrace(errorStr.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + @applicationInsightsUtility.sendExceptionTelemetry(errorStr) end end # end enumerate @@ -188,7 +201,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) containerNodeInventoryEventStream = Fluent::MultiEventStream.new insightsMetricsEventStream = Fluent::MultiEventStream.new kubePerfEventStream = Fluent::MultiEventStream.new - @@istestvar = ENV["ISTEST"] + @@istestvar = @env["ISTEST"] #get node inventory nodeInventory["items"].each do |item| # node inventory @@ -299,49 +312,79 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) # Adding telemetry to send node telemetry every 10 minutes timeDifference = (DateTime.now.to_time.to_i - @@nodeTelemetryTimeTracker).abs timeDifferenceInMinutes = timeDifference / 60 - if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) - properties = getNodeTelemetryProps(item) - properties["KubernetesProviderID"] = nodeInventoryRecord["KubernetesProviderID"] - capacityInfo = item["status"]["capacity"] - - ApplicationInsightsUtility.sendMetricTelemetry("NodeMemory", capacityInfo["memory"], properties) + if (timeDifferenceInMinutes >= @TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) begin - if (!capacityInfo["nvidia.com/gpu"].nil?) && (!capacityInfo["nvidia.com/gpu"].empty?) - properties["nvigpus"] = capacityInfo["nvidia.com/gpu"] + properties = getNodeTelemetryProps(item) + properties["KubernetesProviderID"] = nodeInventoryRecord["KubernetesProviderID"] + capacityInfo = item["status"]["capacity"] + + ApplicationInsightsUtility.sendMetricTelemetry("NodeMemory", capacityInfo["memory"], properties) + begin + if (!capacityInfo["nvidia.com/gpu"].nil?) && (!capacityInfo["nvidia.com/gpu"].empty?) + properties["nvigpus"] = capacityInfo["nvidia.com/gpu"] + end + + if (!capacityInfo["amd.com/gpu"].nil?) && (!capacityInfo["amd.com/gpu"].empty?) + properties["amdgpus"] = capacityInfo["amd.com/gpu"] + end + rescue => errorStr + $log.warn "Failed in getting GPU telemetry in_kube_nodes : #{errorStr}" + $log.debug_backtrace(errorStr.backtrace) + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) end - if (!capacityInfo["amd.com/gpu"].nil?) && (!capacityInfo["amd.com/gpu"].empty?) 
- properties["amdgpus"] = capacityInfo["amd.com/gpu"] + # Telemetry for data collection config for replicaset + if (File.file?(@@configMapMountPath)) + properties["collectAllKubeEvents"] = @@collectAllKubeEvents end - rescue => errorStr - $log.warn "Failed in getting GPU telemetry in_kube_nodes : #{errorStr}" - $log.debug_backtrace(errorStr.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) - end - # Telemetry for data collection config for replicaset - if (File.file?(@@configMapMountPath)) - properties["collectAllKubeEvents"] = @@collectAllKubeEvents - end + #telemetry about prometheus metric collections settings for replicaset + if (File.file?(@@promConfigMountPath)) + properties["rsPromInt"] = @@rsPromInterval + properties["rsPromFPC"] = @@rsPromFieldPassCount + properties["rsPromFDC"] = @@rsPromFieldDropCount + properties["rsPromServ"] = @@rsPromK8sServiceCount + properties["rsPromUrl"] = @@rsPromUrlCount + properties["rsPromMonPods"] = @@rsPromMonitorPods + properties["rsPromMonPodsNs"] = @@rsPromMonitorPodsNamespaceLength + properties["rsPromMonPodsLabelSelectorLength"] = @@rsPromMonitorPodsLabelSelectorLength + properties["rsPromMonPodsFieldSelectorLength"] = @@rsPromMonitorPodsFieldSelectorLength + end + # telemetry about osm metric settings for replicaset + if (File.file?(@@osmConfigMountPath)) + properties["osmNamespaceCount"] = @@osmNamespaceCount + end + ApplicationInsightsUtility.sendMetricTelemetry("NodeCoreCapacity", capacityInfo["cpu"], properties) + telemetrySent = true - #telemetry about prometheus metric collections settings for replicaset - if (File.file?(@@promConfigMountPath)) - properties["rsPromInt"] = @@rsPromInterval - properties["rsPromFPC"] = @@rsPromFieldPassCount - properties["rsPromFDC"] = @@rsPromFieldDropCount - properties["rsPromServ"] = @@rsPromK8sServiceCount - properties["rsPromUrl"] = @@rsPromUrlCount - properties["rsPromMonPods"] = @@rsPromMonitorPods - properties["rsPromMonPodsNs"] = @@rsPromMonitorPodsNamespaceLength - properties["rsPromMonPodsLabelSelectorLength"] = @@rsPromMonitorPodsLabelSelectorLength - properties["rsPromMonPodsFieldSelectorLength"] = @@rsPromMonitorPodsFieldSelectorLength - end - # telemetry about osm metric settings for replicaset - if (File.file?(@@osmConfigMountPath)) - properties["osmNamespaceCount"] = @@osmNamespaceCount + # Telemetry for data collection config for replicaset + if (File.file?(@@configMapMountPath)) + properties["collectAllKubeEvents"] = @@collectAllKubeEvents + end + + #telemetry about prometheus metric collections settings for replicaset + if (File.file?(@@promConfigMountPath)) + properties["rsPromInt"] = @@rsPromInterval + properties["rsPromFPC"] = @@rsPromFieldPassCount + properties["rsPromFDC"] = @@rsPromFieldDropCount + properties["rsPromServ"] = @@rsPromK8sServiceCount + properties["rsPromUrl"] = @@rsPromUrlCount + properties["rsPromMonPods"] = @@rsPromMonitorPods + properties["rsPromMonPodsNs"] = @@rsPromMonitorPodsNamespaceLength + properties["rsPromMonPodsLabelSelectorLength"] = @@rsPromMonitorPodsLabelSelectorLength + properties["rsPromMonPodsFieldSelectorLength"] = @@rsPromMonitorPodsFieldSelectorLength + end + # telemetry about osm metric settings for replicaset + if (File.file?(@@osmConfigMountPath)) + properties["osmNamespaceCount"] = @@osmNamespaceCount + end + @applicationInsightsUtility.sendMetricTelemetry("NodeCoreCapacity", capacityInfo["cpu"], properties) + telemetrySent = true + rescue => errorStr + $log.warn "Failed in getting telemetry in_kube_nodes : 
#{errorStr}" + $log.debug_backtrace(errorStr.backtrace) + @applicationInsightsUtility.sendExceptionTelemetry(errorStr) end - ApplicationInsightsUtility.sendMetricTelemetry("NodeCoreCapacity", capacityInfo["cpu"], properties) - telemetrySent = true end end if telemetrySent == true @@ -385,7 +428,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) rescue => errorStr $log.warn "Failed to retrieve node inventory: #{errorStr}" $log.debug_backtrace(errorStr.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + @applicationInsightsUtility.sendExceptionTelemetry(errorStr) end $log.info "in_kube_nodes::parse_and_emit_records:End #{Time.now.utc.iso8601}" end @@ -414,7 +457,7 @@ def run_periodic $log.info("in_kube_nodes::run_periodic.enumerate.end #{Time.now.utc.iso8601}") rescue => errorStr $log.warn "in_kube_nodes::run_periodic: enumerate Failed to retrieve node inventory: #{errorStr}" - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + @applicationInsightsUtility.sendExceptionTelemetry(errorStr) end end @mutex.lock @@ -428,8 +471,8 @@ def getNodeInventoryRecord(item, batchTime = Time.utc.iso8601) begin record["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated record["Computer"] = item["metadata"]["name"] - record["ClusterName"] = KubernetesApiClient.getClusterName - record["ClusterId"] = KubernetesApiClient.getClusterId + record["ClusterName"] = @kubernetesApiClient.getClusterName + record["ClusterId"] = @kubernetesApiClient.getClusterId record["CreationTimeStamp"] = item["metadata"]["creationTimestamp"] record["Labels"] = [item["metadata"]["labels"]] record["Status"] = "" diff --git a/source/plugins/ruby/in_kube_nodes_test.rb b/source/plugins/ruby/in_kube_nodes_test.rb new file mode 100644 index 000000000..8f4984c6c --- /dev/null +++ b/source/plugins/ruby/in_kube_nodes_test.rb @@ -0,0 +1,171 @@ +require 'minitest/autorun' + +require 'fluent/test' +require 'fluent/test/driver/input' +require 'fluent/test/helpers' + +require_relative 'in_kube_nodes.rb' + +class InKubeNodesTests < Minitest::Test + include Fluent::Test::Helpers + + def setup + Fluent::Test.setup + end + + def create_driver(conf = {}, kubernetesApiClient=nil, applicationInsightsUtility=nil, extensionUtils=nil, env=nil, telemetry_flush_interval=nil) + Fluent::Test::Driver::Input.new(Fluent::Plugin::Kube_nodeInventory_Input.new(kubernetesApiClient=kubernetesApiClient, + applicationInsightsUtility=applicationInsightsUtility, + extensionUtils=extensionUtils, + env=env)).configure(conf) + end + + # Collection time of scrapped data will always be different. Overwrite it in any records returned by in_kube_ndes.rb + def overwrite_collection_time(data) + if data.key?("CollectionTime") + data["CollectionTime"] = "~CollectionTime~" + end + if data.key?("Timestamp") + data["Timestamp"] = "~Timestamp~" + end + return data + end + + def test_basic_single_node + kubeApiClient = Minitest::Mock.new + appInsightsUtil = Minitest::Mock.new + extensionUtils = Minitest::Mock.new + env = {} + env["NODES_CHUNK_SIZE"] = "200" + + kubeApiClient.expect(:==, false, [nil]) + appInsightsUtil.expect(:==, false, [nil]) + extensionUtils.expect(:==, false, [nil]) + + # isAADMSIAuthMode() is called multiple times and we don't really care how many time it is called. 
This is the same as mocking + # but it doesn't track how many times isAADMSIAuthMode is called + def extensionUtils.isAADMSIAuthMode + false + end + + nodes_api_response = eval(File.open("test/unit-tests/canned-api-responses/kube-nodes.txt").read) + kubeApiClient.expect(:getResourcesAndContinuationToken, [nil, nodes_api_response], ["nodes?limit=200"]) + kubeApiClient.expect(:getClusterName, "/cluster-name") + kubeApiClient.expect(:getClusterId, "/cluster-id") + + config = "run_interval 999999999" # only run once + + d = create_driver(config, kubernetesApiClient=kubeApiClient, applicationInsightsUtility=appInsightsUtil, extensionUtils=extensionUtils, env=env) + d.instance.start + d.instance.enumerate + d.run(timeout: 99999) # Input plugins decide when to run, so we have to give it enough time to run + + + expected_responses = { ["oneagent.containerInsights.KUBE_NODE_INVENTORY_BLOB", overwrite_collection_time({"CollectionTime"=>"2021-08-17T20:24:18Z", "Computer"=>"aks-nodepool1-24816391-vmss000000", "ClusterName"=>"/cluster-name", "ClusterId"=>"/cluster-id", "CreationTimeStamp"=>"2021-07-21T23:40:14Z", "Labels"=>[{"agentpool"=>"nodepool1", "beta.kubernetes.io/arch"=>"amd64", "beta.kubernetes.io/instance-type"=>"Standard_DS2_v2", "beta.kubernetes.io/os"=>"linux", "failure-domain.beta.kubernetes.io/region"=>"westus2", "failure-domain.beta.kubernetes.io/zone"=>"0", "kubernetes.azure.com/cluster"=>"MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode"=>"system", "kubernetes.azure.com/node-image-version"=>"AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku"=>"Ubuntu", "kubernetes.azure.com/role"=>"agent", "kubernetes.io/arch"=>"amd64", "kubernetes.io/hostname"=>"aks-nodepool1-24816391-vmss000000", "kubernetes.io/os"=>"linux", "kubernetes.io/role"=>"agent", "node-role.kubernetes.io/agent"=>"", "node.kubernetes.io/instance-type"=>"Standard_DS2_v2", "storageprofile"=>"managed", "storagetier"=>"Premium_LRS", "topology.kubernetes.io/region"=>"westus2", "topology.kubernetes.io/zone"=>"0"}], "Status"=>"Ready", "KubernetesProviderID"=>"azure", "LastTransitionTimeReady"=>"2021-07-21T23:40:24Z", "KubeletVersion"=>"v1.19.11", "KubeProxyVersion"=>"v1.19.11"})] => true, + ["mdm.kubenodeinventory", overwrite_collection_time({"CollectionTime"=>"2021-08-17T20:24:18Z", "Computer"=>"aks-nodepool1-24816391-vmss000000", "ClusterName"=>"/cluster-name", "ClusterId"=>"/cluster-id", "CreationTimeStamp"=>"2021-07-21T23:40:14Z", "Labels"=>[{"agentpool"=>"nodepool1", "beta.kubernetes.io/arch"=>"amd64", "beta.kubernetes.io/instance-type"=>"Standard_DS2_v2", "beta.kubernetes.io/os"=>"linux", "failure-domain.beta.kubernetes.io/region"=>"westus2", "failure-domain.beta.kubernetes.io/zone"=>"0", "kubernetes.azure.com/cluster"=>"MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode"=>"system", "kubernetes.azure.com/node-image-version"=>"AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku"=>"Ubuntu", "kubernetes.azure.com/role"=>"agent", "kubernetes.io/arch"=>"amd64", "kubernetes.io/hostname"=>"aks-nodepool1-24816391-vmss000000", "kubernetes.io/os"=>"linux", "kubernetes.io/role"=>"agent", "node-role.kubernetes.io/agent"=>"", "node.kubernetes.io/instance-type"=>"Standard_DS2_v2", "storageprofile"=>"managed", "storagetier"=>"Premium_LRS", "topology.kubernetes.io/region"=>"westus2", "topology.kubernetes.io/zone"=>"0"}], "Status"=>"Ready", "KubernetesProviderID"=>"azure", "LastTransitionTimeReady"=>"2021-07-21T23:40:24Z", "KubeletVersion"=>"v1.19.11", 
"KubeProxyVersion"=>"v1.19.11"})] => true, + ["oneagent.containerInsights.CONTAINER_NODE_INVENTORY_BLOB", overwrite_collection_time({"CollectionTime"=>"2021-08-17T20:24:18Z", "Computer"=>"aks-nodepool1-24816391-vmss000000", "OperatingSystem"=>"Ubuntu 18.04.5 LTS", "DockerVersion"=>"containerd://1.4.4+azure"})] => true, + ["oneagent.containerInsights.LINUX_PERF_BLOB", overwrite_collection_time({"Timestamp"=>"2021-08-17T20:24:18Z", "Host"=>"aks-nodepool1-24816391-vmss000000", "Computer"=>"aks-nodepool1-24816391-vmss000000", "ObjectName"=>"K8SNode", "InstanceName"=>"None/aks-nodepool1-24816391-vmss000000", "json_Collections"=>"[{\"CounterName\":\"cpuAllocatableNanoCores\",\"Value\":1900000000.0}]"})] => true, + ["oneagent.containerInsights.LINUX_PERF_BLOB", overwrite_collection_time({"Timestamp"=>"2021-08-17T20:24:18Z", "Host"=>"aks-nodepool1-24816391-vmss000000", "Computer"=>"aks-nodepool1-24816391-vmss000000", "ObjectName"=>"K8SNode", "InstanceName"=>"None/aks-nodepool1-24816391-vmss000000", "json_Collections"=>"[{\"CounterName\":\"memoryAllocatableBytes\",\"Value\":4787511296.0}]"})] => true, + ["oneagent.containerInsights.LINUX_PERF_BLOB", overwrite_collection_time({"Timestamp"=>"2021-08-17T20:24:18Z", "Host"=>"aks-nodepool1-24816391-vmss000000", "Computer"=>"aks-nodepool1-24816391-vmss000000", "ObjectName"=>"K8SNode", "InstanceName"=>"None/aks-nodepool1-24816391-vmss000000", "json_Collections"=>"[{\"CounterName\":\"cpuCapacityNanoCores\",\"Value\":2000000000.0}]"})] => true, + ["oneagent.containerInsights.LINUX_PERF_BLOB", overwrite_collection_time({"Timestamp"=>"2021-08-17T20:24:18Z", "Host"=>"aks-nodepool1-24816391-vmss000000", "Computer"=>"aks-nodepool1-24816391-vmss000000", "ObjectName"=>"K8SNode", "InstanceName"=>"None/aks-nodepool1-24816391-vmss000000", "json_Collections"=>"[{\"CounterName\":\"memoryCapacityBytes\",\"Value\":7291510784.0}]"})] => true} + + d.events.each do |tag, time, record| + cleaned_record = overwrite_collection_time record + if expected_responses.key?([tag, cleaned_record]) + expected_responses[[tag, cleaned_record]] = true + else + assert(false, "got unexpected record") + end + end + + expected_responses.each do |key, val| + assert(val, "expected record not emitted: #{key}") + end + + # make sure all mocked methods were called the expected number of times + kubeApiClient.verify + appInsightsUtil.verify + extensionUtils.verify + end + + # Sometimes customer tooling creates invalid node specs in the Kube API server (its happened more than once). + # This test makes sure that it doesn't creash the entire input plugin and other nodes are still collected + def test_malformed_node_spec + kubeApiClient = Minitest::Mock.new + appInsightsUtil = Minitest::Mock.new + extensionUtils = Minitest::Mock.new + env = {} + env["NODES_CHUNK_SIZE"] = "200" + + kubeApiClient.expect(:==, false, [nil]) + appInsightsUtil.expect(:==, false, [nil]) + extensionUtils.expect(:==, false, [nil]) + + # isAADMSIAuthMode() is called multiple times and we don't really care how many time it is called. This is the same as mocking + # but it doesn't track how many times isAADMSIAuthMode is called + def extensionUtils.isAADMSIAuthMode + false + end + + # Set up the KubernetesApiClient Mock. Note: most of the functions in KubernetesApiClient are pure (access no + # state other than their arguments), so there is no need to mock them (this test file would be far longer and + # more brittle). Instead, in_kube_nodes bypasses the mock and directly calls these functions in KubernetesApiClient. 
+ # Ideally the pure functions in KubernetesApiClient would be refactored into their own file to reduce confusion. + nodes_api_response = eval(File.open("test/unit-tests/canned-api-responses/kube-nodes-malformed.txt").read) + kubeApiClient.expect(:getResourcesAndContinuationToken, [nil, nodes_api_response], ["nodes?limit=200"]) + kubeApiClient.expect(:getClusterName, "/cluster-name") + kubeApiClient.expect(:getClusterName, "/cluster-name") + kubeApiClient.expect(:getClusterId, "/cluster-id") + kubeApiClient.expect(:getClusterId, "/cluster-id") + + def appInsightsUtil.sendExceptionTelemetry(exception) + if exception.to_s != "undefined method `[]' for nil:NilClass" + raise "an unexpected exception has occured" + end + end + + # This test doesn't care if metric telemetry is sent properly. Looking for an unnecessary value would make it needlessly rigid + def appInsightsUtil.sendMetricTelemetry(a, b, c) + end + + config = "run_interval 999999999" # only run once + + d = create_driver(config, kubernetesApiClient=kubeApiClient, applicationInsightsUtility=appInsightsUtil, extensionUtils=extensionUtils, env=env, telemetry_flush_interval=0) + d.instance.start + + d.instance.enumerate + d.run(timeout: 99999) #TODO: is this necessary? + + expected_responses = { + ["oneagent.containerInsights.KUBE_NODE_INVENTORY_BLOB", {"CollectionTime"=>"~CollectionTime~", "Computer"=>"correct-node", "ClusterName"=>"/cluster-name", "ClusterId"=>"/cluster-id", "CreationTimeStamp"=>"2021-07-21T23:40:14Z", "Labels"=>[{"agentpool"=>"nodepool1", "beta.kubernetes.io/arch"=>"amd64", "beta.kubernetes.io/instance-type"=>"Standard_DS2_v2", "beta.kubernetes.io/os"=>"linux", "failure-domain.beta.kubernetes.io/region"=>"westus2", "failure-domain.beta.kubernetes.io/zone"=>"0", "kubernetes.azure.com/cluster"=>"MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode"=>"system", "kubernetes.azure.com/node-image-version"=>"AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku"=>"Ubuntu", "kubernetes.azure.com/role"=>"agent", "kubernetes.io/arch"=>"amd64", "kubernetes.io/hostname"=>"correct-node", "kubernetes.io/os"=>"linux", "kubernetes.io/role"=>"agent", "node-role.kubernetes.io/agent"=>"", "node.kubernetes.io/instance-type"=>"Standard_DS2_v2", "storageprofile"=>"managed", "storagetier"=>"Premium_LRS", "topology.kubernetes.io/region"=>"westus2", "topology.kubernetes.io/zone"=>"0"}], "Status"=>"Ready", "KubernetesProviderID"=>"azure", "LastTransitionTimeReady"=>"2021-07-21T23:40:24Z", "KubeletVersion"=>"v1.19.11", "KubeProxyVersion"=>"v1.19.11"}] => false, + ["mdm.kubenodeinventory", {"CollectionTime"=>"~CollectionTime~", "Computer"=>"correct-node", "ClusterName"=>"/cluster-name", "ClusterId"=>"/cluster-id", "CreationTimeStamp"=>"2021-07-21T23:40:14Z", "Labels"=>[{"agentpool"=>"nodepool1", "beta.kubernetes.io/arch"=>"amd64", "beta.kubernetes.io/instance-type"=>"Standard_DS2_v2", "beta.kubernetes.io/os"=>"linux", "failure-domain.beta.kubernetes.io/region"=>"westus2", "failure-domain.beta.kubernetes.io/zone"=>"0", "kubernetes.azure.com/cluster"=>"MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode"=>"system", "kubernetes.azure.com/node-image-version"=>"AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku"=>"Ubuntu", "kubernetes.azure.com/role"=>"agent", "kubernetes.io/arch"=>"amd64", "kubernetes.io/hostname"=>"correct-node", "kubernetes.io/os"=>"linux", "kubernetes.io/role"=>"agent", "node-role.kubernetes.io/agent"=>"", "node.kubernetes.io/instance-type"=>"Standard_DS2_v2", 
"storageprofile"=>"managed", "storagetier"=>"Premium_LRS", "topology.kubernetes.io/region"=>"westus2", "topology.kubernetes.io/zone"=>"0"}], "Status"=>"Ready", "KubernetesProviderID"=>"azure", "LastTransitionTimeReady"=>"2021-07-21T23:40:24Z", "KubeletVersion"=>"v1.19.11", "KubeProxyVersion"=>"v1.19.11"}] => false, + ["oneagent.containerInsights.CONTAINER_NODE_INVENTORY_BLOB", {"CollectionTime"=>"~CollectionTime~", "Computer"=>"correct-node", "OperatingSystem"=>"Ubuntu 18.04.5 LTS", "DockerVersion"=>"containerd://1.4.4+azure"}] => false, + ["oneagent.containerInsights.LINUX_PERF_BLOB", {"Timestamp"=>"~Timestamp~", "Host"=>"correct-node", "Computer"=>"correct-node", "ObjectName"=>"K8SNode", "InstanceName"=>"None/correct-node", "json_Collections"=>"[{\"CounterName\":\"cpuAllocatableNanoCores\",\"Value\":1000000.0}]"}] => false, + ["oneagent.containerInsights.LINUX_PERF_BLOB", {"Timestamp"=>"~Timestamp~", "Host"=>"correct-node", "Computer"=>"correct-node", "ObjectName"=>"K8SNode", "InstanceName"=>"None/correct-node", "json_Collections"=>"[{\"CounterName\":\"memoryAllocatableBytes\",\"Value\":444.0}]"}] => false, + ["oneagent.containerInsights.LINUX_PERF_BLOB", {"Timestamp"=>"~Timestamp~", "Host"=>"correct-node", "Computer"=>"correct-node", "ObjectName"=>"K8SNode", "InstanceName"=>"None/correct-node", "json_Collections"=>"[{\"CounterName\":\"cpuCapacityNanoCores\",\"Value\":2000000.0}]"}] => false, + ["oneagent.containerInsights.LINUX_PERF_BLOB", {"Timestamp"=>"~Timestamp~", "Host"=>"correct-node", "Computer"=>"correct-node", "ObjectName"=>"K8SNode", "InstanceName"=>"None/correct-node", "json_Collections"=>"[{\"CounterName\":\"memoryCapacityBytes\",\"Value\":555.0}]"}] => false, + + # these records are for the malformed node (it doesn't have limits or requests set so there are no PERF records) + ["oneagent.containerInsights.KUBE_NODE_INVENTORY_BLOB", {"CollectionTime"=>"~CollectionTime~", "Computer"=>"malformed-node", "ClusterName"=>"/cluster-name", "ClusterId"=>"/cluster-id", "CreationTimeStamp"=>"2021-07-21T23:40:14Z", "Labels"=>[{"agentpool"=>"nodepool1", "beta.kubernetes.io/arch"=>"amd64", "beta.kubernetes.io/instance-type"=>"Standard_DS2_v2", "beta.kubernetes.io/os"=>"linux", "failure-domain.beta.kubernetes.io/region"=>"westus2", "failure-domain.beta.kubernetes.io/zone"=>"0", "kubernetes.azure.com/cluster"=>"MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode"=>"system", "kubernetes.azure.com/node-image-version"=>"AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku"=>"Ubuntu", "kubernetes.azure.com/role"=>"agent", "kubernetes.io/arch"=>"amd64", "kubernetes.io/hostname"=>"malformed-node", "kubernetes.io/os"=>"linux", "kubernetes.io/role"=>"agent", "node-role.kubernetes.io/agent"=>"", "node.kubernetes.io/instance-type"=>"Standard_DS2_v2", "storageprofile"=>"managed", "storagetier"=>"Premium_LRS", "topology.kubernetes.io/region"=>"westus2", "topology.kubernetes.io/zone"=>"0"}], "Status"=>"Ready", "KubernetesProviderID"=>"azure", "LastTransitionTimeReady"=>"2021-07-21T23:40:24Z", "KubeletVersion"=>"v1.19.11", "KubeProxyVersion"=>"v1.19.11"}] => false, + ["mdm.kubenodeinventory", {"CollectionTime"=>"~CollectionTime~", "Computer"=>"malformed-node", "ClusterName"=>"/cluster-name", "ClusterId"=>"/cluster-id", "CreationTimeStamp"=>"2021-07-21T23:40:14Z", "Labels"=>[{"agentpool"=>"nodepool1", "beta.kubernetes.io/arch"=>"amd64", "beta.kubernetes.io/instance-type"=>"Standard_DS2_v2", "beta.kubernetes.io/os"=>"linux", "failure-domain.beta.kubernetes.io/region"=>"westus2", 
"failure-domain.beta.kubernetes.io/zone"=>"0", "kubernetes.azure.com/cluster"=>"MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode"=>"system", "kubernetes.azure.com/node-image-version"=>"AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku"=>"Ubuntu", "kubernetes.azure.com/role"=>"agent", "kubernetes.io/arch"=>"amd64", "kubernetes.io/hostname"=>"malformed-node", "kubernetes.io/os"=>"linux", "kubernetes.io/role"=>"agent", "node-role.kubernetes.io/agent"=>"", "node.kubernetes.io/instance-type"=>"Standard_DS2_v2", "storageprofile"=>"managed", "storagetier"=>"Premium_LRS", "topology.kubernetes.io/region"=>"westus2", "topology.kubernetes.io/zone"=>"0"}], "Status"=>"Ready", "KubernetesProviderID"=>"azure", "LastTransitionTimeReady"=>"2021-07-21T23:40:24Z", "KubeletVersion"=>"v1.19.11", "KubeProxyVersion"=>"v1.19.11"}] => false, + ["oneagent.containerInsights.CONTAINER_NODE_INVENTORY_BLOB", {"CollectionTime"=>"~CollectionTime~", "Computer"=>"malformed-node", "OperatingSystem"=>"Ubuntu 18.04.5 LTS", "DockerVersion"=>"containerd://1.4.4+azure"}] => false + } + + d.events.each do |tag, time, record| + cleaned_record = overwrite_collection_time record + if expected_responses.key?([tag, cleaned_record]) + expected_responses[[tag, cleaned_record]] = true + end + # don't do anything if an unexpected record was emitted. Since the node spec is malformed, there will be some partial data. + # we care more that the non-malformed data is still emitted + end + + expected_responses.each do |key, val| + assert(val, "expected record not emitted: #{key}") + end + + kubeApiClient.verify + appInsightsUtil.verify + extensionUtils.verify + end +end diff --git a/source/plugins/utils/oms_common.rb b/source/plugins/ruby/oms_common.rb similarity index 100% rename from source/plugins/utils/oms_common.rb rename to source/plugins/ruby/oms_common.rb diff --git a/source/plugins/utils/omslog.rb b/source/plugins/ruby/omslog.rb similarity index 100% rename from source/plugins/utils/omslog.rb rename to source/plugins/ruby/omslog.rb diff --git a/test/unit-tests/canned-api-responses/kube-nodes-malformed.txt b/test/unit-tests/canned-api-responses/kube-nodes-malformed.txt new file mode 100644 index 000000000..bb4c61ca5 --- /dev/null +++ b/test/unit-tests/canned-api-responses/kube-nodes-malformed.txt @@ -0,0 +1,1674 @@ +{ + "kind"=>"NodeList", + "apiVersion"=>"v1", + "metadata"=>{ + "selfLink"=>"/api/v1/nodes", + "resourceVersion"=>"5974879" + }, + "items"=>[ + { + "metadata"=>{ + "name"=>"malformed-node", + "selfLink"=>"/api/v1/nodes/malformed-node", + "uid"=>"fe073f0a-e6bf-4d68-b4e5-ffaa42b91528", + "resourceVersion"=>"5974522", + "creationTimestamp"=>"2021-07-21T23:40:14Z", + "labels"=>{ + "agentpool"=>"nodepool1", + "beta.kubernetes.io/arch"=>"amd64", + "beta.kubernetes.io/instance-type"=>"Standard_DS2_v2", + "beta.kubernetes.io/os"=>"linux", + "failure-domain.beta.kubernetes.io/region"=>"westus2", + "failure-domain.beta.kubernetes.io/zone"=>"0", + "kubernetes.azure.com/cluster"=>"MC_davidaks16_davidaks16_westus2", + "kubernetes.azure.com/mode"=>"system", + "kubernetes.azure.com/node-image-version"=>"AKSUbuntu-1804gen2containerd-2021.07.03", + "kubernetes.azure.com/os-sku"=>"Ubuntu", + "kubernetes.azure.com/role"=>"agent", + "kubernetes.io/arch"=>"amd64", + "kubernetes.io/hostname"=>"malformed-node", + "kubernetes.io/os"=>"linux", + "kubernetes.io/role"=>"agent", + "node-role.kubernetes.io/agent"=>"", + "node.kubernetes.io/instance-type"=>"Standard_DS2_v2", + "storageprofile"=>"managed", + 
"storagetier"=>"Premium_LRS", + "topology.kubernetes.io/region"=>"westus2", + "topology.kubernetes.io/zone"=>"0" + }, + "annotations"=>{ + "node.alpha.kubernetes.io/ttl"=>"0", + "volumes.kubernetes.io/controller-managed-attach-detach"=>"true" + }, + "managedFields"=>[ + { + "manager"=>"kube-controller-manager", + "operation"=>"Update", + "apiVersion"=>"v1", + "time"=>"2021-07-21T23:40:20Z", + "fieldsType"=>"FieldsV1", + "fieldsV1"=>{ + "f:metadata"=>{ + "f:annotations"=>{ + "f:node.alpha.kubernetes.io/ttl"=>{} + } + } + } + }, + { + "manager"=>"kubelet", + "operation"=>"Update", + "apiVersion"=>"v1", + "time"=>"2021-07-21T23:40:24Z", + "fieldsType"=>"FieldsV1", + "fieldsV1"=>{ + "f:metadata"=>{ + "f:annotations"=>{ + "."=>{}, + "f:volumes.kubernetes.io/controller-managed-attach-detach"=>{} + }, + "f:labels"=>{ + "."=>{}, + "f:agentpool"=>{}, + "f:beta.kubernetes.io/arch"=>{}, + "f:beta.kubernetes.io/instance-type"=>{}, + "f:beta.kubernetes.io/os"=>{}, + "f:failure-domain.beta.kubernetes.io/region"=>{}, + "f:failure-domain.beta.kubernetes.io/zone"=>{}, + "f:kubernetes.azure.com/cluster"=>{}, + "f:kubernetes.azure.com/mode"=>{}, + "f:kubernetes.azure.com/node-image-version"=>{}, + "f:kubernetes.azure.com/os-sku"=>{}, + "f:kubernetes.azure.com/role"=>{}, + "f:kubernetes.io/arch"=>{}, + "f:kubernetes.io/hostname"=>{}, + "f:kubernetes.io/os"=>{}, + "f:node.kubernetes.io/instance-type"=>{}, + "f:storageprofile"=>{}, + "f:storagetier"=>{}, + "f:topology.kubernetes.io/region"=>{}, + "f:topology.kubernetes.io/zone"=>{} + } + }, + "f:spec"=>{ + "f:providerID"=>{} + }, + "f:status"=>{ + "f:addresses"=>{ + "."=>{}, + "k:{\"type\":\"Hostname\"}"=>{ + "."=>{}, + "f:address"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"InternalIP\"}"=>{ + "."=>{}, + "f:address"=>{}, + "f:type"=>{} + } + }, + "f:allocatable"=>{ + "."=>{}, + "f:attachable-volumes-azure-disk"=>{}, + "f:cpu"=>{}, + "f:ephemeral-storage"=>{}, + "f:hugepages-1Gi"=>{}, + "f:hugepages-2Mi"=>{}, + "f:memory"=>{}, + "f:pods"=>{} + }, + "f:capacity"=>{ + "."=>{}, + "f:attachable-volumes-azure-disk"=>{}, + "f:cpu"=>{}, + "f:ephemeral-storage"=>{}, + "f:hugepages-1Gi"=>{}, + "f:hugepages-2Mi"=>{}, + "f:memory"=>{}, + "f:pods"=>{} + }, + "f:conditions"=>{ + "."=>{}, + "k:{\"type\":\"DiskPressure\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"MemoryPressure\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"PIDPressure\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"Ready\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + } + }, + "f:config"=>{}, + "f:daemonEndpoints"=>{ + "f:kubeletEndpoint"=>{ + "f:Port"=>{} + } + }, + "f:images"=>{}, + "f:nodeInfo"=>{ + "f:architecture"=>{}, + "f:bootID"=>{}, + "f:containerRuntimeVersion"=>{}, + "f:kernelVersion"=>{}, + "f:kubeProxyVersion"=>{}, + "f:kubeletVersion"=>{}, + "f:machineID"=>{}, + "f:operatingSystem"=>{}, + "f:osImage"=>{}, + "f:systemUUID"=>{} + } + } + } + }, + { + "manager"=>"kubectl-label", + "operation"=>"Update", + "apiVersion"=>"v1", + "time"=>"2021-07-21T23:40:53Z", + "fieldsType"=>"FieldsV1", + "fieldsV1"=>{ + "f:metadata"=>{ + "f:labels"=>{ + 
"f:kubernetes.io/role"=>{}, + "f:node-role.kubernetes.io/agent"=>{} + } + } + } + }, + { + "manager"=>"node-problem-detector", + "operation"=>"Update", + "apiVersion"=>"v1", + "time"=>"2021-08-10T18:10:02Z", + "fieldsType"=>"FieldsV1", + "fieldsV1"=>{ + "f:status"=>{ + "f:conditions"=>{ + "k:{\"type\":\"ContainerRuntimeProblem\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FilesystemCorruptionProblem\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FreezeScheduled\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FrequentContainerdRestart\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FrequentDockerRestart\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FrequentKubeletRestart\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FrequentUnregisterNetDevice\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"KernelDeadlock\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"KubeletProblem\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"PreemptScheduled\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"ReadonlyFilesystem\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"RebootScheduled\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"RedeployScheduled\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"TerminateScheduled\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + } + } + } + } + } + ] + }, + "spec"=>{ + "providerID"=>"azure:///subscriptions/3b875bf3-0eec-4d8c-bdee-25c7ccc1f130/resourceGroups/mc_davidaks16_davidaks16_westus2/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-24816391-vmss/virtualMachines/0" + }, + "status"=>{ + "conditions"=>[ + { + "type"=>"FrequentDockerRestart", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoFrequentDockerRestart", + "message"=>"docker is functioning properly" + }, + { + "type"=>"FilesystemCorruptionProblem", + "status"=>"False", + 
"lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"FilesystemIsOK", + "message"=>"Filesystem is healthy" + }, + { + "type"=>"KernelDeadlock", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"KernelHasNoDeadlock", + "message"=>"kernel has no deadlock" + }, + { + "type"=>"FrequentContainerdRestart", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoFrequentContainerdRestart", + "message"=>"containerd is functioning properly" + }, + { + "type"=>"FreezeScheduled", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-11T23:25:04Z", + "reason"=>"NoFreezeScheduled", + "message"=>"VM has no scheduled Freeze event" + }, + { + "type"=>"FrequentUnregisterNetDevice", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoFrequentUnregisterNetDevice", + "message"=>"node is functioning properly" + }, + { + "type"=>"TerminateScheduled", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoTerminateScheduled", + "message"=>"VM has no scheduled Terminate event" + }, + { + "type"=>"ReadonlyFilesystem", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"FilesystemIsNotReadOnly", + "message"=>"Filesystem is not read-only" + }, + { + "type"=>"RedeployScheduled", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoRedeployScheduled", + "message"=>"VM has no scheduled Redeploy event" + }, + { + "type"=>"KubeletProblem", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"KubeletIsUp", + "message"=>"kubelet service is up" + }, + { + "type"=>"PreemptScheduled", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:11:11Z", + "reason"=>"NoPreemptScheduled", + "message"=>"VM has no scheduled Preempt event" + }, + { + "type"=>"RebootScheduled", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoRebootScheduled", + "message"=>"VM has no scheduled Reboot event" + }, + { + "type"=>"ContainerRuntimeProblem", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"ContainerRuntimeIsUp", + "message"=>"container runtime service is up" + }, + { + "type"=>"FrequentKubeletRestart", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoFrequentKubeletRestart", + "message"=>"kubelet is functioning properly" + }, + { + "type"=>"MemoryPressure", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:28:21Z", + "lastTransitionTime"=>"2021-07-21T23:40:14Z", + "reason"=>"KubeletHasSufficientMemory", + "message"=>"kubelet has sufficient memory available" + }, + { + "type"=>"DiskPressure", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:28:21Z", + "lastTransitionTime"=>"2021-07-21T23:40:14Z", + "reason"=>"KubeletHasNoDiskPressure", + "message"=>"kubelet has no disk pressure" + }, + { + 
"type"=>"PIDPressure", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:28:21Z", + "lastTransitionTime"=>"2021-07-21T23:40:14Z", + "reason"=>"KubeletHasSufficientPID", + "message"=>"kubelet has sufficient PID available" + }, + { + "type"=>"Ready", + "status"=>"True", + "lastHeartbeatTime"=>"2021-08-17T19:28:21Z", + "lastTransitionTime"=>"2021-07-21T23:40:24Z", + "reason"=>"KubeletReady", + "message"=>"kubelet is posting ready status. AppArmor enabled" + } + ], + "addresses"=>[ + { + "type"=>"Hostname", + "address"=>"malformed-node" + }, + { + "type"=>"InternalIP", + "address"=>"10.240.0.4" + } + ], + "daemonEndpoints"=>{ + "kubeletEndpoint"=>{ + "Port"=>10250 + } + }, + "nodeInfo"=>{ + "machineID"=>"17a654260e2c4a9bb3a3eb4b4188e4b4", + "systemUUID"=>"7ff599e4-909e-4950-a044-ff8613af3af9", + "bootID"=>"02bb865b-a469-43cd-8b0b-5ceb4ecd80b0", + "kernelVersion"=>"5.4.0-1051-azure", + "osImage"=>"Ubuntu 18.04.5 LTS", + "containerRuntimeVersion"=>"containerd://1.4.4+azure", + "kubeletVersion"=>"v1.19.11", + "kubeProxyVersion"=>"v1.19.11", + "operatingSystem"=>"linux", + "architecture"=>"amd64" + }, + "images"=>[ + { + "names"=>[ + "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021-1" + ], + "sizeBytes"=>331689060 + }, + { + "names"=>[ + "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021" + ], + "sizeBytes"=>330099815 + }, + { + "names"=>[ + "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021-hotfix" + ], + "sizeBytes"=>271471426 + }, + { + "names"=>[ + "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021" + ], + "sizeBytes"=>269703297 + }, + { + "names"=>[ + "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021" + ], + "sizeBytes"=>264732875 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/ingress/nginx-ingress-controller:0.19.0" + ], + "sizeBytes"=>166352383 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/hcp-tunnel-front:master.210623.2" + ], + "sizeBytes"=>147750148 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/hcp-tunnel-front:master.210524.1" + ], + "sizeBytes"=>146446618 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/hcp-tunnel-front:master.210427.1" + ], + "sizeBytes"=>136242776 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/node:v3.8.9.5" + ], + "sizeBytes"=>101794833 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/ingress/nginx-ingress-controller:0.47.0" + ], + "sizeBytes"=>101445696 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/autoscaler/cluster-proportional-autoscaler:1.3.0_v0.0.5" + ], + "sizeBytes"=>101194562 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/tunnel-openvpn:master.210623.2" + ], + "sizeBytes"=>96125176 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/tunnel-openvpn:master.210524.1" + ], + "sizeBytes"=>95879501 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/exechealthz:1.2_v0.0.5" + ], + "sizeBytes"=>94348102 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/node:v3.8.9.2" + ], + "sizeBytes"=>93537927 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/acc/sgx-attestation:2.0" + ], + "sizeBytes"=>91841669 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/azurefile-csi:v1.4.0" + ], + "sizeBytes"=>91324193 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/azurefile-csi:v1.2.0" + ], + "sizeBytes"=>89103171 + }, + { + "names"=>[ + "mcr.microsoft.com/azure-application-gateway/kubernetes-ingress:1.0.1-rc3" + ], + "sizeBytes"=>86839805 + }, + { + "names"=>[ + 
"mcr.microsoft.com/azure-application-gateway/kubernetes-ingress:1.2.0" + ], + "sizeBytes"=>86488586 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/tunnel-openvpn:master.210427.1" + ], + "sizeBytes"=>86120048 + }, + { + "names"=>[ + "mcr.microsoft.com/azure-application-gateway/kubernetes-ingress:1.3.0" + ], + "sizeBytes"=>81252495 + }, + { + "names"=>[ + "mcr.microsoft.com/azure-application-gateway/kubernetes-ingress:1.4.0" + ], + "sizeBytes"=>79586703 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.4.0" + ], + "sizeBytes"=>78795016 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.2.0" + ], + "sizeBytes"=>76527179 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/azure-npm:v1.1.8" + ], + "sizeBytes"=>75025803 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/azure-npm:v1.2.2_hotfix" + ], + "sizeBytes"=>73533889 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/azure-npm:v1.3.1" + ], + "sizeBytes"=>72242894 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/azure-npm:v1.2.8" + ], + "sizeBytes"=>70622822 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/nvidia/k8s-device-plugin:v0.9.0" + ], + "sizeBytes"=>67291599 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/dashboard:v2.0.1" + ], + "sizeBytes"=>66415836 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/dashboard:v2.0.0-rc7" + ], + "sizeBytes"=>65965658 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/azure-npm:v1.2.1" + ], + "sizeBytes"=>64123775 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/cni:v3.8.9.3" + ], + "sizeBytes"=>63581323 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/networkmonitor:v1.1.8" + ], + "sizeBytes"=>63154716 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/cni:v3.8.9.2" + ], + "sizeBytes"=>61626312 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/node:v3.18.1" + ], + "sizeBytes"=>60500885 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/node:v3.17.2" + ], + "sizeBytes"=>58419768 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/networkmonitor:v1.1.8_hotfix", + "mcr.microsoft.com/containernetworking/networkmonitor:v1.1.8post2" + ], + "sizeBytes"=>56368756 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/kube-proxy@sha256:282543237a1aa3f407656290f454b7068a92e1abe2156082c750d5abfbcad90c", + "mcr.microsoft.com/oss/kubernetes/kube-proxy:v1.19.11-hotfix.20210526.2" + ], + "sizeBytes"=>56310724 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/node:v3.19.0" + ], + "sizeBytes"=>55228749 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/kube-proxy:v1.19.11-hotfix.20210526.1" + ], + "sizeBytes"=>54692048 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/dashboard:v2.0.0-rc3" + ], + "sizeBytes"=>50803639 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/secrets-store/driver:v0.0.19" + ], + "sizeBytes"=>49759361 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/azure/aad-pod-identity/nmi:v1.7.5" + ], + "sizeBytes"=>49704644 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/secrets-store/driver:v0.0.21" + ], + "sizeBytes"=>49372390 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/kube-proxy@sha256:a64d3538b72905b07356881314755b02db3675ff47ee2bcc49dd7be856e285d5", + "mcr.microsoft.com/oss/kubernetes/kube-proxy:v1.19.11-hotfix.20210526" + ], + "sizeBytes"=>49322942 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/azure/aad-pod-identity/nmi:v1.7.4" + ], + "sizeBytes"=>48108311 + }, 
+ { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/kubernetes-dashboard:v1.10.1" + ], + "sizeBytes"=>44907744 + } + ], + "config"=>{} + } + }, + { + "metadata"=>{ + "name"=>"correct-node", + "selfLink"=>"/api/v1/nodes/correct-node", + "uid"=>"fe073f0a-e6bf-4d68-b4e5-ffaa42b91528", + "resourceVersion"=>"5974522", + "creationTimestamp"=>"2021-07-21T23:40:14Z", + "labels"=>{ + "agentpool"=>"nodepool1", + "beta.kubernetes.io/arch"=>"amd64", + "beta.kubernetes.io/instance-type"=>"Standard_DS2_v2", + "beta.kubernetes.io/os"=>"linux", + "failure-domain.beta.kubernetes.io/region"=>"westus2", + "failure-domain.beta.kubernetes.io/zone"=>"0", + "kubernetes.azure.com/cluster"=>"MC_davidaks16_davidaks16_westus2", + "kubernetes.azure.com/mode"=>"system", + "kubernetes.azure.com/node-image-version"=>"AKSUbuntu-1804gen2containerd-2021.07.03", + "kubernetes.azure.com/os-sku"=>"Ubuntu", + "kubernetes.azure.com/role"=>"agent", + "kubernetes.io/arch"=>"amd64", + "kubernetes.io/hostname"=>"correct-node", + "kubernetes.io/os"=>"linux", + "kubernetes.io/role"=>"agent", + "node-role.kubernetes.io/agent"=>"", + "node.kubernetes.io/instance-type"=>"Standard_DS2_v2", + "storageprofile"=>"managed", + "storagetier"=>"Premium_LRS", + "topology.kubernetes.io/region"=>"westus2", + "topology.kubernetes.io/zone"=>"0" + }, + "annotations"=>{ + "node.alpha.kubernetes.io/ttl"=>"0", + "volumes.kubernetes.io/controller-managed-attach-detach"=>"true" + }, + "managedFields"=>[ + { + "manager"=>"kube-controller-manager", + "operation"=>"Update", + "apiVersion"=>"v1", + "time"=>"2021-07-21T23:40:20Z", + "fieldsType"=>"FieldsV1", + "fieldsV1"=>{ + "f:metadata"=>{ + "f:annotations"=>{ + "f:node.alpha.kubernetes.io/ttl"=>{} + } + } + } + }, + { + "manager"=>"kubelet", + "operation"=>"Update", + "apiVersion"=>"v1", + "time"=>"2021-07-21T23:40:24Z", + "fieldsType"=>"FieldsV1", + "fieldsV1"=>{ + "f:metadata"=>{ + "f:annotations"=>{ + "."=>{}, + "f:volumes.kubernetes.io/controller-managed-attach-detach"=>{} + }, + "f:labels"=>{ + "."=>{}, + "f:agentpool"=>{}, + "f:beta.kubernetes.io/arch"=>{}, + "f:beta.kubernetes.io/instance-type"=>{}, + "f:beta.kubernetes.io/os"=>{}, + "f:failure-domain.beta.kubernetes.io/region"=>{}, + "f:failure-domain.beta.kubernetes.io/zone"=>{}, + "f:kubernetes.azure.com/cluster"=>{}, + "f:kubernetes.azure.com/mode"=>{}, + "f:kubernetes.azure.com/node-image-version"=>{}, + "f:kubernetes.azure.com/os-sku"=>{}, + "f:kubernetes.azure.com/role"=>{}, + "f:kubernetes.io/arch"=>{}, + "f:kubernetes.io/hostname"=>{}, + "f:kubernetes.io/os"=>{}, + "f:node.kubernetes.io/instance-type"=>{}, + "f:storageprofile"=>{}, + "f:storagetier"=>{}, + "f:topology.kubernetes.io/region"=>{}, + "f:topology.kubernetes.io/zone"=>{} + } + }, + "f:spec"=>{ + "f:providerID"=>{} + }, + "f:status"=>{ + "f:addresses"=>{ + "."=>{}, + "k:{\"type\":\"Hostname\"}"=>{ + "."=>{}, + "f:address"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"InternalIP\"}"=>{ + "."=>{}, + "f:address"=>{}, + "f:type"=>{} + } + }, + "f:allocatable"=>{ + "."=>{}, + "f:attachable-volumes-azure-disk"=>{}, + "f:cpu"=>{}, + "f:ephemeral-storage"=>{}, + "f:hugepages-1Gi"=>{}, + "f:hugepages-2Mi"=>{}, + "f:memory"=>{}, + "f:pods"=>{} + }, + "f:capacity"=>{ + "."=>{}, + "f:attachable-volumes-azure-disk"=>{}, + "f:cpu"=>{}, + "f:ephemeral-storage"=>{}, + "f:hugepages-1Gi"=>{}, + "f:hugepages-2Mi"=>{}, + "f:memory"=>{}, + "f:pods"=>{} + }, + "f:conditions"=>{ + "."=>{}, + "k:{\"type\":\"DiskPressure\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + 
"f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"MemoryPressure\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"PIDPressure\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"Ready\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + } + }, + "f:config"=>{}, + "f:daemonEndpoints"=>{ + "f:kubeletEndpoint"=>{ + "f:Port"=>{} + } + }, + "f:images"=>{}, + "f:nodeInfo"=>{ + "f:architecture"=>{}, + "f:bootID"=>{}, + "f:containerRuntimeVersion"=>{}, + "f:kernelVersion"=>{}, + "f:kubeProxyVersion"=>{}, + "f:kubeletVersion"=>{}, + "f:machineID"=>{}, + "f:operatingSystem"=>{}, + "f:osImage"=>{}, + "f:systemUUID"=>{} + } + } + } + }, + { + "manager"=>"kubectl-label", + "operation"=>"Update", + "apiVersion"=>"v1", + "time"=>"2021-07-21T23:40:53Z", + "fieldsType"=>"FieldsV1", + "fieldsV1"=>{ + "f:metadata"=>{ + "f:labels"=>{ + "f:kubernetes.io/role"=>{}, + "f:node-role.kubernetes.io/agent"=>{} + } + } + } + }, + { + "manager"=>"node-problem-detector", + "operation"=>"Update", + "apiVersion"=>"v1", + "time"=>"2021-08-10T18:10:02Z", + "fieldsType"=>"FieldsV1", + "fieldsV1"=>{ + "f:status"=>{ + "f:conditions"=>{ + "k:{\"type\":\"ContainerRuntimeProblem\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FilesystemCorruptionProblem\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FreezeScheduled\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FrequentContainerdRestart\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FrequentDockerRestart\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FrequentKubeletRestart\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FrequentUnregisterNetDevice\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"KernelDeadlock\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"KubeletProblem\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"PreemptScheduled\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"ReadonlyFilesystem\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + 
"f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"RebootScheduled\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"RedeployScheduled\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"TerminateScheduled\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + } + } + } + } + } + ] + }, + "spec"=>{ + "providerID"=>"azure:///subscriptions/3b875bf3-0eec-4d8c-bdee-25c7ccc1f130/resourceGroups/mc_davidaks16_davidaks16_westus2/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-24816391-vmss/virtualMachines/0" + }, + "status"=>{ + "capacity"=>{ + "attachable-volumes-azure-disk"=>"8", + "cpu"=>"2m", + "ephemeral-storage"=>"666", + "hugepages-1Gi"=>"0", + "hugepages-2Mi"=>"0", + "memory"=>"555", + "pods"=>"30" + }, + "allocatable"=>{ + "attachable-volumes-azure-disk"=>"8", + "cpu"=>"1m", + "ephemeral-storage"=>"333", + "hugepages-1Gi"=>"0", + "hugepages-2Mi"=>"0", + "memory"=>"444", + "pods"=>"30" + }, + "conditions"=>[ + { + "type"=>"FrequentDockerRestart", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoFrequentDockerRestart", + "message"=>"docker is functioning properly" + }, + { + "type"=>"FilesystemCorruptionProblem", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"FilesystemIsOK", + "message"=>"Filesystem is healthy" + }, + { + "type"=>"KernelDeadlock", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"KernelHasNoDeadlock", + "message"=>"kernel has no deadlock" + }, + { + "type"=>"FrequentContainerdRestart", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoFrequentContainerdRestart", + "message"=>"containerd is functioning properly" + }, + { + "type"=>"FreezeScheduled", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-11T23:25:04Z", + "reason"=>"NoFreezeScheduled", + "message"=>"VM has no scheduled Freeze event" + }, + { + "type"=>"FrequentUnregisterNetDevice", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoFrequentUnregisterNetDevice", + "message"=>"node is functioning properly" + }, + { + "type"=>"TerminateScheduled", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoTerminateScheduled", + "message"=>"VM has no scheduled Terminate event" + }, + { + "type"=>"ReadonlyFilesystem", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"FilesystemIsNotReadOnly", + "message"=>"Filesystem is not read-only" + }, + { + "type"=>"RedeployScheduled", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoRedeployScheduled", + "message"=>"VM has no scheduled Redeploy event" + }, + { + "type"=>"KubeletProblem", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + 
"lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"KubeletIsUp", + "message"=>"kubelet service is up" + }, + { + "type"=>"PreemptScheduled", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:11:11Z", + "reason"=>"NoPreemptScheduled", + "message"=>"VM has no scheduled Preempt event" + }, + { + "type"=>"RebootScheduled", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoRebootScheduled", + "message"=>"VM has no scheduled Reboot event" + }, + { + "type"=>"ContainerRuntimeProblem", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"ContainerRuntimeIsUp", + "message"=>"container runtime service is up" + }, + { + "type"=>"FrequentKubeletRestart", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoFrequentKubeletRestart", + "message"=>"kubelet is functioning properly" + }, + { + "type"=>"MemoryPressure", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:28:21Z", + "lastTransitionTime"=>"2021-07-21T23:40:14Z", + "reason"=>"KubeletHasSufficientMemory", + "message"=>"kubelet has sufficient memory available" + }, + { + "type"=>"DiskPressure", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:28:21Z", + "lastTransitionTime"=>"2021-07-21T23:40:14Z", + "reason"=>"KubeletHasNoDiskPressure", + "message"=>"kubelet has no disk pressure" + }, + { + "type"=>"PIDPressure", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:28:21Z", + "lastTransitionTime"=>"2021-07-21T23:40:14Z", + "reason"=>"KubeletHasSufficientPID", + "message"=>"kubelet has sufficient PID available" + }, + { + "type"=>"Ready", + "status"=>"True", + "lastHeartbeatTime"=>"2021-08-17T19:28:21Z", + "lastTransitionTime"=>"2021-07-21T23:40:24Z", + "reason"=>"KubeletReady", + "message"=>"kubelet is posting ready status. 
AppArmor enabled" + } + ], + "addresses"=>[ + { + "type"=>"Hostname", + "address"=>"correct-node" + }, + { + "type"=>"InternalIP", + "address"=>"10.240.0.4" + } + ], + "daemonEndpoints"=>{ + "kubeletEndpoint"=>{ + "Port"=>10250 + } + }, + "nodeInfo"=>{ + "machineID"=>"17a654260e2c4a9bb3a3eb4b4188e4b4", + "systemUUID"=>"7ff599e4-909e-4950-a044-ff8613af3af9", + "bootID"=>"02bb865b-a469-43cd-8b0b-5ceb4ecd80b0", + "kernelVersion"=>"5.4.0-1051-azure", + "osImage"=>"Ubuntu 18.04.5 LTS", + "containerRuntimeVersion"=>"containerd://1.4.4+azure", + "kubeletVersion"=>"v1.19.11", + "kubeProxyVersion"=>"v1.19.11", + "operatingSystem"=>"linux", + "architecture"=>"amd64" + }, + "images"=>[ + { + "names"=>[ + "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021-1" + ], + "sizeBytes"=>331689060 + }, + { + "names"=>[ + "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021" + ], + "sizeBytes"=>330099815 + }, + { + "names"=>[ + "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021-hotfix" + ], + "sizeBytes"=>271471426 + }, + { + "names"=>[ + "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021" + ], + "sizeBytes"=>269703297 + }, + { + "names"=>[ + "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021" + ], + "sizeBytes"=>264732875 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/ingress/nginx-ingress-controller:0.19.0" + ], + "sizeBytes"=>166352383 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/hcp-tunnel-front:master.210623.2" + ], + "sizeBytes"=>147750148 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/hcp-tunnel-front:master.210524.1" + ], + "sizeBytes"=>146446618 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/hcp-tunnel-front:master.210427.1" + ], + "sizeBytes"=>136242776 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/node:v3.8.9.5" + ], + "sizeBytes"=>101794833 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/ingress/nginx-ingress-controller:0.47.0" + ], + "sizeBytes"=>101445696 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/autoscaler/cluster-proportional-autoscaler:1.3.0_v0.0.5" + ], + "sizeBytes"=>101194562 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/tunnel-openvpn:master.210623.2" + ], + "sizeBytes"=>96125176 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/tunnel-openvpn:master.210524.1" + ], + "sizeBytes"=>95879501 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/exechealthz:1.2_v0.0.5" + ], + "sizeBytes"=>94348102 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/node:v3.8.9.2" + ], + "sizeBytes"=>93537927 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/acc/sgx-attestation:2.0" + ], + "sizeBytes"=>91841669 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/azurefile-csi:v1.4.0" + ], + "sizeBytes"=>91324193 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/azurefile-csi:v1.2.0" + ], + "sizeBytes"=>89103171 + }, + { + "names"=>[ + "mcr.microsoft.com/azure-application-gateway/kubernetes-ingress:1.0.1-rc3" + ], + "sizeBytes"=>86839805 + }, + { + "names"=>[ + "mcr.microsoft.com/azure-application-gateway/kubernetes-ingress:1.2.0" + ], + "sizeBytes"=>86488586 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/tunnel-openvpn:master.210427.1" + ], + "sizeBytes"=>86120048 + }, + { + "names"=>[ + "mcr.microsoft.com/azure-application-gateway/kubernetes-ingress:1.3.0" + ], + "sizeBytes"=>81252495 + }, + { + "names"=>[ + "mcr.microsoft.com/azure-application-gateway/kubernetes-ingress:1.4.0" + ], + "sizeBytes"=>79586703 + }, + { + 
"names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.4.0" + ], + "sizeBytes"=>78795016 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.2.0" + ], + "sizeBytes"=>76527179 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/azure-npm:v1.1.8" + ], + "sizeBytes"=>75025803 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/azure-npm:v1.2.2_hotfix" + ], + "sizeBytes"=>73533889 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/azure-npm:v1.3.1" + ], + "sizeBytes"=>72242894 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/azure-npm:v1.2.8" + ], + "sizeBytes"=>70622822 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/nvidia/k8s-device-plugin:v0.9.0" + ], + "sizeBytes"=>67291599 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/dashboard:v2.0.1" + ], + "sizeBytes"=>66415836 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/dashboard:v2.0.0-rc7" + ], + "sizeBytes"=>65965658 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/azure-npm:v1.2.1" + ], + "sizeBytes"=>64123775 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/cni:v3.8.9.3" + ], + "sizeBytes"=>63581323 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/networkmonitor:v1.1.8" + ], + "sizeBytes"=>63154716 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/cni:v3.8.9.2" + ], + "sizeBytes"=>61626312 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/node:v3.18.1" + ], + "sizeBytes"=>60500885 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/node:v3.17.2" + ], + "sizeBytes"=>58419768 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/networkmonitor:v1.1.8_hotfix", + "mcr.microsoft.com/containernetworking/networkmonitor:v1.1.8post2" + ], + "sizeBytes"=>56368756 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/kube-proxy@sha256:282543237a1aa3f407656290f454b7068a92e1abe2156082c750d5abfbcad90c", + "mcr.microsoft.com/oss/kubernetes/kube-proxy:v1.19.11-hotfix.20210526.2" + ], + "sizeBytes"=>56310724 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/node:v3.19.0" + ], + "sizeBytes"=>55228749 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/kube-proxy:v1.19.11-hotfix.20210526.1" + ], + "sizeBytes"=>54692048 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/dashboard:v2.0.0-rc3" + ], + "sizeBytes"=>50803639 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/secrets-store/driver:v0.0.19" + ], + "sizeBytes"=>49759361 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/azure/aad-pod-identity/nmi:v1.7.5" + ], + "sizeBytes"=>49704644 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/secrets-store/driver:v0.0.21" + ], + "sizeBytes"=>49372390 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/kube-proxy@sha256:a64d3538b72905b07356881314755b02db3675ff47ee2bcc49dd7be856e285d5", + "mcr.microsoft.com/oss/kubernetes/kube-proxy:v1.19.11-hotfix.20210526" + ], + "sizeBytes"=>49322942 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/azure/aad-pod-identity/nmi:v1.7.4" + ], + "sizeBytes"=>48108311 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/kubernetes-dashboard:v1.10.1" + ], + "sizeBytes"=>44907744 + } + ], + "config"=>{} + } + } + ] +} \ No newline at end of file diff --git a/test/unit-tests/canned-api-responses/kube-nodes.txt b/test/unit-tests/canned-api-responses/kube-nodes.txt new file mode 100644 index 000000000..ed411c2e5 --- /dev/null +++ b/test/unit-tests/canned-api-responses/kube-nodes.txt @@ -0,0 +1,851 @@ +{ + "kind"=>"NodeList", + 
"apiVersion"=>"v1", + "metadata"=>{ + "selfLink"=>"/api/v1/nodes", + "resourceVersion"=>"5974879" + }, + "items"=>[ + { + "metadata"=>{ + "name"=>"aks-nodepool1-24816391-vmss000000", + "selfLink"=>"/api/v1/nodes/aks-nodepool1-24816391-vmss000000", + "uid"=>"fe073f0a-e6bf-4d68-b4e5-ffaa42b91528", + "resourceVersion"=>"5974522", + "creationTimestamp"=>"2021-07-21T23:40:14Z", + "labels"=>{ + "agentpool"=>"nodepool1", + "beta.kubernetes.io/arch"=>"amd64", + "beta.kubernetes.io/instance-type"=>"Standard_DS2_v2", + "beta.kubernetes.io/os"=>"linux", + "failure-domain.beta.kubernetes.io/region"=>"westus2", + "failure-domain.beta.kubernetes.io/zone"=>"0", + "kubernetes.azure.com/cluster"=>"MC_davidaks16_davidaks16_westus2", + "kubernetes.azure.com/mode"=>"system", + "kubernetes.azure.com/node-image-version"=>"AKSUbuntu-1804gen2containerd-2021.07.03", + "kubernetes.azure.com/os-sku"=>"Ubuntu", + "kubernetes.azure.com/role"=>"agent", + "kubernetes.io/arch"=>"amd64", + "kubernetes.io/hostname"=>"aks-nodepool1-24816391-vmss000000", + "kubernetes.io/os"=>"linux", + "kubernetes.io/role"=>"agent", + "node-role.kubernetes.io/agent"=>"", + "node.kubernetes.io/instance-type"=>"Standard_DS2_v2", + "storageprofile"=>"managed", + "storagetier"=>"Premium_LRS", + "topology.kubernetes.io/region"=>"westus2", + "topology.kubernetes.io/zone"=>"0" + }, + "annotations"=>{ + "node.alpha.kubernetes.io/ttl"=>"0", + "volumes.kubernetes.io/controller-managed-attach-detach"=>"true" + }, + "managedFields"=>[ + { + "manager"=>"kube-controller-manager", + "operation"=>"Update", + "apiVersion"=>"v1", + "time"=>"2021-07-21T23:40:20Z", + "fieldsType"=>"FieldsV1", + "fieldsV1"=>{ + "f:metadata"=>{ + "f:annotations"=>{ + "f:node.alpha.kubernetes.io/ttl"=>{} + } + } + } + }, + { + "manager"=>"kubelet", + "operation"=>"Update", + "apiVersion"=>"v1", + "time"=>"2021-07-21T23:40:24Z", + "fieldsType"=>"FieldsV1", + "fieldsV1"=>{ + "f:metadata"=>{ + "f:annotations"=>{ + "."=>{}, + "f:volumes.kubernetes.io/controller-managed-attach-detach"=>{} + }, + "f:labels"=>{ + "."=>{}, + "f:agentpool"=>{}, + "f:beta.kubernetes.io/arch"=>{}, + "f:beta.kubernetes.io/instance-type"=>{}, + "f:beta.kubernetes.io/os"=>{}, + "f:failure-domain.beta.kubernetes.io/region"=>{}, + "f:failure-domain.beta.kubernetes.io/zone"=>{}, + "f:kubernetes.azure.com/cluster"=>{}, + "f:kubernetes.azure.com/mode"=>{}, + "f:kubernetes.azure.com/node-image-version"=>{}, + "f:kubernetes.azure.com/os-sku"=>{}, + "f:kubernetes.azure.com/role"=>{}, + "f:kubernetes.io/arch"=>{}, + "f:kubernetes.io/hostname"=>{}, + "f:kubernetes.io/os"=>{}, + "f:node.kubernetes.io/instance-type"=>{}, + "f:storageprofile"=>{}, + "f:storagetier"=>{}, + "f:topology.kubernetes.io/region"=>{}, + "f:topology.kubernetes.io/zone"=>{} + } + }, + "f:spec"=>{ + "f:providerID"=>{} + }, + "f:status"=>{ + "f:addresses"=>{ + "."=>{}, + "k:{\"type\":\"Hostname\"}"=>{ + "."=>{}, + "f:address"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"InternalIP\"}"=>{ + "."=>{}, + "f:address"=>{}, + "f:type"=>{} + } + }, + "f:allocatable"=>{ + "."=>{}, + "f:attachable-volumes-azure-disk"=>{}, + "f:cpu"=>{}, + "f:ephemeral-storage"=>{}, + "f:hugepages-1Gi"=>{}, + "f:hugepages-2Mi"=>{}, + "f:memory"=>{}, + "f:pods"=>{} + }, + "f:capacity"=>{ + "."=>{}, + "f:attachable-volumes-azure-disk"=>{}, + "f:cpu"=>{}, + "f:ephemeral-storage"=>{}, + "f:hugepages-1Gi"=>{}, + "f:hugepages-2Mi"=>{}, + "f:memory"=>{}, + "f:pods"=>{} + }, + "f:conditions"=>{ + "."=>{}, + "k:{\"type\":\"DiskPressure\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + 
"f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"MemoryPressure\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"PIDPressure\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"Ready\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + } + }, + "f:config"=>{}, + "f:daemonEndpoints"=>{ + "f:kubeletEndpoint"=>{ + "f:Port"=>{} + } + }, + "f:images"=>{}, + "f:nodeInfo"=>{ + "f:architecture"=>{}, + "f:bootID"=>{}, + "f:containerRuntimeVersion"=>{}, + "f:kernelVersion"=>{}, + "f:kubeProxyVersion"=>{}, + "f:kubeletVersion"=>{}, + "f:machineID"=>{}, + "f:operatingSystem"=>{}, + "f:osImage"=>{}, + "f:systemUUID"=>{} + } + } + } + }, + { + "manager"=>"kubectl-label", + "operation"=>"Update", + "apiVersion"=>"v1", + "time"=>"2021-07-21T23:40:53Z", + "fieldsType"=>"FieldsV1", + "fieldsV1"=>{ + "f:metadata"=>{ + "f:labels"=>{ + "f:kubernetes.io/role"=>{}, + "f:node-role.kubernetes.io/agent"=>{} + } + } + } + }, + { + "manager"=>"node-problem-detector", + "operation"=>"Update", + "apiVersion"=>"v1", + "time"=>"2021-08-10T18:10:02Z", + "fieldsType"=>"FieldsV1", + "fieldsV1"=>{ + "f:status"=>{ + "f:conditions"=>{ + "k:{\"type\":\"ContainerRuntimeProblem\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FilesystemCorruptionProblem\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FreezeScheduled\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FrequentContainerdRestart\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FrequentDockerRestart\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FrequentKubeletRestart\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"FrequentUnregisterNetDevice\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"KernelDeadlock\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"KubeletProblem\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"PreemptScheduled\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"ReadonlyFilesystem\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, 
+ "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"RebootScheduled\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"RedeployScheduled\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + }, + "k:{\"type\":\"TerminateScheduled\"}"=>{ + "."=>{}, + "f:lastHeartbeatTime"=>{}, + "f:lastTransitionTime"=>{}, + "f:message"=>{}, + "f:reason"=>{}, + "f:status"=>{}, + "f:type"=>{} + } + } + } + } + } + ] + }, + "spec"=>{ + "providerID"=>"azure:///subscriptions/3b875bf3-0eec-4d8c-bdee-25c7ccc1f130/resourceGroups/mc_davidaks16_davidaks16_westus2/providers/Microsoft.Compute/virtualMachineScaleSets/aks-nodepool1-24816391-vmss/virtualMachines/0" + }, + "status"=>{ + "capacity"=>{ + "attachable-volumes-azure-disk"=>"8", + "cpu"=>"2", + "ephemeral-storage"=>"129900528Ki", + "hugepages-1Gi"=>"0", + "hugepages-2Mi"=>"0", + "memory"=>"7120616Ki", + "pods"=>"30" + }, + "allocatable"=>{ + "attachable-volumes-azure-disk"=>"8", + "cpu"=>"1900m", + "ephemeral-storage"=>"119716326407", + "hugepages-1Gi"=>"0", + "hugepages-2Mi"=>"0", + "memory"=>"4675304Ki", + "pods"=>"30" + }, + "conditions"=>[ + { + "type"=>"FrequentDockerRestart", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoFrequentDockerRestart", + "message"=>"docker is functioning properly" + }, + { + "type"=>"FilesystemCorruptionProblem", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"FilesystemIsOK", + "message"=>"Filesystem is healthy" + }, + { + "type"=>"KernelDeadlock", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"KernelHasNoDeadlock", + "message"=>"kernel has no deadlock" + }, + { + "type"=>"FrequentContainerdRestart", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoFrequentContainerdRestart", + "message"=>"containerd is functioning properly" + }, + { + "type"=>"FreezeScheduled", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-11T23:25:04Z", + "reason"=>"NoFreezeScheduled", + "message"=>"VM has no scheduled Freeze event" + }, + { + "type"=>"FrequentUnregisterNetDevice", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoFrequentUnregisterNetDevice", + "message"=>"node is functioning properly" + }, + { + "type"=>"TerminateScheduled", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoTerminateScheduled", + "message"=>"VM has no scheduled Terminate event" + }, + { + "type"=>"ReadonlyFilesystem", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"FilesystemIsNotReadOnly", + "message"=>"Filesystem is not read-only" + }, + { + "type"=>"RedeployScheduled", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoRedeployScheduled", + "message"=>"VM has no scheduled Redeploy event" + }, + { + "type"=>"KubeletProblem", + "status"=>"False", + 
"lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"KubeletIsUp", + "message"=>"kubelet service is up" + }, + { + "type"=>"PreemptScheduled", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:11:11Z", + "reason"=>"NoPreemptScheduled", + "message"=>"VM has no scheduled Preempt event" + }, + { + "type"=>"RebootScheduled", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoRebootScheduled", + "message"=>"VM has no scheduled Reboot event" + }, + { + "type"=>"ContainerRuntimeProblem", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"ContainerRuntimeIsUp", + "message"=>"container runtime service is up" + }, + { + "type"=>"FrequentKubeletRestart", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:25:56Z", + "lastTransitionTime"=>"2021-08-10T18:10:01Z", + "reason"=>"NoFrequentKubeletRestart", + "message"=>"kubelet is functioning properly" + }, + { + "type"=>"MemoryPressure", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:28:21Z", + "lastTransitionTime"=>"2021-07-21T23:40:14Z", + "reason"=>"KubeletHasSufficientMemory", + "message"=>"kubelet has sufficient memory available" + }, + { + "type"=>"DiskPressure", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:28:21Z", + "lastTransitionTime"=>"2021-07-21T23:40:14Z", + "reason"=>"KubeletHasNoDiskPressure", + "message"=>"kubelet has no disk pressure" + }, + { + "type"=>"PIDPressure", + "status"=>"False", + "lastHeartbeatTime"=>"2021-08-17T19:28:21Z", + "lastTransitionTime"=>"2021-07-21T23:40:14Z", + "reason"=>"KubeletHasSufficientPID", + "message"=>"kubelet has sufficient PID available" + }, + { + "type"=>"Ready", + "status"=>"True", + "lastHeartbeatTime"=>"2021-08-17T19:28:21Z", + "lastTransitionTime"=>"2021-07-21T23:40:24Z", + "reason"=>"KubeletReady", + "message"=>"kubelet is posting ready status. 
AppArmor enabled" + } + ], + "addresses"=>[ + { + "type"=>"Hostname", + "address"=>"aks-nodepool1-24816391-vmss000000" + }, + { + "type"=>"InternalIP", + "address"=>"10.240.0.4" + } + ], + "daemonEndpoints"=>{ + "kubeletEndpoint"=>{ + "Port"=>10250 + } + }, + "nodeInfo"=>{ + "machineID"=>"17a654260e2c4a9bb3a3eb4b4188e4b4", + "systemUUID"=>"7ff599e4-909e-4950-a044-ff8613af3af9", + "bootID"=>"02bb865b-a469-43cd-8b0b-5ceb4ecd80b0", + "kernelVersion"=>"5.4.0-1051-azure", + "osImage"=>"Ubuntu 18.04.5 LTS", + "containerRuntimeVersion"=>"containerd://1.4.4+azure", + "kubeletVersion"=>"v1.19.11", + "kubeProxyVersion"=>"v1.19.11", + "operatingSystem"=>"linux", + "architecture"=>"amd64" + }, + "images"=>[ + { + "names"=>[ + "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021-1" + ], + "sizeBytes"=>331689060 + }, + { + "names"=>[ + "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021" + ], + "sizeBytes"=>330099815 + }, + { + "names"=>[ + "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021-hotfix" + ], + "sizeBytes"=>271471426 + }, + { + "names"=>[ + "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021" + ], + "sizeBytes"=>269703297 + }, + { + "names"=>[ + "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021" + ], + "sizeBytes"=>264732875 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/ingress/nginx-ingress-controller:0.19.0" + ], + "sizeBytes"=>166352383 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/hcp-tunnel-front:master.210623.2" + ], + "sizeBytes"=>147750148 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/hcp-tunnel-front:master.210524.1" + ], + "sizeBytes"=>146446618 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/hcp-tunnel-front:master.210427.1" + ], + "sizeBytes"=>136242776 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/node:v3.8.9.5" + ], + "sizeBytes"=>101794833 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/ingress/nginx-ingress-controller:0.47.0" + ], + "sizeBytes"=>101445696 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/autoscaler/cluster-proportional-autoscaler:1.3.0_v0.0.5" + ], + "sizeBytes"=>101194562 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/tunnel-openvpn:master.210623.2" + ], + "sizeBytes"=>96125176 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/tunnel-openvpn:master.210524.1" + ], + "sizeBytes"=>95879501 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/exechealthz:1.2_v0.0.5" + ], + "sizeBytes"=>94348102 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/node:v3.8.9.2" + ], + "sizeBytes"=>93537927 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/acc/sgx-attestation:2.0" + ], + "sizeBytes"=>91841669 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/azurefile-csi:v1.4.0" + ], + "sizeBytes"=>91324193 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/azurefile-csi:v1.2.0" + ], + "sizeBytes"=>89103171 + }, + { + "names"=>[ + "mcr.microsoft.com/azure-application-gateway/kubernetes-ingress:1.0.1-rc3" + ], + "sizeBytes"=>86839805 + }, + { + "names"=>[ + "mcr.microsoft.com/azure-application-gateway/kubernetes-ingress:1.2.0" + ], + "sizeBytes"=>86488586 + }, + { + "names"=>[ + "mcr.microsoft.com/aks/hcp/tunnel-openvpn:master.210427.1" + ], + "sizeBytes"=>86120048 + }, + { + "names"=>[ + "mcr.microsoft.com/azure-application-gateway/kubernetes-ingress:1.3.0" + ], + "sizeBytes"=>81252495 + }, + { + "names"=>[ + "mcr.microsoft.com/azure-application-gateway/kubernetes-ingress:1.4.0" + ], + 
"sizeBytes"=>79586703 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.4.0" + ], + "sizeBytes"=>78795016 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.2.0" + ], + "sizeBytes"=>76527179 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/azure-npm:v1.1.8" + ], + "sizeBytes"=>75025803 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/azure-npm:v1.2.2_hotfix" + ], + "sizeBytes"=>73533889 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/azure-npm:v1.3.1" + ], + "sizeBytes"=>72242894 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/azure-npm:v1.2.8" + ], + "sizeBytes"=>70622822 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/nvidia/k8s-device-plugin:v0.9.0" + ], + "sizeBytes"=>67291599 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/dashboard:v2.0.1" + ], + "sizeBytes"=>66415836 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/dashboard:v2.0.0-rc7" + ], + "sizeBytes"=>65965658 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/azure-npm:v1.2.1" + ], + "sizeBytes"=>64123775 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/cni:v3.8.9.3" + ], + "sizeBytes"=>63581323 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/networkmonitor:v1.1.8" + ], + "sizeBytes"=>63154716 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/cni:v3.8.9.2" + ], + "sizeBytes"=>61626312 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/node:v3.18.1" + ], + "sizeBytes"=>60500885 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/node:v3.17.2" + ], + "sizeBytes"=>58419768 + }, + { + "names"=>[ + "mcr.microsoft.com/containernetworking/networkmonitor:v1.1.8_hotfix", + "mcr.microsoft.com/containernetworking/networkmonitor:v1.1.8post2" + ], + "sizeBytes"=>56368756 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/kube-proxy@sha256:282543237a1aa3f407656290f454b7068a92e1abe2156082c750d5abfbcad90c", + "mcr.microsoft.com/oss/kubernetes/kube-proxy:v1.19.11-hotfix.20210526.2" + ], + "sizeBytes"=>56310724 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/calico/node:v3.19.0" + ], + "sizeBytes"=>55228749 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/kube-proxy:v1.19.11-hotfix.20210526.1" + ], + "sizeBytes"=>54692048 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/dashboard:v2.0.0-rc3" + ], + "sizeBytes"=>50803639 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/secrets-store/driver:v0.0.19" + ], + "sizeBytes"=>49759361 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/azure/aad-pod-identity/nmi:v1.7.5" + ], + "sizeBytes"=>49704644 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes-csi/secrets-store/driver:v0.0.21" + ], + "sizeBytes"=>49372390 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/kube-proxy@sha256:a64d3538b72905b07356881314755b02db3675ff47ee2bcc49dd7be856e285d5", + "mcr.microsoft.com/oss/kubernetes/kube-proxy:v1.19.11-hotfix.20210526" + ], + "sizeBytes"=>49322942 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/azure/aad-pod-identity/nmi:v1.7.4" + ], + "sizeBytes"=>48108311 + }, + { + "names"=>[ + "mcr.microsoft.com/oss/kubernetes/kubernetes-dashboard:v1.10.1" + ], + "sizeBytes"=>44907744 + } + ], + "config"=>{} + } + } + ] +} \ No newline at end of file diff --git a/test/unit-tests/run_go_tests.sh b/test/unit-tests/run_go_tests.sh new file mode 100755 index 000000000..7036531fd --- /dev/null +++ b/test/unit-tests/run_go_tests.sh @@ -0,0 +1,12 @@ +set -e + +OLD_PATH=$(pwd) +SCRIPTPATH="$( cd -- "$(dirname 
"$0")" >/dev/null 2>&1 ; pwd -P )" +cd $SCRIPTPATH/../../source/plugins/go/src +echo "# Runnign go generate" +go generate + +echo "# Running go test ." +go test . + +cd $OLD_PATH diff --git a/test/unit-tests/run_ruby_tests.sh b/test/unit-tests/run_ruby_tests.sh new file mode 100755 index 000000000..824346eee --- /dev/null +++ b/test/unit-tests/run_ruby_tests.sh @@ -0,0 +1,13 @@ +# this script will exit with an error if any commands exit with an error +set -e + +# NOTE: to run a specific test (instead of all) use the following arguments: --name test_name +# ex: run_ruby_tests.sh --name test_basic_single_node + +OLD_PATH=$(pwd) +SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +# cd $SCRIPTPATH/../../source/plugins/ruby +echo "# Running ruby $SCRIPTPATH/test_driver.rb $1 $2" +ruby $SCRIPTPATH/test_driver.rb $1 $2 + +cd $OLD_PATH diff --git a/test/unit-tests/test_driver.rb b/test/unit-tests/test_driver.rb new file mode 100644 index 000000000..32687cc99 --- /dev/null +++ b/test/unit-tests/test_driver.rb @@ -0,0 +1,13 @@ +$in_unit_test = true + +script_path = __dir__ +# go to the base directory of the repository +Dir.chdir(File.join(__dir__, "../..")) + +Dir.glob(File.join(script_path, "../../source/plugins/ruby/*_test.rb")) do |filename| + require_relative filename +end + +Dir.glob(File.join(script_path, "../../build/linux/installer/scripts/*_test.rb")) do |filename| + require_relative filename +end From 32f958b9db2820f662a399712422f8f519762365 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Fri, 27 Aug 2021 11:56:27 -0700 Subject: [PATCH 146/301] run unit tests after a merge too (#634) --- .github/workflows/run_unit_tests.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/run_unit_tests.yml b/.github/workflows/run_unit_tests.yml index 29f5afc7a..94ac4371a 100644 --- a/.github/workflows/run_unit_tests.yml +++ b/.github/workflows/run_unit_tests.yml @@ -5,6 +5,10 @@ on: branches: - ci_dev - ci_prod + push: + branches: + - ci_dev + - ci_prod jobs: Golang-Tests: runs-on: ubuntu-latest From c4a3bbc76b0241d09257b6e6fe61a5cd4df58058 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Mon, 30 Aug 2021 21:42:31 -0700 Subject: [PATCH 147/301] flag stale PRs & issues --- .github/workflows/stale.yml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 .github/workflows/stale.yml diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 000000000..1d91df09d --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,28 @@ +name: Mark stale issues and pull requests + +on: + schedule: + - cron: "30 10 * * *" + +jobs: + stale: + + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + + steps: + - uses: actions/stale@v3 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + days-before-issue-stale: 7 + days-before-pr-stale: 7 + stale-issue-message: 'This issue is stale because it has been open 7 days with no activity. Remove stale label or comment or this will be closed in 5 days.' + stale-pr-message: 'This PR is stale because it has been open 7 days with no activity. Remove stale label or comment or this will be closed in 5 days.' + close-issue-message: 'This issue was closed because it has been stalled for 12 days with no activity.' + close-pr-message: 'This PR was closed because it has been stalled for 12 days with no activity.' 
+ days-before-issue-close: 5 + days-before-pr-close: 5 + stale-issue-label: 'no-issue-activity' + stale-pr-label: 'no-pr-activity' From beb7f424acfcd79e53bf7c58d951c642c0f949be Mon Sep 17 00:00:00 2001 From: David Michelman Date: Tue, 31 Aug 2021 17:55:55 -0700 Subject: [PATCH 148/301] Adding script to collect logs (for troubleshooting) (#636) * added script for collecting logs * added windows daemonset and prometheus sidecar, as well as some explanatory prints * added kubectl describe and kubectl logs output * changed message to make it more clear some errors are expected --- scripts/troubleshoot/collect_logs.sh | 54 ++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100755 scripts/troubleshoot/collect_logs.sh diff --git a/scripts/troubleshoot/collect_logs.sh b/scripts/troubleshoot/collect_logs.sh new file mode 100755 index 000000000..99a9ad302 --- /dev/null +++ b/scripts/troubleshoot/collect_logs.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +# This script pulls logs from the replicaset agent pod and a random daemonset pod. This script is to make troubleshooting faster + +CYAN='\033[0;36m' +NC='\033[0m' # No Color + +mkdir azure-monitor-logs-tmp +cd azure-monitor-logs-tmp + +export ds_pod=$(kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name | grep -E omsagent-[a-z0-9]{5} | head -n 1) +export ds_win_pod=$(kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name | grep -E omsagent-win-[a-z0-9]{5} | head -n 1) +export rs_pod=$(kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name | grep -E omsagent-rs-[a-z0-9]{5} | head -n 1) + +echo -e "Collecting logs from ${ds_pod}, ${ds_win_pod}, and ${rs_pod}" +echo -e "${CYAN}Note: some errors about pods and files not existing are expected in clusters without windows nodes or sidecar prometheus scraping. They can safely be disregarded ${NC}" + +# grab `kubectl describe` and `kubectl log` +echo "collecting kubectl describe and kubectl log output" + +kubectl describe pod ${ds_pod} --namespace=kube-system > describe_${ds_pod}.txt +kubectl logs ${ds_pod} --container omsagent --namespace=kube-system > logs_${ds_pod}.txt +kubectl logs ${ds_pod} --container omsagent-prometheus --namespace=kube-system > logs_${ds_pod}_prom.txt + +kubectl describe pod ${ds_win_pod} --namespace=kube-system > describe_${ds_win_pod}.txt +kubectl logs ${ds_win_pod} --container omsagent-win --namespace=kube-system > logs_${ds_win_pod}.txt + +kubectl describe pod ${rs_pod} --namespace=kube-system > describe_${rs_pod}.txt +kubectl logs ${rs_pod} --container omsagent --namespace=kube-system > logs_${rs_pod}.txt + + +# now collect log files from in containers +echo "Collecting log files from inside agent containers" + +kubectl cp ${ds_pod}:/var/opt/microsoft/docker-cimprov/log omsagent-daemonset --namespace=kube-system --container omsagent +kubectl cp ${ds_pod}:/var/opt/microsoft/linuxmonagent/log omsagent-daemonset-mdsd --namespace=kube-system --container omsagent + +kubectl cp ${ds_pod}:/var/opt/microsoft/docker-cimprov/log omsagent-prom-daemonset --namespace=kube-system --container omsagent-prometheus +kubectl cp ${ds_pod}:/var/opt/microsoft/linuxmonagent/log omsagent-prom-daemonset-mdsd --namespace=kube-system --container omsagent-prometheus + +# for some reason copying logs out of /etc/omsagentwindows doesn't work (gives a permission error), but exec then cat does work.
+# skip collecting these logs for now, would be good to come back and fix this next time a windows support case comes up +# kubectl cp ${ds_win_pod}:/etc/omsagentwindows omsagent-win-daemonset --namespace=kube-system +kubectl cp ${ds_win_pod}:/etc/fluent-bit omsagent-win-daemonset-fbit --namespace=kube-system + +kubectl cp ${rs_pod}:/var/opt/microsoft/docker-cimprov/log omsagent-replicaset --namespace=kube-system +kubectl cp ${rs_pod}:/var/opt/microsoft/linuxmonagent/log omsagent-replicaset-mdsd --namespace=kube-system + +zip -r -q ../azure-monitor-logs.zip * + +cd .. +rm -rf azure-monitor-logs-tmp +echo +echo "log files have been written to azure-monitor-logs.zip" From 01e8178925b3fa284952f49014e42ec3e5abb1bd Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Fri, 10 Sep 2021 17:04:27 -0700 Subject: [PATCH 149/301] Sarah/ev2 (#640) * ev2 artifacts for release pipeline * update parameters reference * add artifacts tar file * changes to rollout and service model * change agentimage path * adding agentimage to artifact script * removing charts from tarball * change script to use blob storage * change blob variables * echo variables * change blob uri * use release id for blob prefix * change to delete blob file * add check for if blob storage file exists * fix script errors * update check for file in storage * change true check * comments and change storage account info to pipeline variables * Changes for windows tar file * PR changes --- .pipelines/build-linux.sh | 6 + .../ContainerInsights.Linux.Parameters.json | 84 +++++++++++++ .../ContainerInsights.Windows.Parameters.json | 84 +++++++++++++ .../RolloutSpecs/RolloutSpecs.json | 36 ++++++ .../ScopeBindings/Public.ScopeBindings.json | 48 ++++++++ .../Scripts/pushAgentToAcr.sh | 110 ++++++++++++++++++ .../ServiceModels/Public.ServiceModel.json | 56 +++++++++ .../ServiceGroupRoot/buildver.txt | 1 + 8 files changed, 425 insertions(+) create mode 100644 deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json create mode 100644 deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json create mode 100644 deployment/agent-deployment/ServiceGroupRoot/RolloutSpecs/RolloutSpecs.json create mode 100644 deployment/agent-deployment/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json create mode 100644 deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh create mode 100644 deployment/agent-deployment/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json create mode 100644 deployment/agent-deployment/ServiceGroupRoot/buildver.txt diff --git a/.pipelines/build-linux.sh b/.pipelines/build-linux.sh index 53f6a3a07..8dbf57fdc 100644 --- a/.pipelines/build-linux.sh +++ b/.pipelines/build-linux.sh @@ -15,7 +15,13 @@ echo "----------- Build Docker Provider -------------------------------" make cd $DIR +echo "------------ Bundle Shell Extension Scripts for Agent Release -------------------------" +cd $DIR/../deployment/agent-deployment/ServiceGroupRoot/Scripts +tar -czvf ../artifacts.tar.gz pushAgentToAcr.sh +cd $DIR + echo "------------ Bundle Shell Extension Scripts & HELM chart -------------------------" cd $DIR/../deployment/arc-k8s-extension/ServiceGroupRoot/Scripts tar -czvf ../artifacts.tar.gz ../../../../charts/azuremonitor-containers/ pushChartToAcr.sh + diff --git a/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json 
b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json new file mode 100644 index 000000000..598ce9698 --- /dev/null +++ b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json @@ -0,0 +1,84 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutParameters.json", + "contentVersion": "1.0.0.0", + "wait": [ + { + "name": "waitSdpBakeTime", + "properties": { + "duration": "PT24H" + } + } + ], + "shellExtensions": [ + { + "name": "PushAgentToACR", + "type": "ShellExtensionType", + "properties": { + "maxexecutiontime": "PT1H" + }, + "package": { + "reference": { + "path": "artifacts.tar.gz" + } + }, + "launch": { + "command": [ + "/bin/bash", + "pushAgentToAcr.sh" + ], + "environmentVariables": [ + { + "name": "WINDOWS", + "value": "" + }, + { + "name": "AGENT_IMAGE_URI", + "value": "__CONTAINER_URI__" + }, + { + "name": "AGENT_IMAGE_SAS", + "value": "__CONTAINER_SAS_TOKEN__" + }, + { + "name": "STORAGE_CONTAINER_NAME", + "value": "__STORAGE_CONTAINER_NAME__" + }, + { + "name": "STORAGE_ACCOUNT_NAME", + "value": "__STORAGE_ACCOUNT_NAME__" + }, + { + "name": "AGENT_IMAGE_TAR_FILE_NAME", + "value": "agentimage.tar.gz" + }, + { + "name": "RELEASE_ID", + "value": "__RELEASE_ID__" + }, + { + "name": "ACR_NAME", + "value": "__ACR_NAME__" + }, + { + "name": "AGENT_RELEASE", + "value": "__AGENT_RELEASE__" + }, + { + "name": "AGENT_IMAGE_TAG_SUFFIX", + "value": "__AGENT_IMAGE_TAG_SUFFIX__" + }, + { + "name": "AGENT_IMAGE_FULL_PATH", + "value": "__ACR_NAME__/public/azuremonitor/containerinsights/__AGENT_RELEASE__:__AGENT_RELEASE____AGENT_IMAGE_TAG_SUFFIX__" + } + ], + "identity": { + "type": "userAssigned", + "userAssignedIdentities": [ + "__MANAGED_IDENTITY__" + ] + } + } + } + ] + } \ No newline at end of file diff --git a/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json new file mode 100644 index 000000000..9d208e0c6 --- /dev/null +++ b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json @@ -0,0 +1,84 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutParameters.json", + "contentVersion": "1.0.0.0", + "wait": [ + { + "name": "waitSdpBakeTime", + "properties": { + "duration": "PT24H" + } + } + ], + "shellExtensions": [ + { + "name": "PushAgentToACR", + "type": "ShellExtensionType", + "properties": { + "maxexecutiontime": "PT1H" + }, + "package": { + "reference": { + "path": "artifacts.tar.gz" + } + }, + "launch": { + "command": [ + "/bin/bash", + "pushAgentToAcr.sh" + ], + "environmentVariables": [ + { + "name": "WINDOWS", + "value": "win-" + }, + { + "name": "AGENT_IMAGE_URI", + "value": "__CONTAINER_URI__" + }, + { + "name": "AGENT_IMAGE_SAS", + "value": "__CONTAINER_SAS_TOKEN__" + }, + { + "name": "STORAGE_CONTAINER_NAME", + "value": "__STORAGE_CONTAINER_NAME__" + }, + { + "name": "STORAGE_ACCOUNT_NAME", + "value": "__STORAGE_ACCOUNT_NAME__" + }, + { + "name": "AGENT_IMAGE_TAR_FILE_NAME", + "value": "agentimage.tar.zip" + }, + { + "name": "RELEASE_ID", + "value": "__RELEASE_ID__" + }, + { + "name": "ACR_NAME", + "value": "__ACR_NAME__" + }, + { + "name": "AGENT_RELEASE", + "value": "__AGENT_RELEASE__" + }, + { + "name": "AGENT_IMAGE_TAG_SUFFIX", + "value": "__AGENT_IMAGE_TAG_SUFFIX__" + }, + { + "name": "AGENT_IMAGE_FULL_PATH", + "value": 
"__ACR_NAME__/public/azuremonitor/containerinsights/__AGENT_RELEASE__:win-__AGENT_RELEASE____AGENT_IMAGE_TAG_SUFFIX__" + } + ], + "identity": { + "type": "userAssigned", + "userAssignedIdentities": [ + "__MANAGED_IDENTITY__" + ] + } + } + } + ] + } \ No newline at end of file diff --git a/deployment/agent-deployment/ServiceGroupRoot/RolloutSpecs/RolloutSpecs.json b/deployment/agent-deployment/ServiceGroupRoot/RolloutSpecs/RolloutSpecs.json new file mode 100644 index 000000000..f015cf5d3 --- /dev/null +++ b/deployment/agent-deployment/ServiceGroupRoot/RolloutSpecs/RolloutSpecs.json @@ -0,0 +1,36 @@ +{ + "$schema": "https://ev2schema.azure.net/schemas/2020-01-01/rolloutSpecification.json", + "ContentVersion": "1.0.0.0", + "RolloutMetadata": { + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "Name": "ContainerInsightsAgent", + "RolloutType": "Major", + "BuildSource": { + "Parameters": { + "VersionFile": "buildver.txt" + } + }, + "Notification": { + "Email": { + "To": "omscontainers@microsoft.com" + } + } + }, + "OrchestratedSteps": [ + { + "name": "PushLinuxAgent", + "targetType": "ServiceResource", + "targetName": "PushLinuxAgent", + "actions": [ "Shell/PushAgentToACR" ], + "dependsOn": [ ] + }, + { + "name": "PushWindowsAgent", + "targetType": "ServiceResource", + "targetName": "PushWindowsAgent", + "actions": [ "Shell/PushAgentToACR" ], + "dependsOn": [ ] + } + ] + } \ No newline at end of file diff --git a/deployment/agent-deployment/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json b/deployment/agent-deployment/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json new file mode 100644 index 000000000..99acfb68e --- /dev/null +++ b/deployment/agent-deployment/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json @@ -0,0 +1,48 @@ +{ + "$schema": "https://ev2schema.azure.net/schemas/2020-01-01/scopeBindings.json", + "contentVersion": "0.0.0.1", + "scopeBindings": [ + { + "scopeTagName": "Global", + "bindings": [ + { + "find": "__ACR_NAME__", + "replaceWith": "$(ACRName)" + }, + { + "find": "__AGENT_RELEASE__", + "replaceWith": "$(AgentRelease)" + }, + { + "find": "__AGENT_IMAGE_TAG_SUFFIX__", + "replaceWith": "$(AgentImageTagSuffix)" + }, + { + "find": "__RELEASE_ID__", + "replaceWith": "$(Release.ReleaseId)" + }, + { + "find": "__MANAGED_IDENTITY__", + "replaceWith": "$(ManagedIdentity)" + }, + { + "find": "__CONTAINER_URI__", + "replaceWith": "$(Storage.StorageContainerUri)" + }, + { + "find": "__CONTAINER_SAS_TOKEN__", + "replaceWith": "$(Storage.StorageContainerSasToken)" + }, + { + "find": "__STORAGE_CONTAINER_NAME__", + "replaceWith": "$(StorageContainerName)" + }, + { + "find": "__STORAGE_ACCOUNT_NAME__", + "replaceWith": "$(StorageAccountName)" + } + + ] + } + ] +} \ No newline at end of file diff --git a/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh b/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh new file mode 100644 index 000000000..7e73a6230 --- /dev/null +++ b/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh @@ -0,0 +1,110 @@ +#!/bin/bash +set -e + +# Note - This script used in the pipeline as inline script + +if [ -z $AGENT_IMAGE_TAG_SUFFIX ]; then + echo "-e error value of AGENT_IMAGE_TAG_SUFFIX variable shouldnt be empty. check release variables" + exit 1 +fi + +if [ -z $AGENT_RELEASE ]; then + echo "-e error AGENT_RELEASE shouldnt be empty. 
check release variables" + exit 1 +fi + +if [ -z $AGENT_IMAGE_URI ]; then + echo "-e error value of AGENT_IMAGE_URI shouldn't be empty. check output from file copy release task" + exit 1 +fi + +if [ -z $AGENT_IMAGE_SAS ]; then + echo "-e error value of AGENT_IMAGE_SAS shouldn't be empty. check output from file copy release task" + exit 1 +fi + +if [ -z $STORAGE_CONTAINER_NAME ]; then + echo "-e error value of STORAGE_CONTAINER_NAME shouldn't be empty. check release variables" + exit 1 +fi + +if [ -z $STORAGE_ACCOUNT_NAME ]; then + echo "-e error value of STORAGE_ACCOUNT_NAME shouldn't be empty. check release variables" + exit 1 +fi + +if [ -z $ACR_NAME ]; then + echo "-e error value of ACR_NAME shouldn't be empty. check release variables" + exit 1 +fi + +#Download agentimage tarball from blob storage account +echo "Downloading tarball image from $WINDOWS $AGENT_IMAGE_URI" +wget -O $AGENT_IMAGE_TAR_FILE_NAME "${AGENT_IMAGE_URI}${WINDOWS}${RELEASE_ID}${AGENT_IMAGE_SAS}" + + +if [ ! -f $AGENT_IMAGE_TAR_FILE_NAME ]; then + echo "Agent tarfile: ${AGENT_IMAGE_TAR_FILE_NAME} does not exist, unable to continue" + exit 1 +fi + +#Install crane +echo "Installing crane" +wget -O crane.tar.gz https://github.com/google/go-containerregistry/releases/download/v0.4.0/go-containerregistry_Linux_x86_64.tar.gz +if [ $? -eq 0 ]; then + echo "crane downloaded successfully" +else + echo "-e error crane download failed" + exit 1 +fi +tar xzvf crane.tar.gz +echo "Installed crane" + + +#Login to az cli and authenticate to acr +echo "Login cli using managed identity" +az login --identity + +echo "Getting acr credentials" +TOKEN_QUERY_RES=$(az acr login -n "$ACR_NAME" -t) +TOKEN=$(echo "$TOKEN_QUERY_RES" | jq -r '.accessToken') +if [ -z $TOKEN ]; then + echo "-e error failed to get az acr login token" + exit 1 +fi + +DESTINATION_ACR=$(echo "$TOKEN_QUERY_RES" | jq -r '.loginServer') +if [ -z $DESTINATION_ACR ]; then + echo "-e error value of DESTINATION_ACR shouldnt be empty" + exit 1 +fi + +./crane auth login "$DESTINATION_ACR" -u "00000000-0000-0000-0000-000000000000" -p "$TOKEN" + +#Prepare tarball and push to acr +if [[ "$AGENT_IMAGE_TAR_FILE_NAME" == *"tar.gz"* ]]; then + gunzip $AGENT_IMAGE_TAR_FILE_NAME +fi + +if [[ "$AGENT_IMAGE_TAR_FILE_NAME" == *"tar.zip"* ]]; then + unzip $AGENT_IMAGE_TAR_FILE_NAME +fi + +echo "Pushing file $TARBALL_IMAGE_FILE to $AGENT_IMAGE_FULL_PATH" +./crane push *.tar "$AGENT_IMAGE_FULL_PATH" + + +#Delete agentimage tarball from blob storage to prevent future conflicts +echo "Deleting agentimage copy from blob storage" + +BLOB_EXIST_RESULT=$(az storage blob exists --container-name $STORAGE_CONTAINER_NAME --name $WINDOWS$RELEASE_ID --account-name $STORAGE_ACCOUNT_NAME --sas-token $AGENT_IMAGE_SAS) +BLOB_EXIST=$(echo "$BLOB_EXIST_RESULT" | jq -r '.exists') +echo $BLOB_EXIST_RESULT +echo $BLOB_EXIST + +if $BLOB_EXIST; then + az storage blob delete --container-name "${STORAGE_CONTAINER_NAME}" --name "${WINDOWS}${RELEASE_ID}" --account-name "${STORAGE_ACCOUNT_NAME}" --sas-token "${AGENT_IMAGE_SAS}" + echo "Deleted agentimate copy from blob storage" +else + echo "Agentimage has already been deleted from blob storage" +fi \ No newline at end of file diff --git a/deployment/agent-deployment/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json b/deployment/agent-deployment/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json new file mode 100644 index 000000000..b7bd4aa26 --- /dev/null +++ b/deployment/agent-deployment/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json @@ 
-0,0 +1,56 @@ +{ + "$schema": "https://ev2schema.azure.net/schemas/2020-01-01/serviceModel.json", + "contentVersion": "1.0.0.2", + "ServiceMetadata": { + "ServiceGroup": "ContainerInsightsAgent", + "Environment": "Dev" + }, + "ServiceResourceGroupDefinitions": [ + { + "Name": "CI-Agent-ServiceResourceGroupDefinition", + "ServiceResourceDefinitions": [ + { + "Name": "ShellExtension", + "ComposedOf": { + "Extension": { + "Shell": [ + { + "type": "ShellExtensionType", + "properties": { + "imageName": "adm-ubuntu-1804-l", + "imageVersion": "v18" + } + } + ] + } + } + } + ] + } + ], + "ServiceResourceGroups": [ + { + "AzureResourceGroupName": "ContainerInsightsAgent-Global-Release", + "Location": "eastus2", + "InstanceOf": "CI-Agent-ServiceResourceGroupDefinition", + "AzureSubscriptionId": "728bbd23-3b47-40c1-8c9a-c6c5ccd674fc", + "ScopeTags": [ + { + "Name": "Global" + } + ], + "ServiceResources": [ + { + "Name": "PushLinuxAgent", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsights.Linux.Parameters.json" + }, + { + "Name": "PushWindowsAgent", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsights.Windows.Parameters.json" + } + ] + } + ] + } \ No newline at end of file diff --git a/deployment/agent-deployment/ServiceGroupRoot/buildver.txt b/deployment/agent-deployment/ServiceGroupRoot/buildver.txt new file mode 100644 index 000000000..bd2666abb --- /dev/null +++ b/deployment/agent-deployment/ServiceGroupRoot/buildver.txt @@ -0,0 +1 @@ +1.0.0.0 \ No newline at end of file From ef7cb89f7e20e7c89e5f154abe1671c0517cd2ab Mon Sep 17 00:00:00 2001 From: David Michelman Date: Mon, 13 Sep 2021 12:27:28 -0700 Subject: [PATCH 150/301] documenting fbit tail plugin configmap settings. (#638) * documenting fbit tail plugin configmap settings. --- kubernetes/container-azm-ms-agentconfig.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/kubernetes/container-azm-ms-agentconfig.yaml b/kubernetes/container-azm-ms-agentconfig.yaml index 21b31f76f..328acb201 100644 --- a/kubernetes/container-azm-ms-agentconfig.yaml +++ b/kubernetes/container-azm-ms-agentconfig.yaml @@ -144,6 +144,14 @@ data: tcp_listener_chunk_size = 10 tcp_listener_buffer_size = 10 tcp_listener_mem_buf_limit = 200 + + # The following settings are "undocumented", we don't recommend uncommenting them unless directed by Microsoft. + # They increase the maximum stdout/stderr log collection rate but will also cause higher cpu/memory usage. 
+ # [agent_settings.fbit_config] + # log_flush_interval_secs = "1" # default value is 15 + # tail_mem_buf_limit_megabytes = "10" # default value is 10 + # tail_buf_chunksize_megabytes = "1" # default value is 32kb (comment out this line for default) + # tail_buf_maxsize_megabytes = "1" # default value is 32kb (comment out this line for default) metadata: name: container-azm-ms-agentconfig From 6b42f139165f3fc055d966446d2d1b8ba239356d Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Mon, 13 Sep 2021 13:04:41 -0700 Subject: [PATCH 151/301] Install unzip package on shell extension (#642) --- .../Parameters/ContainerInsights.Linux.Parameters.json | 4 ---- .../ServiceGroupRoot/Scripts/pushAgentToAcr.sh | 8 +++++++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json index 598ce9698..be9ddb6d6 100644 --- a/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json +++ b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json @@ -27,10 +27,6 @@ "pushAgentToAcr.sh" ], "environmentVariables": [ - { - "name": "WINDOWS", - "value": "" - }, { "name": "AGENT_IMAGE_URI", "value": "__CONTAINER_URI__" diff --git a/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh b/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh index 7e73a6230..3d4062c91 100644 --- a/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh +++ b/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh @@ -87,7 +87,13 @@ if [[ "$AGENT_IMAGE_TAR_FILE_NAME" == *"tar.gz"* ]]; then fi if [[ "$AGENT_IMAGE_TAR_FILE_NAME" == *"tar.zip"* ]]; then - unzip $AGENT_IMAGE_TAR_FILE_NAME + apt-get -y install unzip + if [ $?
-eq 0 ]; then unzip $AGENT_IMAGE_TAR_FILE_NAME else From a025ce7478e7469bede0779ec6ee9f797e0a9c44 Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Tue, 21 Sep 2021 09:46:03 -0700 Subject: [PATCH 153/301] Adjust release pipeline to use cdpx acr (#647) * Adjust release pipeline to use cdpx acr * Adjust release pipeline to use cdpx acr * Update CDPX ACR path * Add check for cdpx repo variable --- .pipelines/pipeline.user.linux.yml | 3 + .pipelines/pipeline.user.windows.yml | 3 + .../ContainerInsights.Linux.Parameters.json | 40 +++---- .../ContainerInsights.Windows.Parameters.json | 44 +++----- .../ScopeBindings/Public.ScopeBindings.json | 41 ++++--- .../Scripts/pushAgentToAcr.sh | 102 +++++------------- 6 files changed, 89 insertions(+), 144 deletions(-) diff --git a/.pipelines/pipeline.user.linux.yml b/.pipelines/pipeline.user.linux.yml index 9977e7a1a..a1175263e 100644 --- a/.pipelines/pipeline.user.linux.yml +++ b/.pipelines/pipeline.user.linux.yml @@ -47,4 +47,7 @@ package: repository_name: 'cdpxlinux' # only supported ones are cdpx acr repos tag: 'cidev' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. + publish_unique_tag: true # If set, the image in the registry is tagged with the unique tag generated by CDPx + metadata_file: + artifact_path: 'linux-image-meta.json' # If defined, the drop outputs relative path to the file into which JSON metadata about the created image is emitted. export_to_artifact_path: 'agentimage.tar.gz' # path for exported image and use this instead of fixed tag diff --git a/.pipelines/pipeline.user.windows.yml b/.pipelines/pipeline.user.windows.yml index e9d0105ab..8be92a316 100644 --- a/.pipelines/pipeline.user.windows.yml +++ b/.pipelines/pipeline.user.windows.yml @@ -53,4 +53,7 @@ package: repository_name: 'cdpxwin1809' # only supported ones are cdpx acr repos tag: 'win-cidev' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. + publish_unique_tag: true # If set, the image in the registry is tagged with the unique tag generated by CDPx + metadata_file: + artifact_path: 'windows-image-meta.json' # If defined, the drop outputs relative path to the file into which JSON metadata about the created image is emitted. 
export_to_artifact_path: 'agentimage.tar.zip' # path for exported image and use this instead of fixed tag diff --git a/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json index be9ddb6d6..b9ca8c407 100644 --- a/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json +++ b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json @@ -27,30 +27,6 @@ "pushAgentToAcr.sh" ], "environmentVariables": [ - { - "name": "AGENT_IMAGE_URI", - "value": "__CONTAINER_URI__" - }, - { - "name": "AGENT_IMAGE_SAS", - "value": "__CONTAINER_SAS_TOKEN__" - }, - { - "name": "STORAGE_CONTAINER_NAME", - "value": "__STORAGE_CONTAINER_NAME__" - }, - { - "name": "STORAGE_ACCOUNT_NAME", - "value": "__STORAGE_ACCOUNT_NAME__" - }, - { - "name": "AGENT_IMAGE_TAR_FILE_NAME", - "value": "agentimage.tar.gz" - }, - { - "name": "RELEASE_ID", - "value": "__RELEASE_ID__" - }, { "name": "ACR_NAME", "value": "__ACR_NAME__" @@ -66,6 +42,22 @@ { "name": "AGENT_IMAGE_FULL_PATH", "value": "__ACR_NAME__/public/azuremonitor/containerinsights/__AGENT_RELEASE__:__AGENT_RELEASE____AGENT_IMAGE_TAG_SUFFIX__" + }, + { + "name": "CDPX_ACR", + "value": "__CDPX_LINUX_ACR__" + }, + { + "name": "CDPX_REGISTRY", + "value": "__CDPX_LINUX_REGISTRY__" + }, + { + "name": "CDPX_REPO_NAME", + "value": "__CDPX_LINUX_REPO_NAME__" + }, + { + "name": "CDPX_TAG", + "value": "__CDPX_LINUX_TAG__" } ], "identity": { diff --git a/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json index 9d208e0c6..f7f12218f 100644 --- a/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json +++ b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json @@ -27,34 +27,6 @@ "pushAgentToAcr.sh" ], "environmentVariables": [ - { - "name": "WINDOWS", - "value": "win-" - }, - { - "name": "AGENT_IMAGE_URI", - "value": "__CONTAINER_URI__" - }, - { - "name": "AGENT_IMAGE_SAS", - "value": "__CONTAINER_SAS_TOKEN__" - }, - { - "name": "STORAGE_CONTAINER_NAME", - "value": "__STORAGE_CONTAINER_NAME__" - }, - { - "name": "STORAGE_ACCOUNT_NAME", - "value": "__STORAGE_ACCOUNT_NAME__" - }, - { - "name": "AGENT_IMAGE_TAR_FILE_NAME", - "value": "agentimage.tar.zip" - }, - { - "name": "RELEASE_ID", - "value": "__RELEASE_ID__" - }, { "name": "ACR_NAME", "value": "__ACR_NAME__" @@ -70,6 +42,22 @@ { "name": "AGENT_IMAGE_FULL_PATH", "value": "__ACR_NAME__/public/azuremonitor/containerinsights/__AGENT_RELEASE__:win-__AGENT_RELEASE____AGENT_IMAGE_TAG_SUFFIX__" + }, + { + "name": "CDPX_ACR", + "value": "__CDPX_WINDOWS_ACR__" + }, + { + "name": "CDPX_REGISTRY", + "value": "__CDPX_WINDOWS_REGISTRY__" + }, + { + "name": "CDPX_REPO_NAME", + "value": "__CDPX_WINDOWS_REPO_NAME__" + }, + { + "name": "CDPX_TAG", + "value": "__CDPX_WINDOWS_TAG__" } ], "identity": { diff --git a/deployment/agent-deployment/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json b/deployment/agent-deployment/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json index 99acfb68e..82a1fae73 100644 --- a/deployment/agent-deployment/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json +++ b/deployment/agent-deployment/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json @@ -17,31 +17,42 
@@ "find": "__AGENT_IMAGE_TAG_SUFFIX__", "replaceWith": "$(AgentImageTagSuffix)" }, - { - "find": "__RELEASE_ID__", - "replaceWith": "$(Release.ReleaseId)" - }, { "find": "__MANAGED_IDENTITY__", "replaceWith": "$(ManagedIdentity)" }, { - "find": "__CONTAINER_URI__", - "replaceWith": "$(Storage.StorageContainerUri)" - }, + "find": "__CDPX_LINUX_ACR__", + "replaceWith": "$(CDPXLinuxACR)" + }, { - "find": "__CONTAINER_SAS_TOKEN__", - "replaceWith": "$(Storage.StorageContainerSasToken)" - }, + "find": "__CDPX_WINDOWS_ACR__", + "replaceWith": "$(CDPXWindowsACR)" + }, + { + "find": "__CDPX_LINUX_REGISTRY__", + "replaceWith": "$(CDPXLinuxRegistry)" + }, + { + "find": "__CDPX_WINDOWS_REGISTRY__", + "replaceWith": "$(CDPXWindowsRegistry)" + }, + { + "find": "__CDPX_LINUX_TAG__", + "replaceWith": "$(CDPXLinuxTag)" + }, + { + "find": "__CDPX_WINDOWS_TAG__", + "replaceWith": "$(CDPXWindowsTag)" + }, { - "find": "__STORAGE_CONTAINER_NAME__", - "replaceWith": "$(StorageContainerName)" + "find": "__CDPX_LINUX_REPO_NAME__", + "replaceWith": "$(CDPXLinuxRepoName)" }, { - "find": "__STORAGE_ACCOUNT_NAME__", - "replaceWith": "$(StorageAccountName)" + "find": "__CDPX_WINDOWS_REPO_NAME__", + "replaceWith": "$(CDPXWindowsRepoName)" } - ] } ] diff --git a/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh b/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh index bafd62b05..f319c3bbe 100644 --- a/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh +++ b/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh @@ -12,24 +12,29 @@ if [ -z $AGENT_RELEASE ]; then echo "-e error AGENT_RELEASE shouldnt be empty. check release variables" exit 1 fi +#! +if [ -z $AGENT_IMAGE_FULL_PATH ]; then + echo "-e error AGENT_IMAGE_FULL_PATH shouldnt be empty. check release variables" + exit 1 +fi -if [ -z $AGENT_IMAGE_URI ]; then - echo "-e error value of AGENT_IMAGE_URI shouldn't be empty. check output from file copy release task" +if [ -z $CDPX_ACR ]; then + echo "-e error value of CDPX_ACR shouldn't be empty. check release variables" exit 1 fi -if [ -z $AGENT_IMAGE_SAS ]; then - echo "-e error value of AGENT_IMAGE_SAS shouldn't be empty. check output from file copy release task" +if [ -z $CDPX_TAG ]; then + echo "-e error value of CDPX_TAG shouldn't be empty. check release variables" exit 1 fi -if [ -z $STORAGE_CONTAINER_NAME ]; then - echo "-e error value of STORAGE_CONTAINER_NAME shouldn't be empty. check release variables" +if [ -z $CDPX_REGISTRY ]; then + echo "-e error value of CDPX_REGISTRY shouldn't be empty. check release variables" exit 1 fi -if [ -z $STORAGE_ACCOUNT_NAME ]; then - echo "-e error value of STORAGE_ACCOUNT_NAME shouldn't be empty. check release variables" +if [ -z $CDPX_REPO_NAME ]; then + echo "-e error value of CDPX_REPO_NAME shouldn't be empty. check release variables" exit 1 fi @@ -38,79 +43,22 @@ if [ -z $ACR_NAME ]; then exit 1 fi -#Download agentimage tarball from blob storage account -echo "Downloading tarball image from $WINDOWS $AGENT_IMAGE_URI" -wget -O $AGENT_IMAGE_TAR_FILE_NAME "${AGENT_IMAGE_URI}${WINDOWS}${RELEASE_ID}${AGENT_IMAGE_SAS}" - - -if [ ! -f $AGENT_IMAGE_TAR_FILE_NAME ]; then - echo "Agent tarfile: ${AGENT_IMAGE_TAR_FILE_NAME} does not exist, unable to continue" - exit 1 -fi - -#Install crane -echo "Installing crane" -wget -O crane.tar.gz https://github.com/google/go-containerregistry/releases/download/v0.4.0/go-containerregistry_Linux_x86_64.tar.gz -if [ $? 
-eq 0 ]; then - echo "crane downloaded successfully" -else - echo "-e error crane download failed" - exit 1 -fi -tar xzvf crane.tar.gz -echo "Installed crane" - #Login to az cli and authenticate to acr echo "Login cli using managed identity" az login --identity - -echo "Getting acr credentials" -TOKEN_QUERY_RES=$(az acr login -n "$ACR_NAME" -t) -TOKEN=$(echo "$TOKEN_QUERY_RES" | jq -r '.accessToken') -if [ -z $TOKEN ]; then - echo "-e error failed to get az acr login token" +if [ $? -eq 0 ]; then + echo "Logged in successfully" +else + echo "-e error failed to login to az with managed identity credentials" exit 1 -fi - -DESTINATION_ACR=$(echo "$TOKEN_QUERY_RES" | jq -r '.loginServer') -if [ -z $DESTINATION_ACR ]; then - echo "-e error value of DESTINATION_ACR shouldnt be empty" +fi + +echo "Pushing ${AGENT_IMAGE_FULL_PATH} to ${ACR_NAME}" +az acr import --name $ACR_NAME --registry $CDPX_REGISTRY --source ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_TAG} --image $AGENT_IMAGE_FULL_PATH +if [ $? -eq 0 ]; then + echo "Retagged and pushed image successfully" +else + echo "-e error failed to retag and push image to destination ACR" exit 1 -fi - -./crane auth login "$DESTINATION_ACR" -u "00000000-0000-0000-0000-000000000000" -p "$TOKEN" - -#Prepare tarball and push to acr -if [[ "$AGENT_IMAGE_TAR_FILE_NAME" == *"tar.gz"* ]]; then - gunzip $AGENT_IMAGE_TAR_FILE_NAME -fi - -if [[ "$AGENT_IMAGE_TAR_FILE_NAME" == *"tar.zip"* ]]; then - apt-get -y install unzip - if [ $? -eq 0 ]; then - unzip $AGENT_IMAGE_TAR_FILE_NAME - else - echo "-e error failed to install unzip package and cannot unzip windows agent tarball" - exit 1 - fi -fi - -echo "Pushing file $TARBALL_IMAGE_FILE to $AGENT_IMAGE_FULL_PATH" -./crane push *.tar "$AGENT_IMAGE_FULL_PATH" - - -#Delete agentimage tarball from blob storage to prevent future conflicts -echo "Deleting agentimage copy from blob storage" - -BLOB_EXIST_RESULT=$(az storage blob exists --container-name $STORAGE_CONTAINER_NAME --name $WINDOWS$RELEASE_ID --account-name $STORAGE_ACCOUNT_NAME --sas-token $AGENT_IMAGE_SAS) -BLOB_EXIST=$(echo "$BLOB_EXIST_RESULT" | jq -r '.exists') -echo $BLOB_EXIST_RESULT -echo $BLOB_EXIST - -if $BLOB_EXIST; then - az storage blob delete --container-name "${STORAGE_CONTAINER_NAME}" --name "${WINDOWS}${RELEASE_ID}" --account-name "${STORAGE_ACCOUNT_NAME}" --sas-token "${AGENT_IMAGE_SAS}" - echo "Deleted agentimate copy from blob storage" -else - echo "Agentimage has already been deleted from blob storage" fi \ No newline at end of file From c6bc993d4fd652df81c5d816a801c7f133101c6a Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Wed, 22 Sep 2021 16:14:55 -0700 Subject: [PATCH 154/301] Sarah/ev2 prod (#649) * Ev2 changes for prod --- ....linux.official.all_tag.all_phase.all_config.ci_prod.yml | 3 +++ ...indows.official.all_tag.all_phase.all_config.ci_prod.yml | 3 +++ .../ServiceGroupRoot/ServiceModels/Public.ServiceModel.json | 6 +++--- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml index 1e9909ee8..4f73d7c71 100644 --- a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml +++ b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml @@ -42,4 +42,7 @@ package: repository_name: 'cdpxlinux' # only supported ones are cdpx acr repos tag: 'ciprod' # OPTIONAL: Defaults to 
latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. + publish_unique_tag: true # If set, the image in the registry is tagged with the unique tag generated by CDPx + metadata_file: + artifact_path: 'linux-image-meta.json' # If defined, the drop outputs relative path to the file into which JSON metadata about the created image is emitted. export_to_artifact_path: 'agentimage.tar.gz' # path for exported image and use this instead of fixed tag diff --git a/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml index 0dc0a47c5..1caf60b7b 100644 --- a/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml +++ b/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml @@ -53,4 +53,7 @@ package: repository_name: 'cdpxwin1809' # only supported ones are cdpx acr repos tag: 'win-ciprod' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. + publish_unique_tag: true # If set, the image in the registry is tagged with the unique tag generated by CDPx + metadata_file: + artifact_path: 'windows-image-meta.json' # If defined, the drop outputs relative path to the file into which JSON metadata about the created image is emitted. export_to_artifact_path: 'agentimage.tar.zip' # path for exported image and use this instead of fixed tag diff --git a/deployment/agent-deployment/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json b/deployment/agent-deployment/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json index b7bd4aa26..8c5c7c1b6 100644 --- a/deployment/agent-deployment/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json +++ b/deployment/agent-deployment/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json @@ -3,7 +3,7 @@ "contentVersion": "1.0.0.2", "ServiceMetadata": { "ServiceGroup": "ContainerInsightsAgent", - "Environment": "Dev" + "Environment": "Prod" }, "ServiceResourceGroupDefinitions": [ { @@ -30,10 +30,10 @@ ], "ServiceResourceGroups": [ { - "AzureResourceGroupName": "ContainerInsightsAgent-Global-Release", + "AzureResourceGroupName": "ContainerInsights-Agent-Release", "Location": "eastus2", "InstanceOf": "CI-Agent-ServiceResourceGroupDefinition", - "AzureSubscriptionId": "728bbd23-3b47-40c1-8c9a-c6c5ccd674fc", + "AzureSubscriptionId": "30c56c3a-54da-46ea-b004-06eb33432687", "ScopeTags": [ { "Name": "Global" From 5e379473ce4db7b81c17966624c2dd90f8119868 Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Thu, 23 Sep 2021 13:25:05 -0700 Subject: [PATCH 155/301] CDPX repo naming change (#652) --- .../Parameters/ContainerInsights.Linux.Parameters.json | 4 ---- .../Parameters/ContainerInsights.Windows.Parameters.json | 4 ---- .../ScopeBindings/Public.ScopeBindings.json | 8 -------- .../ServiceGroupRoot/Scripts/pushAgentToAcr.sh | 7 +------ 4 files changed, 1 insertion(+), 22 deletions(-) diff --git a/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json 
b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json index b9ca8c407..6104609a6 100644 --- a/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json +++ b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json @@ -43,10 +43,6 @@ "name": "AGENT_IMAGE_FULL_PATH", "value": "__ACR_NAME__/public/azuremonitor/containerinsights/__AGENT_RELEASE__:__AGENT_RELEASE____AGENT_IMAGE_TAG_SUFFIX__" }, - { - "name": "CDPX_ACR", - "value": "__CDPX_LINUX_ACR__" - }, { "name": "CDPX_REGISTRY", "value": "__CDPX_LINUX_REGISTRY__" diff --git a/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json index f7f12218f..de0bbfe1c 100644 --- a/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json +++ b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json @@ -43,10 +43,6 @@ "name": "AGENT_IMAGE_FULL_PATH", "value": "__ACR_NAME__/public/azuremonitor/containerinsights/__AGENT_RELEASE__:win-__AGENT_RELEASE____AGENT_IMAGE_TAG_SUFFIX__" }, - { - "name": "CDPX_ACR", - "value": "__CDPX_WINDOWS_ACR__" - }, { "name": "CDPX_REGISTRY", "value": "__CDPX_WINDOWS_REGISTRY__" diff --git a/deployment/agent-deployment/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json b/deployment/agent-deployment/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json index 82a1fae73..cbc6db8b3 100644 --- a/deployment/agent-deployment/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json +++ b/deployment/agent-deployment/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json @@ -21,14 +21,6 @@ "find": "__MANAGED_IDENTITY__", "replaceWith": "$(ManagedIdentity)" }, - { - "find": "__CDPX_LINUX_ACR__", - "replaceWith": "$(CDPXLinuxACR)" - }, - { - "find": "__CDPX_WINDOWS_ACR__", - "replaceWith": "$(CDPXWindowsACR)" - }, { "find": "__CDPX_LINUX_REGISTRY__", "replaceWith": "$(CDPXLinuxRegistry)" diff --git a/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh b/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh index f319c3bbe..7d1b6c27e 100644 --- a/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh +++ b/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh @@ -18,11 +18,6 @@ if [ -z $AGENT_IMAGE_FULL_PATH ]; then exit 1 fi -if [ -z $CDPX_ACR ]; then - echo "-e error value of CDPX_ACR shouldn't be empty. check release variables" - exit 1 -fi - if [ -z $CDPX_TAG ]; then echo "-e error value of CDPX_TAG shouldn't be empty. check release variables" exit 1 @@ -55,7 +50,7 @@ else fi echo "Pushing ${AGENT_IMAGE_FULL_PATH} to ${ACR_NAME}" -az acr import --name $ACR_NAME --registry $CDPX_REGISTRY --source ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_TAG} --image $AGENT_IMAGE_FULL_PATH +az acr import --name $ACR_NAME --registry $CDPX_REGISTRY --source official/${CDPX_REPO_NAME}:${CDPX_TAG} --image $AGENT_IMAGE_FULL_PATH if [ $? 
-eq 0 ]; then echo "Retagged and pushed image successfully" else From a36d8dfe004f009b0645f54781a2539104ff8768 Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Mon, 27 Sep 2021 15:19:29 -0700 Subject: [PATCH 156/301] Sarah/ev2 update (#654) * remove acr name from repo path * add check to make sure tag does not exist in mcr repo --- .../ContainerInsights.Linux.Parameters.json | 2 +- .../ContainerInsights.Windows.Parameters.json | 2 +- .../ServiceGroupRoot/Scripts/pushAgentToAcr.sh | 15 ++++++++++++++- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json index 6104609a6..70d0950a2 100644 --- a/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json +++ b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json @@ -41,7 +41,7 @@ }, { "name": "AGENT_IMAGE_FULL_PATH", - "value": "__ACR_NAME__/public/azuremonitor/containerinsights/__AGENT_RELEASE__:__AGENT_RELEASE____AGENT_IMAGE_TAG_SUFFIX__" + "value": "public/azuremonitor/containerinsights/__AGENT_RELEASE__:__AGENT_RELEASE____AGENT_IMAGE_TAG_SUFFIX__" }, { "name": "CDPX_REGISTRY", diff --git a/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json index de0bbfe1c..b6a31ed10 100644 --- a/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json +++ b/deployment/agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json @@ -41,7 +41,7 @@ }, { "name": "AGENT_IMAGE_FULL_PATH", - "value": "__ACR_NAME__/public/azuremonitor/containerinsights/__AGENT_RELEASE__:win-__AGENT_RELEASE____AGENT_IMAGE_TAG_SUFFIX__" + "value": "public/azuremonitor/containerinsights/__AGENT_RELEASE__:win-__AGENT_RELEASE____AGENT_IMAGE_TAG_SUFFIX__" }, { "name": "CDPX_REGISTRY", diff --git a/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh b/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh index 7d1b6c27e..c3f092d90 100644 --- a/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh +++ b/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh @@ -8,11 +8,24 @@ if [ -z $AGENT_IMAGE_TAG_SUFFIX ]; then exit 1 fi +#Make sure that tag being pushed will not overwrite an existing tag in mcr +MCR_TAG_RESULT="`wget -qO- https://mcr.microsoft.com/v2/azuremonitor/containerinsights/ciprod/tags/list`" +if [ $? -ne 0 ]; then + echo "-e error unable to get list of mcr tags for azuremonitor/containerinsights/ciprod repository" + exit 1 +fi +TAG_EXISTS=$(echo $MCR_TAG_RESULT | jq '.tags | contains(["'"$AGENT_IMAGE_TAG_SUFFIX"'"])') + +if $TAG_EXISTS; then + echo "-e error ${AGENT_IMAGE_TAG_SUFFIX} already exists in mcr. make sure the image tag is unique" + exit 1 +fi + if [ -z $AGENT_RELEASE ]; then echo "-e error AGENT_RELEASE shouldnt be empty. check release variables" exit 1 fi -#! + if [ -z $AGENT_IMAGE_FULL_PATH ]; then echo "-e error AGENT_IMAGE_FULL_PATH shouldnt be empty. 
check release variables" exit 1 From fdc99f6e56aa6316fd38a9d569f69a32995c8210 Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Mon, 27 Sep 2021 17:10:08 -0700 Subject: [PATCH 157/301] change tag syntax for mcr repo check (#655) --- .../ServiceGroupRoot/Scripts/pushAgentToAcr.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh b/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh index c3f092d90..d39cedde0 100644 --- a/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh +++ b/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh @@ -8,24 +8,24 @@ if [ -z $AGENT_IMAGE_TAG_SUFFIX ]; then exit 1 fi +if [ -z $AGENT_RELEASE ]; then + echo "-e error AGENT_RELEASE shouldnt be empty. check release variables" + exit 1 +fi + #Make sure that tag being pushed will not overwrite an existing tag in mcr MCR_TAG_RESULT="`wget -qO- https://mcr.microsoft.com/v2/azuremonitor/containerinsights/ciprod/tags/list`" if [ $? -ne 0 ]; then echo "-e error unable to get list of mcr tags for azuremonitor/containerinsights/ciprod repository" exit 1 fi -TAG_EXISTS=$(echo $MCR_TAG_RESULT | jq '.tags | contains(["'"$AGENT_IMAGE_TAG_SUFFIX"'"])') +TAG_EXISTS=$(echo $MCR_TAG_RESULT | jq '.tags | contains(["'"$AGENT_RELEASE$AGENT_IMAGE_TAG_SUFFIX"'"])') if $TAG_EXISTS; then echo "-e error ${AGENT_IMAGE_TAG_SUFFIX} already exists in mcr. make sure the image tag is unique" exit 1 fi -if [ -z $AGENT_RELEASE ]; then - echo "-e error AGENT_RELEASE shouldnt be empty. check release variables" - exit 1 -fi - if [ -z $AGENT_IMAGE_FULL_PATH ]; then echo "-e error AGENT_IMAGE_FULL_PATH shouldnt be empty. check release variables" exit 1 From 6292218d9bbf5885ae854a0c02be30f5587980ac Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 27 Sep 2021 18:09:44 -0700 Subject: [PATCH 158/301] Gangams/optimize win livenessprobe (#653) * livenessprobe optimization * optimize windows agent liveness probe * optimize windows agent liveness probe * optimize windows agent liveness probe * optimize windows agent liveness probe * optimize windows agent liveness probe * optimize windows agent liveness probe * optimize windows agent liveness probe * optimize windows agent liveness probe --- build/windows/Makefile.ps1 | 19 ++- .../installer/livenessprobe/livenessprobe.cpp | 137 ++++++++++++++++++ .../installer/scripts/livenessprobe.cmd | 36 ----- kubernetes/omsagent.yaml | 6 +- kubernetes/windows/Dockerfile | 2 +- 5 files changed, 159 insertions(+), 41 deletions(-) create mode 100644 build/windows/installer/livenessprobe/livenessprobe.cpp delete mode 100644 build/windows/installer/scripts/livenessprobe.cmd diff --git a/build/windows/Makefile.ps1 b/build/windows/Makefile.ps1 index b9bd1f3e4..9f3c438b0 100644 --- a/build/windows/Makefile.ps1 +++ b/build/windows/Makefile.ps1 @@ -3,6 +3,7 @@ # 1. Builds the certificate generator code in .NET and copy the binaries in zip file to ..\..\kubernetes\windows\omsagentwindows # 2. Builds the out_oms plugin code in go lang into the shared object(.so) file and copy the out_oms.so file to ..\..\kubernetes\windows\omsagentwindows # 3. copy the files under installer directory to ..\..\kubernetes\windows\omsagentwindows +# 4. 
Builds the livenessprobe cpp and copy the executable to the under directory ..\..\kubernetes\windows\omsagentwindows $dotnetcoreframework = "netcoreapp3.1" @@ -157,7 +158,7 @@ if ($isCDPxEnvironment) { Write-Host("getting latest go modules ...") go get - Write-Host("successfyullt got latest go modules") -ForegroundColor Green + Write-Host("successfully got latest go modules") -ForegroundColor Green go build -ldflags "-X 'main.revision=$buildVersionString' -X 'main.builddate=$buildVersionDate'" -buildmode=c-shared -o out_oms.so . } @@ -167,16 +168,28 @@ Write-Host("copying out_oms.so file to : $publishdir") Copy-Item -Path (Join-path -Path $outomsgoplugindir -ChildPath "out_oms.so") -Destination $publishdir -Force Write-Host("successfully copied out_oms.so file to : $publishdir") -ForegroundColor Green +# compile and build the liveness probe cpp code +Write-Host("Start:build livenessprobe cpp code") +$livenessprobesrcpath = Join-Path -Path $builddir -ChildPath "windows\installer\livenessprobe\livenessprobe.cpp" +$livenessprobeexepath = Join-Path -Path $builddir -ChildPath "windows\installer\livenessprobe\livenessprobe.exe" +g++ $livenessprobesrcpath -o $livenessprobeexepath -municode +Write-Host("End:build livenessprobe cpp code") +if (Test-Path -Path $livenessprobeexepath){ + Write-Host("livenessprobe.exe exists which indicates cpp build step succeeded") -ForegroundColor Green +} else { + Write-Host("livenessprobe.exe doesnt exist which indicates cpp build step failed") -ForegroundColor Red + exit +} $installerdir = Join-Path -Path $builddir -ChildPath "common\installer" Write-Host("copying common installer files conf and scripts from :" + $installerdir + " to :" + $publishdir + " ...") -$exclude = @('*.cs','*.csproj') +$exclude = @('*.cs','*.csproj', '*.cpp') Copy-Item -Path $installerdir -Destination $publishdir -Recurse -Force -Exclude $exclude Write-Host("successfully copied installer files conf and scripts from :" + $installerdir + " to :" + $publishdir + " ") -ForegroundColor Green $installerdir = Join-Path -Path $builddir -ChildPath "windows\installer" Write-Host("copying installer files conf and scripts from :" + $installerdir + " to :" + $publishdir + " ...") -$exclude = @('*.cs','*.csproj') +$exclude = @('*.cs','*.csproj', '*.cpp') Copy-Item -Path $installerdir -Destination $publishdir -Recurse -Force -Exclude $exclude Write-Host("successfully copied installer files conf and scripts from :" + $installerdir + " to :" + $publishdir + " ") -ForegroundColor Green diff --git a/build/windows/installer/livenessprobe/livenessprobe.cpp b/build/windows/installer/livenessprobe/livenessprobe.cpp new file mode 100644 index 000000000..eea792686 --- /dev/null +++ b/build/windows/installer/livenessprobe/livenessprobe.cpp @@ -0,0 +1,137 @@ +#ifndef UNICODE +#define UNICODE +#endif + +#ifndef _UNICODE +#define _UNICODE +#endif + +#include +#include +#include + +#define SUCCESS 0x00000000 +#define NO_FLUENT_BIT_PROCESS 0x00000001 +#define FILESYSTEM_WATCHER_FILE_EXISTS 0x00000002 +#define CERTIFICATE_RENEWAL_REQUIRED 0x00000003 +#define FLUENTDWINAKS_SERVICE_NOT_RUNNING 0x00000004 +#define UNEXPECTED_ERROR 0xFFFFFFFF + +/* + check if the process running or not for given exe file name +*/ +bool IsProcessRunning(const wchar_t *const executableName) +{ + PROCESSENTRY32 entry; + entry.dwSize = sizeof(PROCESSENTRY32); + + const auto snapshot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, NULL); + + if (!Process32First(snapshot, &entry)) + { + CloseHandle(snapshot); + 
wprintf_s(L"ERROR:IsProcessRunning::Process32First failed"); + return false; + } + + do + { + if (!_wcsicmp(entry.szExeFile, executableName)) + { + CloseHandle(snapshot); + return true; + } + } while (Process32Next(snapshot, &entry)); + + CloseHandle(snapshot); + return false; +} + +/* + check if the file exists +*/ +bool IsFileExists(const wchar_t *const fileName) +{ + DWORD dwAttrib = GetFileAttributes(fileName); + return dwAttrib != INVALID_FILE_SIZE; +} + +/* + Get the status of the service for given service name +*/ +int GetServiceStatus(const wchar_t *const serivceName) +{ + SC_HANDLE theService, scm; + SERVICE_STATUS_PROCESS ssStatus; + DWORD dwBytesNeeded; + + scm = OpenSCManager(nullptr, nullptr, SC_MANAGER_ENUMERATE_SERVICE); + if (!scm) + { + wprintf_s(L"ERROR:GetServiceStatus::OpenSCManager failed"); + return UNEXPECTED_ERROR; + } + + theService = OpenService(scm, serivceName, SERVICE_QUERY_STATUS); + if (!theService) + { + CloseServiceHandle(scm); + wprintf_s(L"ERROR:GetServiceStatus::OpenService failed"); + return UNEXPECTED_ERROR; + } + + auto result = QueryServiceStatusEx(theService, SC_STATUS_PROCESS_INFO, + reinterpret_cast(&ssStatus), sizeof(SERVICE_STATUS_PROCESS), + &dwBytesNeeded); + + CloseServiceHandle(theService); + CloseServiceHandle(scm); + + if (result == 0) + { + wprintf_s(L"ERROR:GetServiceStatus:QueryServiceStatusEx failed"); + return UNEXPECTED_ERROR; + } + + return ssStatus.dwCurrentState; +} + +/** + +**/ +int _tmain(int argc, wchar_t *argv[]) +{ + if (argc < 5) + { + wprintf_s(L"ERROR:unexpected number arguments and expected is 5"); + return UNEXPECTED_ERROR; + } + + if (!IsProcessRunning(argv[1])) + { + wprintf_s(L"ERROR:Process:%s is not running\n", argv[1]); + return NO_FLUENT_BIT_PROCESS; + } + + DWORD dwStatus = GetServiceStatus(argv[2]); + + if (dwStatus != SERVICE_RUNNING) + { + wprintf_s(L"ERROR:Service:%s is not running\n", argv[2]); + return FLUENTDWINAKS_SERVICE_NOT_RUNNING; + } + + if (IsFileExists(argv[3])) + { + wprintf_s(L"INFO:File:%s exists indicates Config Map Updated since agent started.\n", argv[3]); + return FILESYSTEM_WATCHER_FILE_EXISTS; + } + + if (IsFileExists(argv[4])) + { + wprintf_s(L"INFO:File:%s exists indicates Certificate needs to be renewed.\n", argv[4]); + return CERTIFICATE_RENEWAL_REQUIRED; + } + + return SUCCESS; +} diff --git a/build/windows/installer/scripts/livenessprobe.cmd b/build/windows/installer/scripts/livenessprobe.cmd deleted file mode 100644 index 19d0b69d7..000000000 --- a/build/windows/installer/scripts/livenessprobe.cmd +++ /dev/null @@ -1,36 +0,0 @@ -REM "Checking if fluent-bit is running" - -tasklist /fi "imagename eq fluent-bit.exe" /fo "table" | findstr fluent-bit - -IF ERRORLEVEL 1 ( - echo "Fluent-Bit is not running" - exit /b 1 -) - -REM "Checking if config map has been updated since agent start" - -IF EXIST C:\etc\omsagentwindows\filesystemwatcher.txt ( - echo "Config Map Updated since agent started" - exit /b 1 -) - -REM "Checking if certificate needs to be renewed (aka agent restart required)" - -IF EXIST C:\etc\omsagentwindows\renewcertificate.txt ( - echo "Certificate needs to be renewed" - exit /b 1 -) - -REM "Checking if fluentd service is running" -sc query fluentdwinaks | findstr /i STATE | findstr RUNNING - -IF ERRORLEVEL 1 ( - echo "Fluentd Service is NOT Running" - exit /b 1 -) - -exit /b 0 - - - - diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index d84e46701..98621b5f0 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -833,7 +833,11 @@ spec: 
command: - cmd - /c - - C:\opt\omsagentwindows\scripts\cmd\livenessProbe.cmd + - C:\opt\omsagentwindows\scripts\cmd\livenessprobe.exe + - fluent-bit.exe + - fluentdwinaks + - "C:\\etc\\omsagentwindows\\filesystemwatcher.txt" + - "C:\\etc\\omsagentwindows\\renewcertificate.txt" periodSeconds: 60 initialDelaySeconds: 180 timeoutSeconds: 15 diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 290deef40..aa756b8b8 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -46,7 +46,7 @@ RUN ./setup.ps1 COPY main.ps1 /opt/omsagentwindows/scripts/powershell COPY ./omsagentwindows/installer/scripts/filesystemwatcher.ps1 /opt/omsagentwindows/scripts/powershell -COPY ./omsagentwindows/installer/scripts/livenessprobe.cmd /opt/omsagentwindows/scripts/cmd/ +COPY ./omsagentwindows/installer/livenessprobe/livenessprobe.exe /opt/omsagentwindows/scripts/cmd/ COPY setdefaulttelegrafenvvariables.ps1 /opt/omsagentwindows/scripts/powershell # copy ruby scripts to /opt folder From cfacf39842a7f621621196f7049293fb1d493846 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 28 Sep 2021 19:45:48 -0700 Subject: [PATCH 159/301] Gangams/addon token adapter image tag to telemetry (#656) * addon token adapter image tag * addon token adapter image tag --- source/plugins/ruby/ApplicationInsightsUtility.rb | 3 +++ source/plugins/ruby/in_containerinventory.rb | 13 +++++++++++++ 2 files changed, 16 insertions(+) diff --git a/source/plugins/ruby/ApplicationInsightsUtility.rb b/source/plugins/ruby/ApplicationInsightsUtility.rb index eaa1d903d..7691304a6 100644 --- a/source/plugins/ruby/ApplicationInsightsUtility.rb +++ b/source/plugins/ruby/ApplicationInsightsUtility.rb @@ -243,6 +243,9 @@ def sendTelemetry(pluginName, properties) getContainerRuntimeInfo() end @@CustomProperties["Computer"] = properties["Computer"] + if !properties["addonTokenAdapterImageTag"].nil? && !properties["addonTokenAdapterImageTag"].empty? + @@CustomProperties["addonTokenAdapterImageTag"] = properties["addonTokenAdapterImageTag"] + end sendHeartBeatEvent(pluginName) sendLastProcessedContainerInventoryCountMetric(pluginName, properties) rescue => errorStr diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index 9fcb7ab90..f52ed4026 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -57,6 +57,7 @@ def enumerate containerInventory = Array.new eventStream = Fluent::MultiEventStream.new hostName = "" + addonTokenAdapterImageTag = "" $log.info("in_container_inventory::enumerate : Begin processing @ #{Time.now.utc.iso8601}") if ExtensionUtils.isAADMSIAuthMode() $log.info("in_container_inventory::enumerate: AAD AUTH MSI MODE") @@ -82,6 +83,15 @@ def enumerate if hostName.empty? && !containerRecord["Computer"].empty? hostName = containerRecord["Computer"] end + if addonTokenAdapterImageTag.empty? && ExtensionUtils.isAADMSIAuthMode() + if !containerRecord["ElementName"].nil? && !containerRecord["ElementName"].empty? && + containerRecord["ElementName"].include?("kube-system") && + containerRecord["ElementName"].include?("addon-token-adapter_omsagent") + if !containerRecord["ImageTag"].nil? && !containerRecord["ImageTag"].empty? 
+ addonTokenAdapterImageTag = containerRecord["ImageTag"] + end + end + end containerIds.push containerRecord["InstanceID"] containerInventory.push containerRecord end @@ -117,6 +127,9 @@ def enumerate telemetryProperties = {} telemetryProperties["Computer"] = hostName telemetryProperties["ContainerCount"] = containerInventory.length + if !addonTokenAdapterImageTag.empty? + telemetryProperties["addonTokenAdapterImageTag"] = addonTokenAdapterImageTag + end ApplicationInsightsUtility.sendTelemetry(@@PluginName, telemetryProperties) end rescue => errorStr From ae9ebd7647758b7d1dc6938da0a61733a4185f44 Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Thu, 30 Sep 2021 09:46:32 -0700 Subject: [PATCH 160/301] Sarah/ev2 helm (#658) * Use MSI for Arc Release * Use CIPROD_ACR AME subscription for shell extension * remove extra line endings --- ...ContainerInsightsExtension.Parameters.json | 30 +++++-------------- .../Public.Canary.RolloutSpec.json | 2 +- .../ScopeBindings/Public.ScopeBindings.json | 30 ++++++++++++++++++- .../Scripts/pushChartToAcr.sh | 20 +++++++++++-- .../ServiceModels/Public.ServiceModel.json | 16 +++++----- 5 files changed, 64 insertions(+), 34 deletions(-) diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/Parameters/ContainerInsightsExtension.Parameters.json b/deployment/arc-k8s-extension/ServiceGroupRoot/Parameters/ContainerInsightsExtension.Parameters.json index a8a99e9f6..69e1bcf35 100644 --- a/deployment/arc-k8s-extension/ServiceGroupRoot/Parameters/ContainerInsightsExtension.Parameters.json +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/Parameters/ContainerInsightsExtension.Parameters.json @@ -31,26 +31,6 @@ "name": "RELEASE_STAGE", "value": "__RELEASE_STAGE__" }, - { - "name": "ACR_APP_ID", - "reference": { - "provider": "AzureKeyVault", - "parameters": { - "secretId": "https://cibuildandreleasekv.vault.azure.net/secrets/ciprodacrappid/e8f47bf7505741ebaf65a4db16ff9fa7" - } - }, - "asSecureValue": "true" - }, - { - "name": "ACR_APP_SECRET", - "reference": { - "provider": "AzureKeyVault", - "parameters": { - "secretId": "https://cibuildandreleasekv.vault.azure.net/secrets/ciprodacrappsecret/8718afcdac114accb8b26f613cef1e1e" - } - }, - "asSecureValue": "true" - }, { "name": "ACR_NAME", "value": "__ACR_NAME__" @@ -59,8 +39,14 @@ "name": "CHART_VERSION", "value": "__CHART_VERSION__" } - ] + ], + "identity": { + "type": "userAssigned", + "userAssignedIdentities": [ + "__MANAGED_IDENTITY__" + ] + } } } ] -} +} \ No newline at end of file diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Canary.RolloutSpec.json b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Canary.RolloutSpec.json index cde103633..2d0149e24 100644 --- a/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Canary.RolloutSpec.json +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Canary.RolloutSpec.json @@ -26,4 +26,4 @@ "dependsOn": [ ] } ] -} +} \ No newline at end of file diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json b/deployment/arc-k8s-extension/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json index 516eba3e2..bf61ab7fd 100644 --- a/deployment/arc-k8s-extension/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json @@ -16,6 +16,10 @@ { "find": "__CHART_VERSION__", "replaceWith": "$(ChartVersion)" + }, + { + 
"find": "__MANAGED_IDENTITY__", + "replaceWith": "$(ManagedIdentity)" } ] }, @@ -33,6 +37,10 @@ { "find": "__CHART_VERSION__", "replaceWith": "$(ChartVersion)" + }, + { + "find": "__MANAGED_IDENTITY__", + "replaceWith": "$(ManagedIdentity)" } ] }, @@ -50,6 +58,10 @@ { "find": "__CHART_VERSION__", "replaceWith": "$(ChartVersion)" + }, + { + "find": "__MANAGED_IDENTITY__", + "replaceWith": "$(ManagedIdentity)" } ] }, @@ -67,6 +79,10 @@ { "find": "__CHART_VERSION__", "replaceWith": "$(ChartVersion)" + }, + { + "find": "__MANAGED_IDENTITY__", + "replaceWith": "$(ManagedIdentity)" } ] }, @@ -84,6 +100,10 @@ { "find": "__CHART_VERSION__", "replaceWith": "$(ChartVersion)" + }, + { + "find": "__MANAGED_IDENTITY__", + "replaceWith": "$(ManagedIdentity)" } ] }, @@ -101,6 +121,10 @@ { "find": "__CHART_VERSION__", "replaceWith": "$(ChartVersion)" + }, + { + "find": "__MANAGED_IDENTITY__", + "replaceWith": "$(ManagedIdentity)" } ] }, @@ -118,8 +142,12 @@ { "find": "__CHART_VERSION__", "replaceWith": "$(ChartVersion)" + }, + { + "find": "__MANAGED_IDENTITY__", + "replaceWith": "$(ManagedIdentity)" } ] } ] -} +} \ No newline at end of file diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/Scripts/pushChartToAcr.sh b/deployment/arc-k8s-extension/ServiceGroupRoot/Scripts/pushChartToAcr.sh index 520557592..99421b122 100644 --- a/deployment/arc-k8s-extension/ServiceGroupRoot/Scripts/pushChartToAcr.sh +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/Scripts/pushChartToAcr.sh @@ -106,8 +106,24 @@ echo "START - Release stage : ${RELEASE_STAGE}" echo "Using acr : ${ACR_NAME}" echo "Using acr repo type: ${REPO_TYPE}" +#Login to az cli and authenticate to acr +echo "Login cli using managed identity" +az login --identity +if [ $? -eq 0 ]; then + echo "Logged in successfully" +else + echo "-e error az login with managed identity credentials failed. Please review the Ev2 pipeline logs for more details on the error." + exit 1 +fi + +ACCESS_TOKEN=$(az acr login --name ${ACR_NAME} --expose-token --output tsv --query accessToken) +if [ $? -ne 0 ]; then + echo "-e error az acr login failed. Please review the Ev2 pipeline logs for more details on the error." + exit 1 +fi + echo "login to acr:${ACR_NAME} using helm ..." -echo $ACR_APP_SECRET | helm registry login $ACR_NAME --username $ACR_APP_ID --password-stdin +echo $ACCESS_TOKEN | helm registry login $ACR_NAME -u 00000000-0000-0000-0000-000000000000 --password-stdin if [ $? -eq 0 ]; then echo "login to acr:${ACR_NAME} using helm completed successfully." 
else @@ -178,4 +194,4 @@ case $RELEASE_STAGE in ;; esac -echo "END - Release stage : ${RELEASE_STAGE}" +echo "END - Release stage : ${RELEASE_STAGE}" \ No newline at end of file diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json b/deployment/arc-k8s-extension/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json index 71081661a..6f565d4c4 100644 --- a/deployment/arc-k8s-extension/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json @@ -33,7 +33,7 @@ "AzureResourceGroupName": "ContainerInsightsExtension-Canary-Release", "Location": "eastus2", "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", - "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "AzureSubscriptionId": "30c56c3a-54da-46ea-b004-06eb33432687", "ScopeTags": [ { "Name": "Canary" @@ -51,7 +51,7 @@ "AzureResourceGroupName": "ContainerInsightsExtension-Pilot-Release", "Location": "eastus2", "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", - "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "AzureSubscriptionId": "30c56c3a-54da-46ea-b004-06eb33432687", "ScopeTags": [ { "Name": "Pilot" @@ -69,7 +69,7 @@ "AzureResourceGroupName": "ContainerInsightsExtension-LightLoad-Release", "Location": "eastus2", "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", - "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "AzureSubscriptionId": "30c56c3a-54da-46ea-b004-06eb33432687", "ScopeTags": [ { "Name": "LightLoad" @@ -87,7 +87,7 @@ "AzureResourceGroupName": "ContainerInsightsExtension-MediumLoad-Release", "Location": "eastus2", "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", - "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "AzureSubscriptionId": "30c56c3a-54da-46ea-b004-06eb33432687", "ScopeTags": [ { "Name": "MediumLoad" @@ -105,7 +105,7 @@ "AzureResourceGroupName": "ContainerInsightsExtension-HighLoad-Release", "Location": "eastus2", "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", - "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "AzureSubscriptionId": "30c56c3a-54da-46ea-b004-06eb33432687", "ScopeTags": [ { "Name": "HighLoad" @@ -123,7 +123,7 @@ "AzureResourceGroupName": "ContainerInsightsExtension-FF-Release", "Location": "eastus2", "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", - "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "AzureSubscriptionId": "30c56c3a-54da-46ea-b004-06eb33432687", "ScopeTags": [ { "Name": "FF" @@ -141,7 +141,7 @@ "AzureResourceGroupName": "ContainerInsightsExtension-MC-Release", "Location": "eastus2", "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", - "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "AzureSubscriptionId": "30c56c3a-54da-46ea-b004-06eb33432687", "ScopeTags": [ { "Name": "MC" @@ -156,4 +156,4 @@ ] } ] - } + } \ No newline at end of file From a6c6c4a9bf5e0c676f164761da836941a3f41995 Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Mon, 4 Oct 2021 15:37:17 -0700 Subject: [PATCH 161/301] Sarah/ev2 pipeline (#661) * testing build artifact dir changes * add .pipelines directory and omsagent.yaml to build artifacts --- ...fficial.all_tag.all_phase.all_config.ci_prod.yml | 13 +++++++++++++ .pipelines/pipeline.user.linux.yml | 8 ++++++++ 2 files changed, 21 insertions(+) diff --git 
a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml index 4f73d7c71..9aed01213 100644 --- a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml +++ b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml @@ -28,6 +28,19 @@ build: name: 'Build Docker Provider Shell Bundle' command: '.pipelines/build-linux.sh' fail_on_stderr: false + artifacts: + - from: 'deployment' + to: 'build' + include: + - '**' + - from: '.pipelines' + to: 'build' + include: + - '*.sh' + - from: 'kubernetes' + to: 'build' + include: + - '*.yaml' package: commands: diff --git a/.pipelines/pipeline.user.linux.yml b/.pipelines/pipeline.user.linux.yml index a1175263e..7acd7da74 100644 --- a/.pipelines/pipeline.user.linux.yml +++ b/.pipelines/pipeline.user.linux.yml @@ -33,6 +33,14 @@ build: to: 'build' include: - '**' + - from: '.pipelines' + to: 'build' + include: + - '*.sh' + - from: 'kubernetes' + to: 'build' + include: + - '*.yaml' package: commands: From 9e2df4d3d1193374b49d69e332fa2e7df7b55c2d Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Mon, 4 Oct 2021 16:13:06 -0700 Subject: [PATCH 162/301] add charts directory to build artifacts (#662) --- ...er.linux.official.all_tag.all_phase.all_config.ci_prod.yml | 4 ++++ .pipelines/pipeline.user.linux.yml | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml index 9aed01213..a199bd860 100644 --- a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml +++ b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml @@ -41,6 +41,10 @@ build: to: 'build' include: - '*.yaml' + - from: 'charts' + to: 'build' + include: + - '**' package: commands: diff --git a/.pipelines/pipeline.user.linux.yml b/.pipelines/pipeline.user.linux.yml index 7acd7da74..60c1f7640 100644 --- a/.pipelines/pipeline.user.linux.yml +++ b/.pipelines/pipeline.user.linux.yml @@ -41,6 +41,10 @@ build: to: 'build' include: - '*.yaml' + - from: 'charts' + to: 'build' + include: + - '**' package: commands: From f1d0e4334c5b035cc15b9a9d93e905febf3dde2c Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Wed, 6 Oct 2021 16:05:00 -0700 Subject: [PATCH 163/301] Sarah/remove cdpx creds (#664) * don't use cdpx acr creds from kv * add e2etest.yaml to build output * keep cdpx creds for now --- ...er.linux.official.all_tag.all_phase.all_config.ci_prod.yml | 4 ++++ .pipelines/pipeline.user.linux.yml | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml index a199bd860..61785f38d 100644 --- a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml +++ b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml @@ -45,6 +45,10 @@ build: to: 'build' include: - '**' + - from: 'test/e2e' + to: 'build' + include: + - '*.yaml' package: commands: diff --git a/.pipelines/pipeline.user.linux.yml b/.pipelines/pipeline.user.linux.yml index 60c1f7640..4c39fad5a 100644 --- a/.pipelines/pipeline.user.linux.yml +++ b/.pipelines/pipeline.user.linux.yml @@ 
-45,6 +45,10 @@ build: to: 'build' include: - '**' + - from: 'test/e2e' + to: 'build' + include: + - '*.yaml' package: commands: From 6ff747cb2d2dc47933823ee2948118fbf9d6df41 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 7 Oct 2021 13:09:30 -0700 Subject: [PATCH 164/301] chart updates for rbac api version change (#660) * chart updates for rbac api version change * include windows ds for arc --- .../templates/omsagent-crd.yaml | 2 +- .../templates/omsagent-daemonset-windows.yaml | 4 ++-- .../templates/omsagent-rbac.yaml | 10 +++++++++- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/charts/azuremonitor-containers/templates/omsagent-crd.yaml b/charts/azuremonitor-containers/templates/omsagent-crd.yaml index bbaf89a52..46c5341cc 100644 --- a/charts/azuremonitor-containers/templates/omsagent-crd.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-crd.yaml @@ -1,4 +1,4 @@ -{{- if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion }} +{{- if semverCompare "<1.19-0" .Capabilities.KubeVersion.Version }} apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index 580ef9d15..efed76f7d 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -1,4 +1,4 @@ -{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId ""))}} +{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId "") (ne .Values.Azure.Cluster.ResourceId "") )}} apiVersion: apps/v1 kind: DaemonSet metadata: @@ -32,7 +32,7 @@ spec: options: - name: ndots value: "3" -{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.Version }} nodeSelector: kubernetes.io/os: windows {{- else }} diff --git a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml index c0a6e3722..d9bca069d 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml @@ -10,7 +10,11 @@ metadata: heritage: {{ .Release.Service }} --- kind: ClusterRole +{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- end }} metadata: name: omsagent-reader labels: @@ -33,7 +37,7 @@ rules: verbs: ["get", "create", "patch"] - nonResourceURLs: ["/metrics"] verbs: ["get"] -#arc k8s extension model grants access as part of the extension msi +#arc k8s extension model grants access as part of the extension msi #remove this explicit permission once the extension available in public preview {{- if (empty .Values.Azure.Extension.Name) }} - apiGroups: [""] @@ -43,7 +47,11 @@ rules: {{- end }} --- kind: ClusterRoleBinding +{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- else }} apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- end }} metadata: name: omsagentclusterrolebinding labels: From f77587a3f3470de8593fa89809ffcf3ae10afb83 Mon Sep 17 
00:00:00 2001 From: David Michelman Date: Fri, 8 Oct 2021 10:44:32 -0700 Subject: [PATCH 165/301] proxy support (for non-aks) (#665) * changes related to aad msi auth feature * use existing envvars * fix imds token expiry interval * initial proxy support * merge? * cleaning up some files which should've merged differently * proxy should be working, but most tables don't have any data. About to merge, maybe whatever was wrong is now fixed * linux AMA proxy works * about to merge * proxy support appears to be working, final mdsd build location will still change * removing some unnecessary changes * forgot to remove one last change * redirected mdsd stderr to stdout instead of stdin * addressing proxy password location comment Co-authored-by: Ganga Mahesh Siddem --- kubernetes/linux/main.sh | 17 ++++++++++++++++- kubernetes/linux/setup.sh | 4 ++-- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 4986e3113..a9184ab53 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -195,6 +195,21 @@ if [ -e "/etc/omsagent-secret/WSID" ]; then else echo "successfully validated provided proxy endpoint is valid and expected format" fi + + echo $pwd > /opt/microsoft/docker-cimprov/proxy_password + + export MDSD_PROXY_MODE=application + echo "export MDSD_PROXY_MODE=$MDSD_PROXY_MODE" >> ~/.bashrc + export MDSD_PROXY_ADDRESS=$proto$hostport + echo "export MDSD_PROXY_ADDRESS=$MDSD_PROXY_ADDRESS" >> ~/.bashrc + export MDSD_PROXY_USERNAME=$user + echo "export MDSD_PROXY_USERNAME=$MDSD_PROXY_USERNAME" >> ~/.bashrc + export MDSD_PROXY_PASSWORD_FILE=/opt/microsoft/docker-cimprov/proxy_password + echo "export MDSD_PROXY_PASSWORD_FILE=$MDSD_PROXY_PASSWORD_FILE" >> ~/.bashrc + + #TODO: Compression + proxy creates a deserialization error in ODS. This needs a fix in MDSD + export MDSD_ODS_COMPRESSION_LEVEL=0 + echo "export MDSD_ODS_COMPRESSION_LEVEL=$MDSD_ODS_COMPRESSION_LEVEL" >> ~/.bashrc fi if [ ! -z "$PROXY_ENDPOINT" ]; then @@ -563,7 +578,7 @@ if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then else echo "starting mdsd mode in main container..." 
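(Illustrative aside, not part of the patch: the proxy hunk above references $proto, $hostport, $user and $pwd without showing where they are derived. A minimal sketch, assuming main.sh parses an optional PROXY_ENDPOINT secret of the form http://user:password@host:port — the endpoint value and parsing below are hypothetical, not the repository's actual code:

  PROXY_ENDPOINT="http://proxyuser:proxypass@myproxy.contoso.com:3128"       # example value only
  proto="$(echo $PROXY_ENDPOINT | grep :// | sed -e 's,^\(.*://\).*,\1,g')"  # -> http://
  creds="$(echo ${PROXY_ENDPOINT/$proto/} | grep @ | cut -d@ -f1)"           # -> proxyuser:proxypass
  user="$(echo $creds | cut -d: -f1)"                                        # -> proxyuser
  pwd="$(echo $creds | cut -d: -f2-)"                                        # -> proxypass
  hostport="$(echo ${PROXY_ENDPOINT/$proto/} | cut -d@ -f2)"                 # -> myproxy.contoso.com:3128
  # With these values, the exports added in the hunk above would resolve to, e.g.:
  #   MDSD_PROXY_ADDRESS=http://myproxy.contoso.com:3128
  #   MDSD_PROXY_USERNAME=proxyuser
  #   and the password is written to /opt/microsoft/docker-cimprov/proxy_password,
  #   the file MDSD_PROXY_PASSWORD_FILE points at.
)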
# add -T 0xFFFF for full traces - mdsd ${MDSD_AAD_MSI_AUTH_ARGS} -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & + mdsd ${MDSD_AAD_MSI_AUTH_ARGS} -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos 2>> /dev/null & fi # Set up a cron job for logrotation diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index c14007d35..371d26fa5 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -9,8 +9,8 @@ sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ dpkg-reconfigure --frontend=noninteractive locales && \ update-locale LANG=en_US.UTF-8 -#install oneagent - Official bits (08/04/2021) -wget https://github.com/microsoft/Docker-Provider/releases/download/06242021-oneagent/azure-mdsd_1.10.3-build.master.257_x86_64.deb +#install oneagent - Official bits (10/7/2021) +wget https://github.com/microsoft/Docker-Provider/releases/download/1.14/azure-mdsd_1.14.0-build.master.279_x86_64.deb /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb cp -f $TMPDIR/mdsd.xml /etc/mdsd.d From 34f5c52e5968e44793d31969ddd7c820b6722553 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 8 Oct 2021 14:23:25 -0700 Subject: [PATCH 166/301] Gangams/agent release ciprod10082021 & win-ciprod10082021 (#666) * updates for the release ciprod10082021 and win-ciprod10082021 * updates for the release ciprod10082021 and win-ciprod10082021 * updates for the release ciprod10082021 and win-ciprod10082021 * updates for the release ciprod10082021 and win-ciprod10082021 --- ReleaseNotes.md | 31 ++++++++++++++++++++ kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 16 ++++++---- kubernetes/windows/Dockerfile | 2 +- source/plugins/ruby/in_containerinventory.rb | 12 ++++---- 5 files changed, 50 insertions(+), 13 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index dc42e7d51..0fd0f7948 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,37 @@ additional questions or comments. 
Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 10/08/2021 - +##### Version microsoft/oms:ciprod10082021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10082021 (linux) +##### Version microsoft/oms:win-ciprod10082021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10082021 (windows) +##### Code change log +- Linux Agent + - MDSD Proxy support for non-AKS + - log rotation for mdsd log files {err,warn, info & qos} + - Onboarding status + - AAD Auth MSI changes (not usable externally yet) + - Upgrade k8s and adx go packages to fix vulnerabilities + - Fix missing telegraf metrics (TelegrafMetricsSentCount & TelegrafMetricsSendErrorCount) in mdsd route + - Improve fluentd liveness probe checks to handle both supervisor and worker process + - Fix telegraf startup issue when endpoint is unreachable +- Windows Agent + - Windows liveness probe optimization +- Common + - Add new metrics to MDM for allocatable % calculation of cpu and memory usage +- Other changes + - Helm chart updates for removal of rbac api version and deprecation of.Capabilities.KubeVersion.GitVersion to .Capabilities.KubeVersion.Version + - Updates to build and release ev2 + - Scripts to collect troubleshooting logs + - Unit test tooling + - Yaml updates in parity with aks rp yaml + - upgrade golang version for windows in pipelines + - Conformance test updates + +### 09/02/2021 - +##### Version microsoft/oms:ciprod08052021-1 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021-1 (linux) +##### Code change log +- Bumping image tag for some tooling (no code changes except the IMAGE_TAG environment variable) + ### 08/05/2021 - ##### Version microsoft/oms:ciprod08052021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021 (linux) ##### Code change log diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 07af7f4a7..fd408b9b2 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod08052021 +ARG IMAGE_TAG=ciprod10082021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 98621b5f0..97e32c0e1 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -368,16 +368,22 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10082021" imagePullPolicy: IfNotPresent resources: limits: cpu: 500m - memory: 600Mi + memory: 750Mi requests: cpu: 75m - memory: 225Mi + memory: 325Mi env: + - name: FBIT_SERVICE_FLUSH_INTERVAL + value: "15" + - name: FBIT_TAIL_BUFFER_CHUNK_SIZE + value: "1" + - name: FBIT_TAIL_BUFFER_MAX_SIZE + value: "1" # azure devops pipeline uses AKS_RESOURCE_ID and AKS_REGION hence ensure to uncomment these - name: AKS_RESOURCE_ID value: "VALUE_AKS_RESOURCE_ID_VALUE" @@ -597,7 +603,7 @@ spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10082021" imagePullPolicy: IfNotPresent 
resources: limits: @@ -770,7 +776,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021-2" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10082021" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index aa756b8b8..76667f389 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod06112021-2 +ARG IMAGE_TAG=win-ciprod10082021 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index f52ed4026..c8ffe7d05 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -19,6 +19,7 @@ def initialize require_relative "CAdvisorMetricsAPIClient" require_relative "kubernetes_container_inventory" require_relative "extension_utils" + @addonTokenAdapterImageTag = "" end config_param :run_interval, :time, :default => 60 @@ -57,7 +58,6 @@ def enumerate containerInventory = Array.new eventStream = Fluent::MultiEventStream.new hostName = "" - addonTokenAdapterImageTag = "" $log.info("in_container_inventory::enumerate : Begin processing @ #{Time.now.utc.iso8601}") if ExtensionUtils.isAADMSIAuthMode() $log.info("in_container_inventory::enumerate: AAD AUTH MSI MODE") @@ -83,12 +83,12 @@ def enumerate if hostName.empty? && !containerRecord["Computer"].empty? hostName = containerRecord["Computer"] end - if addonTokenAdapterImageTag.empty? && ExtensionUtils.isAADMSIAuthMode() + if @addonTokenAdapterImageTag.empty? && ExtensionUtils.isAADMSIAuthMode() if !containerRecord["ElementName"].nil? && !containerRecord["ElementName"].empty? && - containerRecord["ElementName"].include?("kube-system") && + containerRecord["ElementName"].include?("_kube-system_") && containerRecord["ElementName"].include?("addon-token-adapter_omsagent") if !containerRecord["ImageTag"].nil? && !containerRecord["ImageTag"].empty? - addonTokenAdapterImageTag = containerRecord["ImageTag"] + @addonTokenAdapterImageTag = containerRecord["ImageTag"] end end end @@ -127,8 +127,8 @@ def enumerate telemetryProperties = {} telemetryProperties["Computer"] = hostName telemetryProperties["ContainerCount"] = containerInventory.length - if !addonTokenAdapterImageTag.empty? - telemetryProperties["addonTokenAdapterImageTag"] = addonTokenAdapterImageTag + if !@addonTokenAdapterImageTag.empty? 
+ telemetryProperties["addonTokenAdapterImageTag"] = @addonTokenAdapterImageTag end ApplicationInsightsUtility.sendTelemetry(@@PluginName, telemetryProperties) end From c4d22548d7280591db3f45241a09ff8727aa7297 Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Fri, 8 Oct 2021 15:03:49 -0700 Subject: [PATCH 167/301] use buildcommand for prod pipeline (#668) --- ...user.linux.official.all_tag.all_phase.all_config.ci_prod.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml index 61785f38d..97390298c 100644 --- a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml +++ b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml @@ -24,7 +24,7 @@ restore: build: commands: - - !!defaultcommand + - !!buildcommand name: 'Build Docker Provider Shell Bundle' command: '.pipelines/build-linux.sh' fail_on_stderr: false From 3b008e5e0ce0c62c0a1d015bb029019d47cc2da5 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 11 Oct 2021 10:29:16 -0700 Subject: [PATCH 168/301] fixed merge issues. (#671) (#672) * fix merge conflicts * update with newimage tag --- ReleaseNotes.md | 4 ++-- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 6 +++--- kubernetes/windows/Dockerfile | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 0fd0f7948..98b1ef3ce 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -12,8 +12,8 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) ### 10/08/2021 - -##### Version microsoft/oms:ciprod10082021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10082021 (linux) -##### Version microsoft/oms:win-ciprod10082021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10082021 (windows) +##### Version microsoft/oms:ciprod10092021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10092021 (linux) +##### Version microsoft/oms:win-ciprod10092021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10092021 (windows) ##### Code change log - Linux Agent - MDSD Proxy support for non-AKS diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index fd408b9b2..c3f952d4e 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod10082021 +ARG IMAGE_TAG=ciprod10092021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 97e32c0e1..e7b632d04 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10082021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10092021" imagePullPolicy: IfNotPresent resources: limits: @@ -603,7 +603,7 @@ spec: serviceAccountName: omsagent containers: - name: omsagent - image: 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10082021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10092021" imagePullPolicy: IfNotPresent resources: limits: @@ -776,7 +776,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10082021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10092021" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 76667f389..0e6591e3f 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod10082021 +ARG IMAGE_TAG=win-ciprod10092021 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement From d16d84b9e6afa81c169d638aeab948e9b5c8d418 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 11 Oct 2021 14:26:47 -0700 Subject: [PATCH 169/301] changes related to mdsd version update (#673) (#674) --- ReleaseNotes.md | 6 +++--- kubernetes/linux/Dockerfile | 2 +- kubernetes/linux/setup.sh | 2 +- kubernetes/omsagent.yaml | 6 +++--- kubernetes/windows/Dockerfile | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 98b1ef3ce..3e08481ee 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,9 +11,9 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) -### 10/08/2021 - -##### Version microsoft/oms:ciprod10092021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10092021 (linux) -##### Version microsoft/oms:win-ciprod10092021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10092021 (windows) +### 10/11/2021 - +##### Version microsoft/oms:ciprod10112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10112021 (linux) +##### Version microsoft/oms:win-ciprod10112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10112021 (windows) ##### Code change log - Linux Agent - MDSD Proxy support for non-AKS diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index c3f952d4e..9b2241c7b 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod10092021 +ARG IMAGE_TAG=ciprod10112021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 371d26fa5..7baae7954 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -10,7 +10,7 @@ sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ update-locale LANG=en_US.UTF-8 #install oneagent - Official bits (10/7/2021) -wget https://github.com/microsoft/Docker-Provider/releases/download/1.14/azure-mdsd_1.14.0-build.master.279_x86_64.deb +wget https://github.com/microsoft/Docker-Provider/releases/download/1.14/azure-mdsd_1.14.1-build.master.283_x86_64.deb /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb cp -f $TMPDIR/mdsd.xml /etc/mdsd.d diff --git 
a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index e7b632d04..a608b8f0c 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10092021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10112021" imagePullPolicy: IfNotPresent resources: limits: @@ -603,7 +603,7 @@ spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10092021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10112021" imagePullPolicy: IfNotPresent resources: limits: @@ -776,7 +776,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10092021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10112021" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 0e6591e3f..5b187d91a 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod10092021 +ARG IMAGE_TAG=win-ciprod10112021 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement From ce65f2cbfe4e37dd5f203dd8f0372b782d346920 Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Tue, 12 Oct 2021 12:48:55 -0700 Subject: [PATCH 170/301] Sarah/enable metrics (#675) * add user assigned msi to yaml for pipeline * update placeholders --- .pipelines/update-place-holders-in-yaml.sh | 5 +++++ kubernetes/omsagent.yaml | 8 ++++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/.pipelines/update-place-holders-in-yaml.sh b/.pipelines/update-place-holders-in-yaml.sh index 906303667..6b962bf72 100755 --- a/.pipelines/update-place-holders-in-yaml.sh +++ b/.pipelines/update-place-holders-in-yaml.sh @@ -10,6 +10,7 @@ do case "$KEY" in ClusterResourceId) ClusterResourceId=$VALUE ;; ClusterRegion) ClusterRegion=$VALUE ;; + UserAssignedIdentityClientId) UserAssignedIdentityClientId=$VALUE ;; CIRelease) CI_RELEASE=$VALUE ;; CIImageTagSuffix) CI_IMAGE_TAG_SUFFIX=$VALUE ;; *) @@ -24,6 +25,10 @@ echo "clusterRegion:$ClusterRegion" echo "replace cluster region" sed -i "s/VALUE_AKS_RESOURCE_REGION_VALUE/$ClusterRegion/g" omsagent.yaml +echo "userAssignedIdentityClientId:$UserAssignedIdentityClientId" +echo "replace user assigned identity client id" +sed -i "s=VALUE_USER_ASSIGNED_IDENTITY_CLIENT_ID_VALUE=$UserAssignedIdentityClientId=g" omsagent.yaml + echo "replace linux agent image" linuxAgentImageTag=$CI_RELEASE$CI_IMAGE_TAG_SUFFIX echo "Linux Agent Image Tag:"$linuxAgentImageTag diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index a608b8f0c..616dcc889 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -403,7 +403,7 @@ spec: fieldPath: status.hostIP # Update this with the user assigned msi client id for omsagent - name: USER_ASSIGNED_IDENTITY_CLIENT_ID - value: "" + value: "VALUE_USER_ASSIGNED_IDENTITY_CLIENT_ID_VALUE" - name: AZMON_CONTAINERLOGS_ONEAGENT_REGIONS value: "koreacentral,norwayeast,eastus2" - name: USING_AAD_MSI_AUTH @@ -486,7 +486,7 @@ spec: # fieldPath: status.hostIP # # Update this with the user assigned msi 
client id for omsagent # - name: USER_ASSIGNED_IDENTITY_CLIENT_ID - # value: "" + # value: "VALUE_USER_ASSIGNED_IDENTITY_CLIENT_ID_VALUE" # - name: USING_AAD_MSI_AUTH # value: "false" # securityContext: @@ -631,7 +631,7 @@ spec: fieldPath: status.hostIP # Update this with the user assigned msi client id for omsagent - name: USER_ASSIGNED_IDENTITY_CLIENT_ID - value: "" + value: "VALUE_USER_ASSIGNED_IDENTITY_CLIENT_ID_VALUE" # Add the below environment variable to true only in sidecar enabled regions, else set it to false - name: SIDECAR_SCRAPING_ENABLED value: "false" @@ -808,7 +808,7 @@ spec: value: "false" # Update this with the user assigned msi client id for omsagent - name: USER_ASSIGNED_IDENTITY_CLIENT_ID - value: "" + value: "VALUE_USER_ASSIGNED_IDENTITY_CLIENT_ID_VALUE" # Add this only for clouds that require cert bootstrapping # - name: REQUIRES_CERT_BOOTSTRAP # value: "true" From 608f92e9e8ba8d6223d7428630748da0ebfe4df9 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 12 Oct 2021 17:58:14 -0700 Subject: [PATCH 171/301] Gangams/chart updates oct2021 release (#676) * chart updates for oct2021 release * wip * wip * wip --- charts/azuremonitor-containers/Chart.yaml | 2 +- .../templates/omsagent-daemonset-windows.yaml | 8 ++- .../templates/omsagent-daemonset.yaml | 67 +++++++++++++++++++ .../templates/omsagent-deployment.yaml | 8 +-- charts/azuremonitor-containers/values.yaml | 29 +++++--- 5 files changed, 97 insertions(+), 17 deletions(-) diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 00f3f49ed..4dd6623bf 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.8.3 +version: 2.9.0 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index efed76f7d..78831aa10 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -86,7 +86,7 @@ spec: fieldRef: fieldPath: metadata.name - name: SIDECAR_SCRAPING_ENABLED - value: "false" + value: {{ .Values.omsagent.sidecarscraping | quote }} volumeMounts: - mountPath: C:\ProgramData\docker\containers name: docker-windows-containers @@ -104,7 +104,11 @@ spec: command: - cmd - /c - - C:\opt\omsagentwindows\scripts\cmd\livenessProbe.cmd + - C:\opt\omsagentwindows\scripts\cmd\livenessprobe.exe + - fluent-bit.exe + - fluentdwinaks + - "C:\\etc\\omsagentwindows\\filesystemwatcher.txt" + - "C:\\etc\\omsagentwindows\\renewcertificate.txt" periodSeconds: 60 initialDelaySeconds: 180 timeoutSeconds: 15 diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index 7201ee6ae..8e5513f91 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -132,6 +132,69 @@ spec: initialDelaySeconds: 60 periodSeconds: 60 timeoutSeconds: 15 + {{- if .Values.omsagent.sidecarscraping }} + - name: omsagent-prometheus + {{- if eq (.Values.omsagent.domain | lower) "opinsights.azure.cn" }} + image: 
"mcr.azk8s.cn/azuremonitor/containerinsights/ciprod:{{ .Values.omsagent.image.tag }}" + {{- else }} + image: {{ printf "%s:%s" .Values.omsagent.image.repo .Values.omsagent.image.tag }} + {{- end }} + imagePullPolicy: IfNotPresent + resources: +{{ toYaml .Values.omsagent.resources.daemonsetlinuxsidecar | indent 9 }} + env: + {{- if ne .Values.omsagent.env.clusterId "" }} + - name: AKS_RESOURCE_ID + value: {{ .Values.omsagent.env.clusterId | quote }} + {{- if ne .Values.omsagent.env.clusterRegion "" }} + - name: AKS_REGION + value: {{ .Values.omsagent.env.clusterRegion | quote }} + {{- end }} + {{- else if ne .Values.Azure.Cluster.ResourceId "" }} + - name: AKS_RESOURCE_ID + value: {{ .Values.Azure.Cluster.ResourceId | quote }} + {{- if ne .Values.Azure.Cluster.Region "" }} + - name: AKS_REGION + value: {{ .Values.Azure.Cluster.Region | quote }} + {{- end }} + {{- else }} + - name: ACS_RESOURCE_NAME + value: {{ .Values.omsagent.env.clusterName | quote }} + {{- end }} + - name: CONTROLLER_TYPE + value: "DaemonSet" + - name: CONTAINER_TYPE + value: "PrometheusSidecar" + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: ISTEST + value: {{ .Values.omsagent.ISTEST | quote }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/kubernetes/host + name: azure-json-path + - mountPath: /etc/omsagent-secret + name: omsagent-secret + readOnly: true + - mountPath: /etc/config/settings + name: settings-vol-config + readOnly: true + - mountPath: /etc/config/osm-settings + name: osm-settings-vol-config + readOnly: true + livenessProbe: + exec: + command: + - /bin/bash + - -c + - /opt/livenessprobe.sh + initialDelaySeconds: 60 + periodSeconds: 60 + timeoutSeconds: 15 + {{- end }} {{- with .Values.omsagent.daemonset.affinity }} affinity: {{- toYaml . 
| nindent 8 }} {{- end }} @@ -173,4 +236,8 @@ spec: secret: secretName: omsagent-adx-secret optional: true + - name: osm-settings-vol-config + configMap: + name: container-azm-ms-osmconfig + optional: true {{- end }} diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index fdc520cba..1eaf7f652 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -69,14 +69,14 @@ spec: fieldPath: status.hostIP {{- if not (empty .Values.Azure.Extension.Name) }} - name: ARC_K8S_EXTENSION_NAME - value: {{ .Values.Azure.Extension.Name | quote }} - {{- end }} + value: {{ .Values.Azure.Extension.Name | quote }} + {{- end }} - name: USER_ASSIGNED_IDENTITY_CLIENT_ID value: "" - name: SIDECAR_SCRAPING_ENABLED - value: "false" + value: {{ .Values.omsagent.sidecarscraping | quote }} - name: ISTEST - value: {{ .Values.omsagent.ISTEST | quote }} + value: {{ .Values.omsagent.ISTEST | quote }} securityContext: privileged: true ports: diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 9dd5317a4..0d78ed50f 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -17,14 +17,14 @@ Azure: httpProxy: "" httpsProxy: "" noProxy: "" - proxyCert: "" + proxyCert: "" omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod04222021" - tagWindows: "win-ciprod04222021" + tag: "ciprod10112021" + tagWindows: "win-ciprod10112021" pullPolicy: IfNotPresent - dockerProviderVersion: "15.0.0-0" + dockerProviderVersion: "16.0.0-0" agentVersion: "1.10.0.1" # The priority used by the omsagent priority class for the daemonset pods @@ -39,7 +39,7 @@ omsagent: # chance to build pod for the node and give it to the scheduler) # Should be some number greater than default (0) priority: 10 - + # This used for running agent pods in test mode. 
# if set to true additional agent workflow logs will be emitted which are used for e2e and arc k8s conformance testing ISTEST: false @@ -58,10 +58,11 @@ omsagent: clusterId: clusterRegion: rbac: true + sidecarscraping: true logsettings: - logflushintervalsecs: "" - tailbufchunksizemegabytes: "" - tailbufmaxsizemegabytes: "" + logflushintervalsecs: "15" + tailbufchunksizemegabytes: "1" + tailbufmaxsizemegabytes: "1" ## Applicable for only Azure Stack Edge K8s since it has custom mount path for container logs which will have symlink to /var/log path custommountpath: "" @@ -171,10 +172,10 @@ omsagent: daemonsetlinux: requests: cpu: 75m - memory: 225Mi + memory: 325Mi limits: cpu: 150m - memory: 600Mi + memory: 750Mi daemonsetwindows: limits: cpu: 200m @@ -186,3 +187,11 @@ omsagent: limits: cpu: 1 memory: 1Gi + daemonsetlinuxsidecar: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 75m + memory: 225Mi + From ab98c4b6eb61bda5ec6633e0ff6b3ea1121c41f2 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 13 Oct 2021 16:53:01 -0700 Subject: [PATCH 172/301] Gangams/msi mode mdsd crash fix (#677) * update mdsd version which has fix for crash in msi mode * image tag updates --- ReleaseNotes.md | 6 +++--- charts/azuremonitor-containers/values.yaml | 4 ++-- kubernetes/linux/Dockerfile | 2 +- kubernetes/linux/setup.sh | 2 +- kubernetes/omsagent.yaml | 8 ++++---- kubernetes/windows/Dockerfile | 2 +- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 3e08481ee..c8a147044 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,9 +11,9 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) -### 10/11/2021 - -##### Version microsoft/oms:ciprod10112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10112021 (linux) -##### Version microsoft/oms:win-ciprod10112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10112021 (windows) +### 10/13/2021 - +##### Version microsoft/oms:ciprod10132021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10132021 (linux) +##### Version microsoft/oms:win-ciprod10132021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10132021 (windows) ##### Code change log - Linux Agent - MDSD Proxy support for non-AKS diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 0d78ed50f..3ca313d38 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -21,8 +21,8 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod10112021" - tagWindows: "win-ciprod10112021" + tag: "ciprod10132021" + tagWindows: "win-ciprod10132021" pullPolicy: IfNotPresent dockerProviderVersion: "16.0.0-0" agentVersion: "1.10.0.1" diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 9b2241c7b..90acb4959 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod10112021 +ARG IMAGE_TAG=ciprod10132021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 
7baae7954..243677dd0 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -10,7 +10,7 @@ sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ update-locale LANG=en_US.UTF-8 #install oneagent - Official bits (10/7/2021) -wget https://github.com/microsoft/Docker-Provider/releases/download/1.14/azure-mdsd_1.14.1-build.master.283_x86_64.deb +wget https://github.com/microsoft/Docker-Provider/releases/download/1.14/azure-mdsd_1.14.2-build.master.284_x86_64.deb /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb cp -f $TMPDIR/mdsd.xml /etc/mdsd.d diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 616dcc889..66f8c4010 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10112021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10132021" imagePullPolicy: IfNotPresent resources: limits: @@ -454,7 +454,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode # - name: omsagent-prometheus - # image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021" + # image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10132021" # imagePullPolicy: IfNotPresent # resources: # limits: @@ -603,7 +603,7 @@ spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10112021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10132021" imagePullPolicy: IfNotPresent resources: limits: @@ -776,7 +776,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10112021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10132021" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 5b187d91a..0ddf67ab2 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod10112021 +ARG IMAGE_TAG=win-ciprod10132021 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement From a105a00827331d15122feb0c7af7e0d90861ce52 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 19 Oct 2021 18:08:48 -0700 Subject: [PATCH 173/301] update to use extension GA api version (#679) --- .../arc-k8s-extension/existingClusterOnboarding.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json index 95e7ba5d0..b2b61f4ab 100644 --- a/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json @@ -13,7 +13,7 @@ "metadata": { "description": "Location of the Azure Arc Connected Cluster Resource e.g. 
\"eastus\"" } - }, + }, "workspaceResourceId": { "type": "string", "metadata": { @@ -83,7 +83,7 @@ "subscriptionId": "[split(parameters('clusterResourceId'),'/')[2]]", "resourceGroup": "[split(parameters('clusterResourceId'),'/')[4]]", "dependsOn": [ - "[Concat('ContainerInsights', '-', uniqueString(parameters('workspaceResourceId')))]" + "[Concat('ContainerInsights', '-', uniqueString(parameters('workspaceResourceId')))]" ], "properties": { "mode": "Incremental", @@ -95,7 +95,7 @@ "resources": [ { "type": "Microsoft.KubernetesConfiguration/extensions", - "apiVersion": "2020-07-01-preview", + "apiVersion": "2021-09-01", "name": "azuremonitor-containers", "location": "[parameters('clusterRegion')]", "identity": {"type": "systemassigned"}, @@ -107,7 +107,7 @@ }, "configurationProtectedSettings": { "omsagent.secret.wsid": "[reference(parameters('workspaceResourceId'), '2015-03-20').customerId]", - "omsagent.secret.key": "[listKeys(parameters('workspaceResourceId'), '2015-03-20').primarySharedKey]" + "omsagent.secret.key": "[listKeys(parameters('workspaceResourceId'), '2015-03-20').primarySharedKey]" }, "autoUpgradeMinorVersion": true, "releaseTrain": "Stable", From 87ff2813008caea661b8529d411a69ea7c443bd0 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 19 Oct 2021 18:09:07 -0700 Subject: [PATCH 174/301] Gangams/arm template msi onboarding (#659) * wip * wip * working * working * working * working * working * working * shorten dcr prefix to DCR- to handle default workspace name length * use MSCI- prefix similar to MSVMI- for dcr --- .../existingClusterOnboarding.json | 210 ++++++++++++++++++ .../existingClusterParam.json | 32 +++ 2 files changed, 242 insertions(+) create mode 100644 scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json create mode 100644 scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json new file mode 100644 index 000000000..c77e3203d --- /dev/null +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -0,0 +1,210 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "aksResourceId": { + "type": "string", + "metadata": { + "description": "AKS Cluster Resource ID" + } + }, + "aksResourceLocation": { + "type": "string", + "metadata": { + "description": "Location of the AKS resource e.g. \"East US\"" + } + }, + "aksResourceTagValues": { + "type": "object", + "metadata": { + "description": "Existing all tags on AKS Cluster Resource" + } + }, + "workspaceLocation": { + "type": "string", + "metadata": { + "description": "Worksapce Location for data collection rule" + } + }, + "workspaceResourceId": { + "type": "string", + "metadata": { + "description": "Full Resource ID of the log analitycs workspace that will be used for data destination. 
For example /subscriptions/00000000-0000-0000-0000-0000-00000000/resourceGroups/ResourceGroupName/providers/Microsoft.operationalinsights/workspaces/ws_xyz" + } + }, + "dcrResourceTagValues": { + "type": "object", + "metadata": { + "description": "Existing or new tags on DCR Cluster Resource" + } + } + }, + "variables": { + "clusterSubscriptionId": "[split(parameters('aksResourceId'),'/')[2]]", + "clusterResourceGroup": "[split(parameters('aksResourceId'),'/')[4]]", + "clusterName": "[split(parameters('aksResourceId'),'/')[8]]", + "workspaceSubscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]", + "workspaceResourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]", + "dcrName": "[Concat('MSCI', '-', split(parameters('workspaceResourceId'),'/')[8])]", + "associationName": "ContainerInsightsExtension", + "dataCollectionRuleId": "[resourceId(variables('workspaceSubscriptionId'), variables('workspaceResourceGroup'), 'Microsoft.Insights/dataCollectionRules', variables('dcrName'))]" + }, + "resources": [ + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('aks-monitoring-msi-dcr', '-', uniqueString(variables('dcrName')))]", + "apiVersion": "2017-05-10", + "subscriptionId": "[variables('workspaceSubscriptionId')]", + "resourceGroup": "[variables('workspaceResourceGroup')]", + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "type": "Microsoft.Insights/dataCollectionRules", + "apiVersion": "2019-11-01-preview", + "name": "[variables('dcrName')]", + "location": "[parameters('workspaceLocation')]", + "tags": "[parameters('dcrResourceTagValues')]", + "kind": "Linux", + "properties": { + "dataSources": { + "extensions": [ + { + "name": "ContainerInsightsExtension", + "streams": [ + "Microsoft-Perf", + "Microsoft-ContainerInventory", + "Microsoft-ContainerLog", + "Microsoft-ContainerLogV2", + "Microsoft-ContainerNodeInventory", + "Microsoft-KubeEvents", + "Microsoft-KubeHealth", + "Microsoft-KubeMonAgentEvents", + "Microsoft-KubeNodeInventory", + "Microsoft-KubePodInventory", + "Microsoft-KubePVInventory", + "Microsoft-KubeServices", + "Microsoft-InsightsMetrics" + ], + "extensionName": "ContainerInsights" + } + ] + }, + "destinations": { + "logAnalytics": [ + { + "workspaceResourceId": "[parameters('workspaceResourceId')]", + "name": "ciworkspace" + } + ] + }, + "dataFlows": [ + { + "streams": [ + "Microsoft-Perf", + "Microsoft-ContainerInventory", + "Microsoft-ContainerLog", + "Microsoft-ContainerLogV2", + "Microsoft-ContainerNodeInventory", + "Microsoft-KubeEvents", + "Microsoft-KubeHealth", + "Microsoft-KubeMonAgentEvents", + "Microsoft-KubeNodeInventory", + "Microsoft-KubePodInventory", + "Microsoft-KubePVInventory", + "Microsoft-KubeServices", + "Microsoft-InsightsMetrics" + ], + "destinations": [ + "ciworkspace" + ] + } + ] + } + } + ] + }, + "parameters": {} + } + }, + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('aks-monitoring-msi-dcra', '-', uniqueString(parameters('aksResourceId')))]", + "apiVersion": "2017-05-10", + "subscriptionId": "[variables('clusterSubscriptionId')]", + "resourceGroup": "[variables('clusterResourceGroup')]", + "dependsOn": [ + "[Concat('aks-monitoring-msi-dcr', '-', uniqueString(variables('dcrName')))]" + ], + "properties": { + "mode": "Incremental", + "template": { + "$schema": 
"https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "type": "Microsoft.ContainerService/managedClusters/providers/dataCollectionRuleAssociations", + "name": "[concat(variables('clusterName'),'/microsoft.insights/', variables('associationName'))]", + "apiVersion": "2019-11-01-preview", + "properties": { + "description": "Association of data collection rule. Deleting this association will break the data collection for this AKS Cluster.", + "dataCollectionRuleId": "[variables('dataCollectionRuleId')]" + } + } + + ] + }, + "parameters": {} + } + }, + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('aks-monitoring-msi-addon', '-', uniqueString(parameters('aksResourceId')))]", + "apiVersion": "2017-05-10", + "subscriptionId": "[variables('clusterSubscriptionId')]", + "resourceGroup": "[variables('clusterResourceGroup')]", + "dependsOn": [ + "[Concat('aks-monitoring-msi-dcra', '-', uniqueString(parameters('aksResourceId')))]" + ], + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "name": "[variables('clusterName')]", + "type": "Microsoft.ContainerService/managedClusters", + "location": "[parameters('aksResourceLocation')]", + "tags": "[parameters('aksResourceTagValues')]", + "apiVersion": "2018-03-31", + "properties": { + "mode": "Incremental", + "id": "[parameters('aksResourceId')]", + "addonProfiles": { + "omsagent": { + "enabled": true, + "config": { + "logAnalyticsWorkspaceResourceID": "[parameters('workspaceResourceId')]", + "useAADAuth": "true" + } + } + } + } + } + ] + }, + "parameters": {} + } + } + ] +} diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json new file mode 100644 index 000000000..31f0f9c49 --- /dev/null +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json @@ -0,0 +1,32 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "aksResourceId": { + "value": "/subscriptions//resourcegroups//providers/Microsoft.ContainerService/managedClusters/" + }, + "aksResourceLocation": { + "value": "" + }, + "aksResourceTagValues": { + "value": { + "": "", + "": "", + "": "" + } + }, + "workspaceResourceId": { + "value": "/subscriptions//resourceGroups//providers/Microsoft.OperationalInsights/workspaces/" + }, + "workspaceLocation": { + "value": "" + }, + "dcrResourceTagValues": { + "value": { + "": "", + "": "", + "": "" + } + } + } + } From ac5dec34dd35f5afa083ff0004caf19e1264d965 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 21 Oct 2021 17:40:54 -0700 Subject: [PATCH 175/301] Gangams/conf test updates to handle sidecar (#681) * wip * test updates * fix pr feedback * fix pr feedback --- test/e2e/conformance.yaml | 2 +- test/e2e/src/common/constants.py | 3 +++ test/e2e/src/common/kubernetes_pod_utility.py | 6 +++--- test/e2e/src/core/Dockerfile | 2 +- test/e2e/src/tests/test_ds_workflows.py | 2 +- test/e2e/src/tests/test_resource_status.py | 4 ++++ test/e2e/src/tests/test_rs_workflows.py | 4 +--- 7 files changed, 14 insertions(+), 9 deletions(-) diff --git a/test/e2e/conformance.yaml 
b/test/e2e/conformance.yaml index ff790e690..71e40a6a2 100644 --- a/test/e2e/conformance.yaml +++ b/test/e2e/conformance.yaml @@ -3,7 +3,7 @@ sonobuoy-config: plugin-name: azure-arc-ci-conformance result-format: junit spec: - image: mcr.microsoft.com/azuremonitor/containerinsights/cidev:ciconftest08142021 + image: mcr.microsoft.com/azuremonitor/containerinsights/cidev:ciconftest10202021 imagePullPolicy: Always name: plugin resources: {} diff --git a/test/e2e/src/common/constants.py b/test/e2e/src/common/constants.py index 392b10554..c557a1c91 100644 --- a/test/e2e/src/common/constants.py +++ b/test/e2e/src/common/constants.py @@ -40,6 +40,9 @@ TIMEOUT = 300 +# omsagent main container name +OMSAGENT_MAIN_CONTAINER_NAME = 'omsagent' + # WAIT TIME BEFORE READING THE AGENT LOGS AGENT_WAIT_TIME_SECS = "180" # Azure Monitor for Container Extension related diff --git a/test/e2e/src/common/kubernetes_pod_utility.py b/test/e2e/src/common/kubernetes_pod_utility.py index 27345fae7..d70f443f0 100644 --- a/test/e2e/src/common/kubernetes_pod_utility.py +++ b/test/e2e/src/common/kubernetes_pod_utility.py @@ -20,12 +20,12 @@ def get_pod_list(api_instance, namespace, label_selector=""): pytest.fail("Error occurred when retrieving pod information: " + str(e)) # get the content of the log file in the container via exec -def get_log_file_content(api_instance, namespace, podName, logfilePath): +def get_log_file_content(api_instance, namespace, podName, containerName, logfilePath): try: exec_command = ['tar','cf', '-', logfilePath] - return stream(api_instance.connect_get_namespaced_pod_exec, podName, namespace, command=exec_command, stderr=True, stdin=False, stdout=True, tty=False) + return stream(api_instance.connect_get_namespaced_pod_exec, podName, namespace, command=exec_command, container=containerName, stderr=True, stdin=False, stdout=True, tty=False) except Exception as e: - pytest.fail("Error occurred when retrieving log file content: " + str(e)) + pytest.fail("Error occurred when retrieving log file content: " + str(e)) # Function that watches events corresponding to pods in the given namespace and passes the events to a callback function def watch_pod_status(api_instance, namespace, timeout, callback=None): diff --git a/test/e2e/src/core/Dockerfile b/test/e2e/src/core/Dockerfile index cd85aee40..52bcd7cf8 100644 --- a/test/e2e/src/core/Dockerfile +++ b/test/e2e/src/core/Dockerfile @@ -6,7 +6,7 @@ RUN curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | && helm version RUN apt-get update && apt-get -y upgrade && \ - apt-get -f -y install curl apt-transport-https lsb-release gnupg python3-pip python-pip && \ + apt-get -f -y install curl apt-transport-https lsb-release gnupg python3-pip && \ curl -sL https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > /etc/apt/trusted.gpg.d/microsoft.asc.gpg && \ CLI_REPO=$(lsb_release -cs) && \ echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ ${CLI_REPO} main" \ diff --git a/test/e2e/src/tests/test_ds_workflows.py b/test/e2e/src/tests/test_ds_workflows.py index 731957788..e6d651e49 100755 --- a/test/e2e/src/tests/test_ds_workflows.py +++ b/test/e2e/src/tests/test_ds_workflows.py @@ -51,7 +51,7 @@ def test_ds_workflows(env_dict): for podItem in pod_list.items: podName = podItem.metadata.name logcontent = get_log_file_content( - api_instance, constants.AGENT_RESOURCES_NAMESPACE, podName, agentLogPath) + api_instance, constants.AGENT_RESOURCES_NAMESPACE, podName, constants.OMSAGENT_MAIN_CONTAINER_NAME, 
agentLogPath) if not logcontent: pytest.fail("logcontent should not be null or empty for pod: " + podName) loglines = logcontent.split("\n") diff --git a/test/e2e/src/tests/test_resource_status.py b/test/e2e/src/tests/test_resource_status.py index f2b5569e9..c240cbcf2 100755 --- a/test/e2e/src/tests/test_resource_status.py +++ b/test/e2e/src/tests/test_resource_status.py @@ -1,5 +1,6 @@ import pytest import constants +import time from kubernetes import client, config from results_utility import append_result_output @@ -21,6 +22,9 @@ def test_resource_status(env_dict): except Exception as e: pytest.fail("Error loading the in-cluster config: " + str(e)) + waitTimeSeconds = env_dict['AGENT_WAIT_TIME_SECS'] + time.sleep(int(waitTimeSeconds)) + # checking the deployment status check_kubernetes_deployment_status( constants.AGENT_RESOURCES_NAMESPACE, constants.AGENT_DEPLOYMENT_NAME, env_dict['TEST_AGENT_LOG_FILE']) diff --git a/test/e2e/src/tests/test_rs_workflows.py b/test/e2e/src/tests/test_rs_workflows.py index 36ec05867..6a29dcc73 100755 --- a/test/e2e/src/tests/test_rs_workflows.py +++ b/test/e2e/src/tests/test_rs_workflows.py @@ -39,9 +39,7 @@ def test_rs_workflows(env_dict): waitTimeSeconds = env_dict['AGENT_WAIT_TIME_SECS'] - print("start: waiting for seconds: {} for agent workflows to get emitted".format(waitTimeSeconds)) time.sleep(int(waitTimeSeconds)) - print("complete: waiting for seconds: {} for agent workflows to get emitted".format(waitTimeSeconds)) isOMSBaseAgent = env_dict.get('USING_OMSAGENT_BASE_AGENT') agentLogPath = constants.AGENT_FLUENTD_LOG_PATH @@ -49,7 +47,7 @@ def test_rs_workflows(env_dict): agentLogPath = constants.AGENT_OMSAGENT_LOG_PATH logcontent = get_log_file_content( - api_instance, constants.AGENT_RESOURCES_NAMESPACE, rspodName, agentLogPath) + api_instance, constants.AGENT_RESOURCES_NAMESPACE, rspodName, constants.OMSAGENT_MAIN_CONTAINER_NAME, agentLogPath) if not logcontent: pytest.fail("logcontent should not be null or empty for rs pod: {}".format(rspodName)) loglines = logcontent.split("\n") From 0bd3056e54ee82a113d6dbe65825e9665728bc26 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Tue, 26 Oct 2021 14:47:15 -0700 Subject: [PATCH 176/301] Fix scan break due to latest trivy changes --- .github/workflows/pr-checker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-checker.yml b/.github/workflows/pr-checker.yml index bae117dbe..8a7e542b3 100644 --- a/.github/workflows/pr-checker.yml +++ b/.github/workflows/pr-checker.yml @@ -56,7 +56,7 @@ jobs: format: 'table' severity: 'CRITICAL,HIGH' vuln-type: 'os,library' - skip-dirs: 'opt/telegraf,usr/sbin/telegraf' + skip-dirs: '/opt,/usr/sbin' exit-code: '1' timeout: '5m0s' WINDOWS-build: From 761b6412bfdbf5e8dcbf93b47385d2d4b1811983 Mon Sep 17 00:00:00 2001 From: Anders Johansen Date: Tue, 26 Oct 2021 16:35:52 -0700 Subject: [PATCH 177/301] Anjohans/configurable database name (#663) * First cut at an implementation * Reverting a change * Moving a few lines to better align with cluster URI config * Moving a few lines to better align with cluster URI config * Adding an extra check that won't hurt * Getting ADX database name from config rather than from secret * Reverse the mangling done by editor * Fixes to the code for reading the db name setting * More fixes to the rb code for settings * Tweaked and tested * Code review * Review follow-up * Remove whitespace --- build/common/installer/scripts/tomlparser.rb | 21 ++++++++++++++++++++ source/plugins/go/src/oms.go | 20 
++++++++++++++++++- source/plugins/go/src/utils.go | 2 +- 3 files changed, 41 insertions(+), 2 deletions(-) diff --git a/build/common/installer/scripts/tomlparser.rb b/build/common/installer/scripts/tomlparser.rb index b173ecfe3..32ea09aa3 100644 --- a/build/common/installer/scripts/tomlparser.rb +++ b/build/common/installer/scripts/tomlparser.rb @@ -26,6 +26,7 @@ @containerLogSchemaVersion = "" @collectAllKubeEvents = false @containerLogsRoute = "v2" # default for linux +@adxDatabaseName = "containerinsights" # default for all configurations if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 @containerLogsRoute = "v1" # default is v1 for windows until windows agent integrates windows ama end @@ -175,6 +176,23 @@ def populateSettingValuesFromConfigMap(parsedConfig) ConfigParseErrorLogger.logError("Exception while reading config map settings for container logs route - #{errorStr}, using defaults, please check config map for errors") end + #Get ADX database name setting + begin + if !parsedConfig[:log_collection_settings][:adx_database].nil? && !parsedConfig[:log_collection_settings][:adx_database][:name].nil? + if !parsedConfig[:log_collection_settings][:adx_database][:name].empty? + @adxDatabaseName = parsedConfig[:log_collection_settings][:adx_database][:name] + puts "config::Using config map setting for ADX database name : #{@adxDatabaseName}" + else + puts "config::Ignoring config map settings and using default value '#{@adxDatabaseName}' since provided adx database name value is empty" + end + else + puts "config::No ADX database name set, using default value : #{@adxDatabaseName}" + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while reading config map settings for adx database name - #{errorStr}, using default #{@adxDatabaseName}, please check config map for errors") + end + + end end end @@ -218,6 +236,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) file.write("export AZMON_CLUSTER_COLLECT_ALL_KUBE_EVENTS=#{@collectAllKubeEvents}\n") file.write("export AZMON_CONTAINER_LOGS_ROUTE=#{@containerLogsRoute}\n") file.write("export AZMON_CONTAINER_LOG_SCHEMA_VERSION=#{@containerLogSchemaVersion}\n") + file.write("export AZMON_ADX_DATABASE_NAME=#{@adxDatabaseName}\n") # Close file after writing all environment variables file.close puts "Both stdout & stderr log collection are turned off for namespaces: '#{@excludePath}' " @@ -266,6 +285,8 @@ def get_command_windows(env_variable_name, env_variable_value) file.write(commands) commands = get_command_windows('AZMON_CONTAINER_LOG_SCHEMA_VERSION', @containerLogSchemaVersion) file.write(commands) + commands = get_command_windows('AZMON_ADX_DATABASE_NAME', @adxDatabaseName) + file.write(commands) # Close file after writing all environment variables file.close diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 91a5b4b40..ee221a60b 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -117,6 +117,9 @@ const MdsdOutputStreamIdTagPrefix = "dcr-" //env variable to container type const ContainerTypeEnv = "CONTAINER_TYPE" +//Default ADX destination database name, can be overriden through configuration +const DefaultAdxDatabaseName = "containerinsights" + var ( // PluginConfiguration the plugins configuration PluginConfiguration map[string]string @@ -166,6 +169,8 @@ var ( AdxTenantID string //ADX client secret AdxClientSecret string + //ADX destination database name, default is DefaultAdxDatabaseName, can be overridden in configuration + 
AdxDatabaseName string // container log or container log v2 tag name for oneagent route MdsdContainerLogTagName string // kubemonagent events tag name for oneagent route @@ -1698,6 +1703,17 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { ContainerLogsRouteADX = false if strings.Compare(ContainerLogsRoute, ContainerLogsADXRoute) == 0 { + // Try to read the ADX database name from environment variables. Default to DefaultAdsDatabaseName if not set. + // This SHOULD be set by tomlparser.rb so it's a highly unexpected event if it isn't. + // It should be set by the logic in tomlparser.rb EVEN if ADX logging isn't enabled + AdxDatabaseName := strings.TrimSpace(os.Getenv("AZMON_ADX_DATABASE_NAME")) + + // Check the len of the provided name for database and use default if 0, just to be sure + if len(AdxDatabaseName) == 0 { + Log("Adx database name unexpecedly empty (check config AND implementation, should have been set by tomlparser.rb?) - will default to '%s'", DefaultAdxDatabaseName) + AdxDatabaseName = DefaultAdxDatabaseName + } + //check if adx clusteruri, clientid & secret are set var err error AdxClusterUri, err = ReadFileContents(PluginConfiguration["adx_cluster_uri_path"]) @@ -1708,6 +1724,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { Log("Invalid AdxClusterUri %s", AdxClusterUri) AdxClusterUri = "" } + AdxClientID, err = ReadFileContents(PluginConfiguration["adx_client_id_path"]) if err != nil { Log("Error when reading AdxClientID %s", err) @@ -1723,7 +1740,8 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { Log("Error when reading AdxClientSecret %s", err) } - if len(AdxClusterUri) > 0 && len(AdxClientID) > 0 && len(AdxClientSecret) > 0 && len(AdxTenantID) > 0 { + // AdxDatabaseName should never get in a state where its length is 0, but it doesn't hurt to add the check + if len(AdxClusterUri) > 0 && len(AdxClientID) > 0 && len(AdxClientSecret) > 0 && len(AdxTenantID) > 0 && len(AdxDatabaseName) > 0 { ContainerLogsRouteADX = true Log("Routing container logs thru %s route...", ContainerLogsADXRoute) fmt.Fprintf(os.Stdout, "Routing container logs thru %s route...\n", ContainerLogsADXRoute) diff --git a/source/plugins/go/src/utils.go b/source/plugins/go/src/utils.go index 6b3036f85..61c6898d7 100644 --- a/source/plugins/go/src/utils.go +++ b/source/plugins/go/src/utils.go @@ -192,7 +192,7 @@ func CreateADXClient() { //log.Fatalf("Unable to create ADX connection %s", err.Error()) } else { Log("Successfully created ADX Client. 
Creating Ingestor...") - ingestor, ingestorErr := ingest.New(client, "containerinsights", "ContainerLogV2") + ingestor, ingestorErr := ingest.New(client, AdxDatabaseName, "ContainerLogV2") if ingestorErr != nil { Log("Error::mdsd::Unable to create ADX ingestor %s", ingestorErr.Error()) } else { From fc955b31c49ffd23bbba60c35405d622ad1ab4a9 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 26 Oct 2021 17:40:51 -0700 Subject: [PATCH 178/301] Gangams/troubelshooting script for arc k8s (#682) * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * doc updates * doc updates * wip * wip * update repo for issues * fix minor one --- scripts/troubleshoot/README.md | 32 +- scripts/troubleshoot/troubleshooterrors.sh | 485 +++++++++++++++++++++ 2 files changed, 516 insertions(+), 1 deletion(-) create mode 100644 scripts/troubleshoot/troubleshooterrors.sh diff --git a/scripts/troubleshoot/README.md b/scripts/troubleshoot/README.md index 5ffa07639..650a5df6f 100644 --- a/scripts/troubleshoot/README.md +++ b/scripts/troubleshoot/README.md @@ -1,5 +1,14 @@ # Troubleshoot Guide for Azure Monitor for containers +# Azure Arc-enabled Kubernetes +The table below summarizes known issues you may face while using Azure Monitor for containers . + +| Issues and Error Messages | Action | +| ---- | --- | +| Error Message `No data for selected filters` | It may take some time to establish monitoring data flow for newly created clusters. Please allow at least 10-15 minutes for data to appear for your cluster. | +| Error Message `Error retrieving data` | While Azure Arc-enabled Kubernetes cluster is setting up for health and performance monitoring, a connection is established between the cluster and Azure Log Analytics workspace. Log Analytics workspace is used to store all monitoring data for your cluster. This error may occurr when your Log Analytics workspace has been deleted or lost. Please check whether your Log Analytics workspace is available. To find your Log Analytics workspace go [here.](https://docs.microsoft.com/en-us/azure/log-analytics/log-analytics-manage-access) and your workspace is available. If the workspace is missing, you will have to delete and create Microsoft.AzureMonitor.Containers extension https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-enable-arc-enabled-clusters?toc=/azure/azure-arc/kubernetes/toc.json. | + + # Azure Kubernetes Service (AKS) The table below summarizes known issues you may face while using Azure Monitor for containers . @@ -67,5 +76,26 @@ Please send this file to [AskCoin](mailto:askcoin@microsoft.com). We will respon For more details on Azure Resource Manager template deployment via cli refer to [this documentation](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-deploy-cli). If steps above did not help to resolve your issue, you can use either of the following methods to contact us for help: -* File a [GitHub Issue](https://github.com/Microsoft/OMS-docker/issues) +* File a [GitHub Issue](https://github.com/microsoft/Docker-Provider/issues) * Email [AskCoin](mailto:askcoin@microsoft.com) : Please attach the TroubleshootErrorDump.txt in the email generated by the troubleshooting script if you had tried running the script to solve your problem. 
+ +# Azure Arc-enabled Kubernetes + +You can use the troubleshooting script provided [here](https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_dev/scripts/troubleshoot/troubleshooterrors.sh) to diagnose the problem. + +Steps: +- Before executing the troubleshooting script, please install the following prerequisites if you don't have them already + - Install [Azure-CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) + - Install [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) + - Install [jq](https://stedolan.github.io/jq/download/) +- Download and execute the script + ``` bash + curl -LO https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_dev/scripts/troubleshoot/troubleshooterrors.sh + bash troubleshooterrors.sh --resource-id --kube-context + ``` +- This script will generate a TroubleshootDump.log which collects detailed information about container health onboarding. +Please send this file to [AskCoin](mailto:askcoin@microsoft.com). We will respond back to you. + +If steps above did not help to resolve your issue, you can use either of the following methods to contact us for help: +* File a [GitHub Issue](https://github.com/microsoft/Docker-Provider/issues) +* Email [AskCoin](mailto:askcoin@microsoft.com) : Please attach the TroubleshootDump.log in the email generated by the troubleshooting script if you had tried running the script to solve your problem. \ No newline at end of file diff --git a/scripts/troubleshoot/troubleshooterrors.sh b/scripts/troubleshoot/troubleshooterrors.sh new file mode 100644 index 000000000..ac08d7afc --- /dev/null +++ b/scripts/troubleshoot/troubleshooterrors.sh @@ -0,0 +1,485 @@ +#!/bin/bash +# +# This script troubleshoots errors related to onboarding of Azure Monitor for containers to a Kubernetes cluster hosted outside Azure and connected via Azure Arc +# Prerequisites : +# Azure CLI: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest + +# bash troubleshooterrors.sh --resource-id --kube-context + +set -e +set -o pipefail + +logFile="TroubleshootDump.log" +clusterType="connectedClusters" +extensionInstanceName="azuremonitor-containers" +# resource type for azure log analytics workspace +workspaceResourceProvider="Microsoft.OperationalInsights/workspaces" +workspaceSolutionResourceProvider="Microsoft.OperationsManagement/solutions" +agentK8sNamespace="kube-system" +azureArcK8sNamespace="azure-arc" +agentK8sSecretName="omsagent-secret" +agentK8sDeploymentName="omsagent-rs" +agentK8sLinuxDaemonsetName="omsagent" +agentArcK8sIdentityCRDName="container-insights-clusteridentityrequest" +workspaceId="" +workspacePrimarySharedKey="" +contactUSMessage="Please contact us by emailing askcoin@microsoft.com if you need any help with TroubleshootDump.log generated by this script" +dataCapHelpMessage="Please review and increase data cap https://docs.microsoft.com/en-us/azure/azure-monitor/logs/manage-cost-storage" +workspacePrivateLinkMessage="Please review this doc https://docs.microsoft.com/en-us/azure/azure-monitor/logs/private-link-security" +azureCLIInstallLinkMessage="Please install Azure-CLI as per the instructions https://docs.microsoft.com/en-us/cli/azure/install-azure-cli and rerun the troubleshooting script" +kubectlInstallLinkMessage="Please install kubectl as per the instructions https://kubernetes.io/docs/tasks/tools/#kubectl and rerun the troubleshooting script" +jqInstallLinkMessage="Please install jq as per instructions https://stedolan.github.io/jq/download/ and
rerun the troubleshooting script" +ciExtensionReOnboarding="Please reinstall extension as per instructions https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-enable-arc-enabled-clusters?toc=/azure/azure-arc/kubernetes/toc.json" +timesyncHelpMessage="Please check if you have any timesync issues on your cluster nodes" + +log_message() { + echo "$@" + echo "" + echo "$@" >> $logFile +} + + +login_to_azure() { + if [ "$isUsingServicePrincipal" = true ]; then + log_message "login to the azure using provided service principal creds" + az login --service-principal --username="$servicePrincipalClientId" --password="$servicePrincipalClientSecret" --tenant="$servicePrincipalTenantId" + else + log_message "login to the azure interactively" + az login --use-device-code + fi +} + +set_azure_subscription() { + local subscriptionId="$(echo ${1})" + log_message "setting the subscription id: ${subscriptionId} as current subscription for the azure cli" + az account set -s ${subscriptionId} + log_message "successfully configured subscription id: ${subscriptionId} as current subscription for the azure cli" +} + +usage() { + local basename=$(basename $0) + echo + echo "Troubleshooting Errors related to Azure Monitor for containers:" + echo "$basename --resource-id [--kube-context ]" +} + +parse_args() { + + if [ $# -le 1 ]; then + usage + exit 1 + fi + + # Transform long options to short ones + for arg in "$@"; do + shift + case "$arg" in + "--resource-id") set -- "$@" "-r" ;; + "--kube-context") set -- "$@" "-k" ;; + "--"*) usage ;; + *) set -- "$@" "$arg" ;; + esac + done + + local OPTIND opt + + while getopts 'hk:r:' opt; do + case "$opt" in + h) + usage + ;; + + k) + kubeconfigContext="$OPTARG" + log_message "name of kube-context is $OPTARG" + ;; + + r) + clusterResourceId="$OPTARG" + log_message "clusterResourceId is $OPTARG" + ;; + + ?) + usage + exit 1 + ;; + esac + done + shift "$(($OPTIND - 1))" + + local subscriptionId="$(echo ${clusterResourceId} | cut -d'/' -f3)" + local resourceGroup="$(echo ${clusterResourceId} | cut -d'/' -f5)" + + # get resource parts and join back to get the provider name + local providerNameResourcePart1="$(echo ${clusterResourceId} | cut -d'/' -f7)" + local providerNameResourcePart2="$(echo ${clusterResourceId} | cut -d'/' -f8)" + local providerName="$(echo ${providerNameResourcePart1}/${providerNameResourcePart2})" + + local clusterName="$(echo ${clusterResourceId} | cut -d'/' -f9)" + + # convert to lowercase for validation + providerName=$(echo $providerName | tr "[:upper:]" "[:lower:]") + + log_message "cluster SubscriptionId:" $subscriptionId + log_message "cluster ResourceGroup:" $resourceGroup + log_message "cluster ProviderName:" $providerName + log_message "cluster Name:" $clusterName + + if [ -z "$subscriptionId" -o -z "$resourceGroup" -o -z "$providerName" -o -z "$clusterName" ]; then + log_message "-e invalid cluster resource id. Please try with valid fully qualified resource id of the cluster" + exit 1 + fi + + if [[ $providerName != microsoft.* ]]; then + log_message "-e invalid azure cluster resource id format." 
+ exit 1 + fi + + # detect the resource provider from the provider name in the cluster resource id + if [ $providerName = "microsoft.kubernetes/connectedclusters" ]; then + log_message "provider cluster resource is of Azure Arc enabled Kubernetes cluster type" + isArcK8sCluster=true + resourceProvider=$arcK8sResourceProvider + else + log_message "-e not valid azure arc enabled kubernetes cluster resource id" + exit 1 + fi + + if [ -z "$kubeconfigContext" ]; then + log_message "using or getting current kube config context since --kube-context parameter not set " + fi + + if [ ! -z "$servicePrincipalClientId" -a ! -z "$servicePrincipalClientSecret" -a ! -z "$servicePrincipalTenantId" ]; then + log_message "using service principal creds (clientId, secret and tenantId) for azure login since provided" + isUsingServicePrincipal=true + fi +} + +command_exists() { + command -v "$@" > /dev/null 2>&1 +} + +validate_ci_extension() { + log_message "START:validate_ci_extension" + extension=$(az k8s-extension show -c ${4} -g ${3} -t $clusterType -n $extensionInstanceName) + log_message $extension + configurationSettings=$(az k8s-extension show -c ${4} -g ${3} -t $clusterType -n $extensionInstanceName --query "configurationSettings") + if [ -z "$configurationSettings" ]; then + log_message "-e error configurationSettings either null or empty" + log_message ${contactUSMessage} + exit 1 + fi + logAnalyticsWorkspaceResourceID=$(az k8s-extension show -c ${4} -g ${3} -t $clusterType -n $extensionInstanceName --query "configurationSettings.logAnalyticsWorkspaceResourceID" -o tsv | tr "[:upper:]" "[:lower:]" | tr -d "[:space:]") + log_message "Extension logAnalyticsWorkspaceResourceID: ${logAnalyticsWorkspaceResourceID}" + if [ -z "$logAnalyticsWorkspaceResourceID" ]; then + log_message "-e error logAnalyticsWorkspaceResourceID either null or empty in the config settings" + log_message ${contactUSMessage} + exit 1 + fi + + provisioningState=$(az k8s-extension show -c ${4} -g ${3} -t $clusterType -n $extensionInstanceName --query "provisioningState" -o tsv | tr "[:upper:]" "[:lower:]" | tr -d "[:space:]") + log_message "Extension provisioningState: ${provisioningState}" + if [ -z "$provisioningState" ]; then + log_message "-e error provisioningState either null or empty in the config settings" + log_message ${contactUSMessage} + exit 1 + fi + if [ "$provisioningState" != "succeeded" ]; then + log_message "-e error expected state of extension provisioningState MUST be succeeded state but actual state is ${provisioningState}" + log_message ${contactUSMessage} + exit 1 + fi + logAnalyticsWorkspaceDomain=$(az k8s-extension show -c ${4} -g ${3} -t $clusterType -n $extensionInstanceName --query 'configurationSettings."omsagent.domain"') + log_message "Extension logAnalyticsWorkspaceDomain: ${logAnalyticsWorkspaceDomain}" + if [ -z "$logAnalyticsWorkspaceDomain" ]; then + log_message "-e error logAnalyticsWorkspaceDomain either null or empty in the config settings" + log_message ${contactUSMessage} + exit 1 + fi + azureCloudName=${1} + if [ "$azureCloudName" = "azureusgovernment" ]; then + log_message "az cli configured cloud name:$azureCloudName" + if [ $logAnalyticsWorkspaceDomain = "opinsights.azure.us" ]; then + log_message "-e error expected value of logAnalyticsWorkspaceDomain MUST opinsights.azure.us but actual value is ${logAnalyticsWorkspaceDomain}" + log_message ${contactUSMessage} + exit 1 + fi + elif [ "$azureCloudName" = "azurecloud" ]; then + log_message "az cli configured cloud name:$azureCloudName" + if 
[ $logAnalyticsWorkspaceDomain = "opinsights.azure.com" ]; then + log_message "-e error expected value of logAnalyticsWorkspaceDomain MUST opinsights.azure.com but actual value is ${logAnalyticsWorkspaceDomain}" + log_message ${contactUSMessage} + exit 1 + fi + elif [ "$azureCloudName" = "azurechinacloud" ]; then + log_message "az cli configured cloud name:$azureCloudName" + if [ $logAnalyticsWorkspaceDomain = "opinsights.azure.cn" ]; then + log_message "-e error expected value of logAnalyticsWorkspaceDomain MUST opinsights.azure.cn but actual value is ${logAnalyticsWorkspaceDomain}" + log_message ${contactUSMessage} + exit 1 + fi + fi + + workspaceSubscriptionId="$(echo ${logAnalyticsWorkspaceResourceID} | cut -d'/' -f3 | tr "[:upper:]" "[:lower:]")" + workspaceResourceGroup="$(echo ${logAnalyticsWorkspaceResourceID} | cut -d'/' -f5)" + workspaceName="$(echo ${logAnalyticsWorkspaceResourceID} | cut -d'/' -f9)" + log_message "workspaceSubscriptionId:${workspaceSubscriptionId} workspaceResourceGroup:${workspaceResourceGroup} workspaceName:${workspaceName}" + + clusterSubscriptionId=${2} + # set the azure subscription to azure cli if the workspace in different sub than cluster + if [[ "$clusterSubscriptionId" != "$workspaceSubscriptionId" ]]; then + log_message "switch subscription id of workspace as active subscription for azure cli since workspace in different subscription than cluster: ${workspaceSubscriptionId}" + isClusterAndWorkspaceInSameSubscription=false + set_azure_subscription $workspaceSubscriptionId + fi + workspaceList=$(az resource list -g "$workspaceResourceGroup" -n "$workspaceName" --resource-type $workspaceResourceProvider) + log_message "workspace info:${workspaceList}" + if [ "$workspaceList" = "[]" ]; then + log_message "-e error workspace:${logAnalyticsWorkspaceResourceID} doesnt exist" + exit 1 + fi + + ciSolutionResourceName="ContainerInsights(${workspaceName})" + workspaceSolutionList=$(az resource list -g $workspaceResourceGroup -n $ciSolutionResourceName --resource-type $workspaceSolutionResourceProvider) + log_message "workspace solution info:${workspaceSolutionList}" + if [ "$workspaceSolutionList" = "[]" ]; then + log_message "-e error ContainerInsights solution on workspace:${logAnalyticsWorkspaceResourceID} doesnt exist" + exit 1 + fi + + privateLinkScopedResources=$(az resource show --ids ${logAnalyticsWorkspaceResourceID} --query properties.privateLinkScopedResources -o tsv | tr "[:upper:]" "[:lower:]" | tr -d "[:space:]") + log_message "workspace privateLinkScopedResources:${privateLinkScopedResources}" + + publicNetworkAccessForIngestion=$(az resource show --ids ${logAnalyticsWorkspaceResourceID} --query properties.publicNetworkAccessForIngestion -o tsv | tr "[:upper:]" "[:lower:]" | tr -d "[:space:]") + log_message "workspace publicNetworkAccessForIngestion:${publicNetworkAccessForIngestion}" + if [ -z "$privateLinkScopedResources" ]; then + if [ "$publicNetworkAccessForIngestion" != "enabled" ]; then + log_message "-e error Unless private link configuration, publicNetworkAccessForIngestion MUST be enabled for data ingestion" + log_message ${workspacePrivateLinkMessage} + exit 1 + fi + fi + publicNetworkAccessForQuery=$(az resource show --ids ${logAnalyticsWorkspaceResourceID} --query properties.publicNetworkAccessForQuery -o tsv | tr "[:upper:]" "[:lower:]" | tr -d "[:space:]") + log_message "workspace publicNetworkAccessForQuery:${publicNetworkAccessForQuery}" + if [ -z "$privateLinkScopedResources" ]; then + if [ "$publicNetworkAccessForQuery" != 
"enabled" ]; then + log_message "-e error Unless private link configuration, publicNetworkAccessForQuery MUST be enabled for data query" + log_message ${workspacePrivateLinkMessage} + exit 1 + fi + fi + + workspaceCappingDailyQuotaGb=$(az resource show --ids ${logAnalyticsWorkspaceResourceID} --query properties.workspaceCapping.dailyQuotaGb -o tsv | tr -d "[:space:]") + log_message "workspaceCapping dailyQuotaGb:${workspaceCappingDailyQuotaGb}" + if [ "$workspaceCappingDailyQuotaGb" != "-1.0" ]; then + log_message "-e error workspace configured daily quota and verify ingestion data reaching over the quota:${workspaceCappingDailyQuotaGb}" + log_message ${dataCapHelpMessage} + exit 1 + fi + + workspaceId=$(az resource show --ids ${logAnalyticsWorkspaceResourceID} --query properties.customerId -o tsv | tr -d "[:space:]") + log_message "workspaceId: ${workspaceId}" + + workspaceKey=$(az rest --method post --uri $logAnalyticsWorkspaceResourceID/sharedKeys?api-version=2015-11-01-preview --query primarySharedKey -o json) + workspacePrimarySharedKey=$(echo $workspaceKey | tr -d '"') + + log_message "END:validate_ci_extension:SUCCESS" +} + +validate_az_cli_installed_or_not() { + if command_exists az; then + log_message "detected azure cli installed" + azCLIVersion=$(az -v) + log_message "azure-cli version: ${azCLIVersion}" + azCLIExtension=$(az extension list --query "[?name=='k8s-extension'].name | [0]") + if [ "$azCLIExtension" = "k8s-extension" ]; then + azCLIExtensionVersion=$(az extension list --query "[?name=='k8s-extension'].version | [0]") + log_message "detected k8s-extension and current installed version: ${azCLIExtensionVersion}" + log_message "updating the k8s-extension version to latest available one" + az extension update --name 'k8s-extension' + else + log_message "adding k8s-extension since k8s-extension doesnt exist as installed" + az extension add --name 'k8s-extension' + fi + azCLIExtensionVersion=$(az extension list --query "[?name=='k8s-extension'].version | [0]") + log_message "current installed k8s-extension version: ${azCLIExtensionVersion}" + else + log_message "-e error azure cli doesnt exist as installed" + log_message ${azureCLIInstallLinkMessage} + exit 1 + fi +} + +validate_ci_agent_pods() { + log_message "START:validate_ci_agent_pods" + # verify the id and key of the workspace matches with workspace key value in the secret + wsID=$(kubectl get secrets ${agentK8sSecretName} -n ${agentK8sNamespace} -o json | jq -r ".data.WSID") + wsID=$(echo $wsID | base64 -d) + log_message "workspaceId: ${wsID} value in the ${agentK8sSecretName}" + + wsKEY=$(kubectl get secrets ${agentK8sSecretName} -n ${agentK8sNamespace} -o json | jq -r ".data.KEY") + wsKEY=$(echo $wsKEY | base64 -d) + + if [[ "$workspaceId" != "$wsID" ]]; then + log_message "-e error workspaceId: ${workspaceID} of the workspace doesnt match with workspaceId: ${wsID} value in the omsagent secret" + log_message $ciExtensionReOnboarding + exit 1 + fi + if [[ "$workspacePrimarySharedKey" != "$wsKEY" ]]; then + log_message "-e error workspacePrimarySharedKey of the workspace doesnt match with workspacekey value value in the omsagent secret" + log_message $ciExtensionReOnboarding + exit 1 + fi + + # verify state of agent deployment + readyReplicas=$(kubectl get deployments -n ${agentK8sNamespace} ${agentK8sDeploymentName} -o json | jq '.status.readyReplicas') + log_message "number of deployment ready replicas:${readyReplicas}" + if [[ "$readyReplicas" != "1" ]]; then + log_message "-e error number of readyReplicas of 
agent deployment MUST be 1" + exit 1 + fi + replicas=$(kubectl get deployments -n ${agentK8sNamespace} ${agentK8sDeploymentName} -o json | jq '.status.replicas') + log_message "number of deployment replicas:${replicas}" + if [[ "$replicas" != "1" ]]; then + log_message "-e error number of replicas of agent deployment MUST be 1" + exit 1 + fi + + # verify state of agent ds + currentNumberScheduled=$(kubectl get ds -n ${agentK8sNamespace} ${agentK8sLinuxDaemonsetName} -o json | jq '.status.currentNumberScheduled') + desiredNumberScheduled=$(kubectl get ds -n ${agentK8sNamespace} ${agentK8sLinuxDaemonsetName} -o json | jq '.status.desiredNumberScheduled') + log_message "number of linux deamonset pods currentNumberScheduled:${currentNumberScheduled} and currentNumberScheduled:${currentNumberScheduled}" + if [[ "$currentNumberScheduled" != "$desiredNumberScheduled" ]]; then + log_message "-e error desiredNumberScheduled: ${desiredNumberScheduled} doesnt match with currentNumberScheduled: ${currentNumberScheduled}" + log_message "-e error please fix the pod scheduling issues of omsagent daemonset pods in namespace: ${agentK8sNamespace}" + exit 1 + fi + + numberAvailable=$(kubectl get ds -n ${agentK8sNamespace} ${agentK8sLinuxDaemonsetName} -o json | jq '.status.numberAvailable') + log_message "number of linux deamonset pods numberAvailable:${numberAvailable}" + if [[ "$numberAvailable" != "$currentNumberScheduled" ]]; then + log_message "-e error numberAvailable: ${numberAvailable} doesnt match with currentNumberScheduled: ${currentNumberScheduled}" + log_message "-e error please fix the pod scheduling issues of omsagent daemonset pods in namespace: ${agentK8sNamespace}" + exit 1 + fi + numberReady=$(kubectl get ds -n ${agentK8sNamespace} ${agentK8sLinuxDaemonsetName} -o json | jq '.status.numberReady') + log_message "number of linux deamonset pods numberReady:${numberReady}" + if [[ "$numberAvailable" != "$numberReady" ]]; then + log_message "-e error numberAvailable: ${numberAvailable} doesnt match with numberReady: ${numberReady}" + log_message "-e error please fix the pod scheduling issues of omsagent daemonset pods in namespace: ${agentK8sNamespace}" + exit 1 + fi + log_message "END:validate_ci_agent_pods:SUCCESS" +} + +validate_ci_agent_identity_status() { + log_message "START:validate_ci_agent_identity_status" + log_message "Info of ${agentArcK8sIdentityCRDName} in namespace ${azureArcK8sNamespace}" + kubectl get azureclusteridentityrequests -n ${azureArcK8sNamespace} ${agentArcK8sIdentityCRDName} -o json >> $logFile + status=$(kubectl get azureclusteridentityrequests -n ${azureArcK8sNamespace} ${agentArcK8sIdentityCRDName} -o json | jq -r '.status') + if [ -z "$status" ]; then + log_message "-e error status field empty for the CRD ${agentArcK8sIdentityCRDName} in namespace ${azureArcK8sNamespace}" + log_message $timesyncHelpMessage + exit 1 + fi + expirationTime=$(kubectl get azureclusteridentityrequests -n ${azureArcK8sNamespace} ${agentArcK8sIdentityCRDName} -o json | jq -r '.status.expirationTime') + if [ -z "$expirationTime" ]; then + log_message "-e error expirationTime field empty for the CRD ${agentArcK8sIdentityCRDName} in namespace ${azureArcK8sNamespace}" + log_message $timesyncHelpMessage + exit 1 + fi + tokenReference=$(kubectl get azureclusteridentityrequests -n ${azureArcK8sNamespace} ${agentArcK8sIdentityCRDName} -o json | jq -r '.status.tokenReference') + if [ -z "$tokenReference" ]; then + log_message "-e error tokenReference field empty for the CRD 
${agentArcK8sIdentityCRDName} in namespace ${azureArcK8sNamespace}" + log_message $timesyncHelpMessage + exit 1 + fi + dataName=$(kubectl get azureclusteridentityrequests -n ${azureArcK8sNamespace} ${agentArcK8sIdentityCRDName} -o json | jq -r '.status.tokenReference.dataName') + if [ -z "$dataName" ]; then + log_message "-e error dataName field of tokenReference empty for the CRD ${agentArcK8sIdentityCRDName} in namespace ${azureArcK8sNamespace}" + log_message $timesyncHelpMessage + exit 1 + fi + secretName=$(kubectl get azureclusteridentityrequests -n ${azureArcK8sNamespace} ${agentArcK8sIdentityCRDName} -o json | jq -r '.status.tokenReference.secretName') + if [ -z "$secretName" ]; then + log_message "-e error secretName field of tokenReference empty for the CRD ${agentArcK8sIdentityCRDName} in namespace ${azureArcK8sNamespace}" + log_message $timesyncHelpMessage + exit 1 + fi + log_message "END:validate_ci_agent_identity_status:SUCCESS" +} + +get_nodes_pods_crds_info() { + log_message "START:get_nodes_pods_crds_info" + log_message "nodes" + kubectl get nodes >> $logFile + + log_message "kube-system pods" + kubectl get pods -n ${agentK8sNamespace} >> $logFile + + log_message "azurearck8spods" + kubectl get pods -n ${azureArcK8sNamespace} >> $logFile + + log_message "crds" + kubectl get crds -A >> $logFile + + log_message "azureclusteridentityrequests crds" + kubectl get crds azureclusteridentityrequests.clusterconfig.azure.com >> $logFile + kubectl get azureclusteridentityrequests -n ${azureArcK8sNamespace} >> $logFile + + log_message "container-insights-clusteridentityrequest crd" + kubectl describe azureclusteridentityrequests -n ${azureArcK8sNamespace} container-insights-clusteridentityrequest >> $logFile + log_message "END:get_nodes_pods_crds_info:SUCCESS" +} + +datetime=$(date -u) +log_message "*** Script Execution start @ ${datetime} ***" + +# verify azure cli installed or not +validate_az_cli_installed_or_not + +# parse and validate args +parse_args $@ + +# parse cluster resource id +clusterSubscriptionId="$(echo $clusterResourceId | cut -d'/' -f3 | tr "[:upper:]" "[:lower:]")" +clusterResourceGroup="$(echo $clusterResourceId | cut -d'/' -f5)" +providerName="$(echo $clusterResourceId | cut -d'/' -f7)" +clusterName="$(echo $clusterResourceId | cut -d'/' -f9)" + +# get the current active azure cloud of the az cli +azureCloudName=$(az cloud show --query name -o tsv | tr "[:upper:]" "[:lower:]" | tr -d "[:space:]") +log_message "azure cloud name: ${azureCloudName}" + +# login to azure interactively +login_to_azure + +# set the cluster subscription id as active sub for azure cli +set_azure_subscription $clusterSubscriptionId + +# validate ci extension +validate_ci_extension $azureCloudName $clusterSubscriptionId $clusterResourceGroup $clusterName + +# validate ci agent pods +if command_exists kubectl; then + if command_exists jq; then + validate_ci_agent_pods + else + log_message "-e error jq doesnt exist as installed" + log_message $jqInstallLinkMessage + exit 1 + fi +else + log_message "-e error kubectl doesnt exist as installed" + log_message ${kubectlInstallLinkMessage} + exit 1 +fi + +# validate ci cluster identity token +validate_ci_agent_identity_status + +# get nodes and pods status +get_nodes_pods_crds_info + +log_message "Everything looks good according to this script." 
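For reference, a minimal standalone sketch of the workspace-ID comparison this script automates (the secret name omsagent-secret and namespace kube-system are assumptions for illustration; the script itself takes them from its agentK8sSecretName and agentK8sNamespace variables):

# hypothetical manual spot-check; usage: sh check-wsid.sh <logAnalyticsWorkspaceResourceID>
workspaceResourceId="$1"
# workspace GUID (customerId) as reported by ARM for the Log Analytics workspace
expectedWsId=$(az resource show --ids "$workspaceResourceId" --query properties.customerId -o tsv | tr -d "[:space:]")
# workspace GUID stored in the agent secret on the cluster (assumed secret name/namespace)
actualWsId=$(kubectl get secret omsagent-secret -n kube-system -o json | jq -r ".data.WSID" | base64 -d)
if [ "$expectedWsId" = "$actualWsId" ]; then
  echo "workspace id in the agent secret matches the workspace"
else
  echo "mismatch: ${expectedWsId} vs ${actualWsId}; the Container Insights extension may need re-onboarding"
fi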
+log_message $contactUSMessage From 7c9cdc819eddf828b140b274cdeeb0121661a656 Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Mon, 1 Nov 2021 17:21:17 -0700 Subject: [PATCH 179/301] Sarah/remove cdpx creds (#685) * remove download of cdpx creds --- .pipelines/get-aad-app-creds-from-kv.sh | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/.pipelines/get-aad-app-creds-from-kv.sh b/.pipelines/get-aad-app-creds-from-kv.sh index a0ba464cc..8ef56cddb 100755 --- a/.pipelines/get-aad-app-creds-from-kv.sh +++ b/.pipelines/get-aad-app-creds-from-kv.sh @@ -11,8 +11,6 @@ do KV) KV=$VALUE ;; KVSECRETNAMEAPPID) AppId=$VALUE ;; KVSECRETNAMEAPPSECRET) AppSecret=$VALUE ;; - KVSECRETNAMECDPXAPPID) CdpxAppId=$VALUE ;; - KVSECRETNAMECDPXAPPSECRET) CdpxAppSecret=$VALUE ;; *) esac done @@ -29,16 +27,4 @@ az keyvault secret download --file ~/acrappsecret --vault-name ${KV} --name ${A echo "downloaded the appsecret from KV:${KV} and KV secret:${AppSecret}" -echo "key vault secret name for cdpx appid:${KVSECRETNAMECDPXAPPID}" - -echo "key vault secret name for cdpx appsecret:${KVSECRETNAMECDPXAPPSECRET}" - -az keyvault secret download --file ~/cdpxacrappid --vault-name ${KV} --name ${CdpxAppId} - -echo "downloaded the appid from KV:${KV} and KV secret:${CdpxAppId}" - -az keyvault secret download --file ~/cdpxacrappsecret --vault-name ${KV} --name ${CdpxAppSecret} - -echo "downloaded the appsecret from KV:${KV} and KV secret:${CdpxAppSecret}" - echo "end: get app id and secret from specified key vault" From f75eea66327776075c1094c636404e5ee3bdfa95 Mon Sep 17 00:00:00 2001 From: bragi92 Date: Fri, 5 Nov 2021 10:14:59 -0700 Subject: [PATCH 180/301] fix: subtract number instead of string + update fluentd version 1.14.2 to fix security vulnerability (#686) * fix: change default value to a number so that substraction happens correctly * update fluentd version to 1.14.2 * extra end statement * safely set to float * big decimal precision * revert omsagent * keep telemetry --- .github/workflows/run_unit_tests.yml | 2 +- build/common/installer/scripts/tomlparser.rb | 2 - kubernetes/linux/setup.sh | 2 +- kubernetes/omsagent.yaml | 1 + kubernetes/windows/Dockerfile | 2 +- kubernetes/windows/Dockerfile-dev-base-image | 2 +- source/plugins/ruby/kubelet_utils.rb | 41 ++++++++++++-------- 7 files changed, 29 insertions(+), 23 deletions(-) diff --git a/.github/workflows/run_unit_tests.yml b/.github/workflows/run_unit_tests.yml index 94ac4371a..435de91e8 100644 --- a/.github/workflows/run_unit_tests.yml +++ b/.github/workflows/run_unit_tests.yml @@ -26,7 +26,7 @@ jobs: uses: actions/checkout@v2 - name: install fluent run: | - sudo gem install fluentd -v "1.12.2" --no-document + sudo gem install fluentd -v "1.14.2" --no-document sudo fluentd --setup ./fluent - name: Run unit tests run: | diff --git a/build/common/installer/scripts/tomlparser.rb b/build/common/installer/scripts/tomlparser.rb index 32ea09aa3..03b470205 100644 --- a/build/common/installer/scripts/tomlparser.rb +++ b/build/common/installer/scripts/tomlparser.rb @@ -191,8 +191,6 @@ def populateSettingValuesFromConfigMap(parsedConfig) rescue => errorStr ConfigParseErrorLogger.logError("Exception while reading config map settings for adx database name - #{errorStr}, using default #{@adxDatabaseName}, please check config map for errors") end - - end end end diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 243677dd0..5bddfc604 100644 --- a/kubernetes/linux/setup.sh +++ 
b/kubernetes/linux/setup.sh @@ -52,7 +52,7 @@ sudo echo "deb http://ppa.launchpad.net/brightbox/ruby-ng/ubuntu bionic main" >> sudo apt-get update sudo apt-get install ruby2.6 ruby2.6-dev gcc make -y # fluentd v1 gem -gem install fluentd -v "1.12.2" --no-document +gem install fluentd -v "1.14.2" --no-document fluentd --setup ./fluent gem install gyoku iso8601 --no-doc diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 66f8c4010..a1a843196 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -927,3 +927,4 @@ spec: names: plural: healthstates kind: HealthState + \ No newline at end of file diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 0ddf67ab2..41ad7e7ba 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -20,7 +20,7 @@ RUN refreshenv \ && gem install cool.io -v 1.5.4 --platform ruby \ && gem install oj -v 3.3.10 \ && gem install json -v 2.2.0 \ -&& gem install fluentd -v 1.12.2 \ +&& gem install fluentd -v 1.14.2 \ && gem install win32-service -v 1.0.1 \ && gem install win32-ipc -v 0.7.0 \ && gem install win32-event -v 0.6.3 \ diff --git a/kubernetes/windows/Dockerfile-dev-base-image b/kubernetes/windows/Dockerfile-dev-base-image index 0081f9c53..501fead89 100644 --- a/kubernetes/windows/Dockerfile-dev-base-image +++ b/kubernetes/windows/Dockerfile-dev-base-image @@ -18,7 +18,7 @@ RUN refreshenv \ && gem install cool.io -v 1.5.4 --platform ruby \ && gem install oj -v 3.3.10 \ && gem install json -v 2.2.0 \ -&& gem install fluentd -v 1.12.2 \ +&& gem install fluentd -v 1.14.2 \ && gem install win32-service -v 1.0.1 \ && gem install win32-ipc -v 0.7.0 \ && gem install win32-event -v 0.6.3 \ diff --git a/source/plugins/ruby/kubelet_utils.rb b/source/plugins/ruby/kubelet_utils.rb index e31407b54..368ca8639 100644 --- a/source/plugins/ruby/kubelet_utils.rb +++ b/source/plugins/ruby/kubelet_utils.rb @@ -47,6 +47,9 @@ def get_node_allocatable(cpu_capacity, memory_capacity) @log.error "kubelet_utils.rb::get_node_allocatble - cpu_capacity or memory_capacity values not set. Hence we cannot calculate allocatable values" end + cpu_capacity = BigDecimal(cpu_capacity, 2).to_f + memory_capacity = BigDecimal(memory_capacity, 2).to_f + cpu_allocatable = 1.0 memory_allocatable = 1.0 @@ -56,74 +59,74 @@ def get_node_allocatable(cpu_capacity, memory_capacity) begin kubereserved_cpu = parsed_response["kubeletconfig"]["kubeReserved"]["cpu"] if kubereserved_cpu.nil? || kubereserved_cpu == "" - kubereserved_cpu = "0" + kubereserved_cpu = "0.0" end @log.info "get_node_allocatable::kubereserved_cpu #{kubereserved_cpu}" rescue => errorStr @log.error "Error in get_node_allocatable::kubereserved_cpu: #{errorStr}" - kubereserved_cpu = "0" + kubereserved_cpu = "0.0" ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::kubereserved_cpu: #{errorStr}") end begin kubereserved_memory = parsed_response["kubeletconfig"]["kubeReserved"]["memory"] if kubereserved_memory.nil? 
|| kubereserved_memory == "" - kubereserved_memory = "0" + kubereserved_memory = "0.0" end @log.info "get_node_allocatable::kubereserved_memory #{kubereserved_memory}" rescue => errorStr @log.error "Error in get_node_allocatable::kubereserved_memory: #{errorStr}" - kubereserved_memory = "0" - ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::kubereserved_cpu: #{errorStr}") + kubereserved_memory = "0.0" + ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::kubereserved_memory: #{errorStr}") end begin systemReserved_cpu = parsed_response["kubeletconfig"]["systemReserved"]["cpu"] if systemReserved_cpu.nil? || systemReserved_cpu == "" - systemReserved_cpu = "0" + systemReserved_cpu = "0.0" end @log.info "get_node_allocatable::systemReserved_cpu #{systemReserved_cpu}" rescue => errorStr # this will likely always reach this condition for AKS ~ only applicable for hyrid + MDM combination @log.error "Error in get_node_allocatable::systemReserved_cpu: #{errorStr}" - systemReserved_cpu = "0" - ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::kubereserved_cpu: #{errorStr}") + systemReserved_cpu = "0.0" + ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::systemReserved_cpu: #{errorStr}") end begin explicitlyReserved_cpu = parsed_response["kubeletconfig"]["reservedCPUs"] if explicitlyReserved_cpu.nil? || explicitlyReserved_cpu == "" - explicitlyReserved_cpu = "0" + explicitlyReserved_cpu = "0.0" end @log.info "get_node_allocatable::explicitlyReserved_cpu #{explicitlyReserved_cpu}" rescue => errorStr # this will likely always reach this condition for AKS ~ only applicable for hyrid + MDM combination @log.error "Error in get_node_allocatable::explicitlyReserved_cpu: #{errorStr}" - explicitlyReserved_cpu = "0" + explicitlyReserved_cpu = "0.0" ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::explicitlyReserved_cpu: #{errorStr}") end begin systemReserved_memory = parsed_response["kubeletconfig"]["systemReserved"]["memory"] if systemReserved_memory.nil? || systemReserved_memory == "" - systemReserved_memory = "0" + systemReserved_memory = "0.0" end @log.info "get_node_allocatable::systemReserved_memory #{systemReserved_memory}" rescue => errorStr @log.error "Error in get_node_allocatable::systemReserved_memory: #{errorStr}" - systemReserved_memory = "0" - ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::kubereserved_cpu: #{errorStr}") + systemReserved_memory = "0.0" + ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::systemReserved_memory: #{errorStr}") end begin evictionHard_memory = parsed_response["kubeletconfig"]["evictionHard"]["memory.available"] if evictionHard_memory.nil? 
|| evictionHard_memory == "" - evictionHard_memory = "0" + evictionHard_memory = "0.0" end @log.info "get_node_allocatable::evictionHard_memory #{evictionHard_memory}" rescue => errorStr @log.error "Error in get_node_allocatable::evictionHard_memory: #{errorStr}" - evictionHard_memory = "0" - ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::kubereserved_cpu: #{errorStr}") + evictionHard_memory = "0.0" + ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::evictionHard_memory: #{errorStr}") end # do calculation in nanocore since that's what KubernetesApiClient.getMetricNumericValue expects @@ -137,9 +140,13 @@ def get_node_allocatable(cpu_capacity, memory_capacity) end # convert back to units similar to what we get for capacity cpu_allocatable = cpu_allocatable / (1000.0 ** 2) - @log.info "CPU Allocatable #{cpu_allocatable}" memory_allocatable = memory_capacity - (KubernetesApiClient.getMetricNumericValue("memory", kubereserved_memory) + KubernetesApiClient.getMetricNumericValue("memory", systemReserved_memory) + KubernetesApiClient.getMetricNumericValue("memory", evictionHard_memory)) + + cpu_allocatable = BigDecimal(cpu_allocatable, 2).to_f + memory_allocatable = BigDecimal(memory_allocatable, 2).to_f + + @log.info "CPU Allocatable #{cpu_allocatable}" @log.info "Memory Allocatable #{memory_allocatable}" return [cpu_allocatable, memory_allocatable] From 15ee6c53337bec218cabf02aa3234f6b1e0ea412 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Fri, 5 Nov 2021 11:28:21 -0700 Subject: [PATCH 181/301] Faster Linux builds (part 1) (#687) * moved docker image arg later on to enable docker build caching * fixing image tag (doh) --- kubernetes/linux/Dockerfile | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 90acb4959..9164abc9c 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,8 +2,6 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod10132021 -ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi ENV MALLOC_ARENA_MAX 2 @@ -18,6 +16,10 @@ ENV KUBE_CLIENT_BACKOFF_DURATION 0 ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python-ctypes init-system-helpers net-tools rsyslog cron vim dmidecode apt-transport-https gnupg && rm -rf /var/lib/apt/lists/* COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs defaultpromenvvariables-sidecar mdsd.xml envmdsd logrotate.conf $tmpdir/ + +ARG IMAGE_TAG=ciprod10132021 +ENV AGENT_VERSION ${IMAGE_TAG} + WORKDIR ${tmpdir} # copy docker provider shell bundle to use the agent image @@ -27,3 +29,4 @@ COPY ./Linux_ULINUX_1.0_x64_64_Release/docker-cimprov-*.*.*-*.x86_64.sh . 
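Why this reordering speeds up builds: Docker reuses cached layers only up to the first instruction whose inputs changed, and because AGENT_VERSION is set from IMAGE_TAG right where the ARG is declared, a new tag previously invalidated the apt-get and COPY layers that followed it on every CI build. With the ARG/ENV moved below those layers, repeat builds can serve them from cache. A rough illustration, assuming the docker-cimprov bundle has already been staged into the build context and using placeholder image names:

# illustration only: two builds that differ only in IMAGE_TAG
docker build -t ci-linux:tagA --build-arg IMAGE_TAG=tagA kubernetes/linux
docker build -t ci-linux:tagB --build-arg IMAGE_TAG=tagB kubernetes/linux
# in the second build, every step above "ARG IMAGE_TAG" should report "Using cache",
# so only the layers at and after the ARG are re-executed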
RUN chmod 775 $tmpdir/*.sh; sync; $tmpdir/setup.sh CMD [ "/opt/main.sh" ] + From b4ca054e30a7271e28b5fd38f6cdeaaf9ebfe370 Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Thu, 13 Jan 2022 13:25:53 -0800 Subject: [PATCH 182/301] Sarah/fluentbit windows log (#688) * upgrade fluentbit version for windows * saving progress--fluent bit log tailing working for windows * use configmap values for fluent-bit.conf where necessary and make necessary files common * revert certificategenerator * remove tomlparser-agent-config from linux folder * clean up fluent.conf * clean up fluent-bit.conf * revert image tag * fix agent tag * make fluent bit flush interval configurable * clean up unecessary conf files * remove unecessary parts of fluent and fluent-bit conf * log level back to info * add fbit env variables for omsagent-win * moving db files to var directory --- .../installer/conf/azm-containers-parser.conf | 0 .../scripts/td-agent-bit-conf-customizer.rb | 5 ++ .../scripts/tomlparser-agent-config.rb | 34 ++++++++++ build/common/installer/scripts/tomlparser.rb | 2 + .../installer/datafiles/base_container.data | 4 +- build/windows/installer/conf/fluent-bit.conf | 63 ++++++++++++----- .../installer/conf/fluent-cri-parser.conf | 6 -- .../installer/conf/fluent-docker-parser.conf | 5 -- build/windows/installer/conf/fluent.conf | 68 ------------------- kubernetes/linux/setup.sh | 2 +- kubernetes/omsagent.yaml | 6 ++ kubernetes/windows/Dockerfile | 4 +- kubernetes/windows/Dockerfile-dev-image | 4 +- kubernetes/windows/main.ps1 | 15 ++-- kubernetes/windows/setup.ps1 | 7 +- 15 files changed, 110 insertions(+), 115 deletions(-) rename build/{linux => common}/installer/conf/azm-containers-parser.conf (100%) rename build/{linux => common}/installer/scripts/tomlparser-agent-config.rb (87%) delete mode 100644 build/windows/installer/conf/fluent-cri-parser.conf delete mode 100644 build/windows/installer/conf/fluent-docker-parser.conf diff --git a/build/linux/installer/conf/azm-containers-parser.conf b/build/common/installer/conf/azm-containers-parser.conf similarity index 100% rename from build/linux/installer/conf/azm-containers-parser.conf rename to build/common/installer/conf/azm-containers-parser.conf diff --git a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb index f29c87407..995d72b87 100644 --- a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb +++ b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb @@ -3,6 +3,11 @@ @td_agent_bit_conf_path = "/etc/opt/microsoft/docker-cimprov/td-agent-bit.conf" +@os_type = ENV["OS_TYPE"] +if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 + @td_agent_bit_conf_path = "/etc/fluent-bit/fluent-bit.conf" +end + @default_service_interval = "15" @default_mem_buf_limit = "10" diff --git a/build/linux/installer/scripts/tomlparser-agent-config.rb b/build/common/installer/scripts/tomlparser-agent-config.rb similarity index 87% rename from build/linux/installer/scripts/tomlparser-agent-config.rb rename to build/common/installer/scripts/tomlparser-agent-config.rb index 4daaf6a0c..052bb5a5d 100644 --- a/build/linux/installer/scripts/tomlparser-agent-config.rb +++ b/build/common/installer/scripts/tomlparser-agent-config.rb @@ -228,3 +228,37 @@ def populateSettingValuesFromConfigMap(parsedConfig) puts "Exception while opening file for writing config environment variables" puts "****************End Config Processing********************" end + +def get_command_windows(env_variable_name, env_variable_value) + return "[System.Environment]::SetEnvironmentVariable(\"#{env_variable_name}\", \"#{env_variable_value}\", \"Process\")" + "\n" + "[System.Environment]::SetEnvironmentVariable(\"#{env_variable_name}\", \"#{env_variable_value}\", \"Machine\")" + "\n" +end + +if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 + # Write the settings to file, so that they can be set as environment variables + file = File.open("setagentenv.ps1", "w") + + if !file.nil? + if @fbitFlushIntervalSecs > 0 + commands = get_command_windows('FBIT_SERVICE_FLUSH_INTERVAL', @fbitFlushIntervalSecs) + file.write(commands) + end + if @fbitTailBufferChunkSizeMBs > 0 + commands = get_command_windows('FBIT_TAIL_BUFFER_CHUNK_SIZE', @fbitTailBufferChunkSizeMBs) + file.write(commands) + end + if @fbitTailBufferMaxSizeMBs > 0 + commands = get_command_windows('FBIT_TAIL_BUFFER_MAX_SIZE', @fbitTailBufferMaxSizeMBs) + file.write(commands) + end + if @fbitTailMemBufLimitMBs > 0 + commands = get_command_windows('FBIT_TAIL_MEM_BUF_LIMIT', @fbitTailMemBufLimitMBs) + file.write(commands) + end + # Close file after writing all environment variables + file.close + puts "****************End Config Processing********************" + else + puts "Exception while opening file for writing config environment variables for WINDOWS LOG" + puts "****************End Config Processing********************" + end +end \ No newline at end of file diff --git a/build/common/installer/scripts/tomlparser.rb b/build/common/installer/scripts/tomlparser.rb index 03b470205..64d6d48fb 100644 --- a/build/common/installer/scripts/tomlparser.rb +++ b/build/common/installer/scripts/tomlparser.rb @@ -29,6 +29,8 @@ @adxDatabaseName = "containerinsights" # default for all configurations if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 @containerLogsRoute = "v1" # default is v1 for windows until windows agent integrates windows ama + # This path format is necessary for fluent-bit in windows + @logTailPath = "C:\\var\\log\\containers\\*.log" end # Use parser to parse the configmap toml file to a ruby structure def parseConfigMap diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index d104a5084..985c73a17 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -34,7 +34,7 @@ MAINTAINER: 'Microsoft Corporation' /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf; build/linux/installer/conf/td-agent-bit.conf; 644; root; root /etc/opt/microsoft/docker-cimprov/td-agent-bit-prom-side-car.conf; build/linux/installer/conf/td-agent-bit-prom-side-car.conf; 644; root; root /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf; build/linux/installer/conf/td-agent-bit-rs.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf; build/linux/installer/conf/azm-containers-parser.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf; build/common/installer/conf/azm-containers-parser.conf; 644; root; root /etc/opt/microsoft/docker-cimprov/out_oms.conf; build/linux/installer/conf/out_oms.conf; 644; root; root /etc/opt/microsoft/docker-cimprov/test.json; build/linux/installer/conf/test.json; 644; root; root /etc/opt/microsoft/docker-cimprov/telegraf.conf; build/linux/installer/conf/telegraf.conf; 644; root; root @@ -48,7 +48,7 @@ MAINTAINER: 'Microsoft Corporation' /opt/tomlparser-metric-collection-config.rb; build/linux/installer/scripts/tomlparser-metric-collection-config.rb; 755; root; root -/opt/tomlparser-agent-config.rb; build/linux/installer/scripts/tomlparser-agent-config.rb; 755; root; root +/opt/tomlparser-agent-config.rb; build/common/installer/scripts/tomlparser-agent-config.rb; 755; root; root /opt/tomlparser.rb; build/common/installer/scripts/tomlparser.rb; 755; root; root /opt/td-agent-bit-conf-customizer.rb; build/common/installer/scripts/td-agent-bit-conf-customizer.rb; 755; root; root /opt/ConfigParseErrorLogger.rb; build/common/installer/scripts/ConfigParseErrorLogger.rb; 755; root; root diff --git a/build/windows/installer/conf/fluent-bit.conf b/build/windows/installer/conf/fluent-bit.conf index 1eebe5fd6..243056505 100644 --- a/build/windows/installer/conf/fluent-bit.conf +++ b/build/windows/installer/conf/fluent-bit.conf @@ -1,25 +1,54 @@ [SERVICE] - Flush 15 - Daemon Off - Log_Level info - Log_File /etc/fluent-bit/fluent-bit.log + #Default service flush interval is 15 seconds + ${SERVICE_FLUSH_INTERVAL} + Daemon Off + storage.path /etc/fluent-bit/flbstore/ + storage.sync normal + storage.checksum off + storage.backlog.mem_limit 10M + Log_Level info + Parsers_File /etc/fluent-bit/azm-containers-parser.conf + Log_File /etc/fluent-bit/fluent-bit.log [INPUT] - Name forward - Listen 127.0.0.1 - Port 25230 - Mem_Buf_Limit 10m - Chunk_Size 32 - Buffer_Size 64 + Name tail + Tag oms.container.log.la.* + Path ${AZMON_LOG_TAIL_PATH} + Read_from_Head true + DB C:\\var\\log\\omsagent-fblogs.db + DB.Sync Off + Parser docker + ${TAIL_MEM_BUF_LIMIT} + ${TAIL_BUFFER_CHUNK_SIZE} + ${TAIL_BUFFER_MAX_SIZE} + Rotate_Wait 20 + Refresh_Interval 30 + Path_Key filepath + Skip_Long_Lines On + Ignore_Older 5m + Exclude_Path ${AZMON_CLUSTER_LOG_TAIL_EXCLUDE_PATH} [INPUT] - Name tcp - Tag 
oms.container.perf.telegraf.* - Listen 0.0.0.0 - Port 25229 - Chunk_Size 32 - Buffer_Size 64 - Mem_Buf_Limit 5m + Name tail + Tag oms.container.log.flbplugin.* + Path C:\\var\\log\\containers\\omsagent*.log + Read_from_Head true + DB C:\\var\\log\\omsagent-fluentbit-containers.db + DB.Sync Off + Parser docker + Mem_Buf_Limit 1m + Path_Key filepath + Skip_Long_Lines On + Ignore_Older 2m + +[INPUT] + Name tcp + Tag oms.container.perf.telegraf.* + Listen 0.0.0.0 + Port 25229 + Chunk_Size 32 + Buffer_Size 64 + Mem_Buf_Limit 5m [OUTPUT] Name oms diff --git a/build/windows/installer/conf/fluent-cri-parser.conf b/build/windows/installer/conf/fluent-cri-parser.conf deleted file mode 100644 index 86f1572ca..000000000 --- a/build/windows/installer/conf/fluent-cri-parser.conf +++ /dev/null @@ -1,6 +0,0 @@ - - @type regexp - expression ^(? diff --git a/build/windows/installer/conf/fluent-docker-parser.conf b/build/windows/installer/conf/fluent-docker-parser.conf deleted file mode 100644 index 9dc800aeb..000000000 --- a/build/windows/installer/conf/fluent-docker-parser.conf +++ /dev/null @@ -1,5 +0,0 @@ - - @type json - time_format %Y-%m-%dT%H:%M:%S.%NZ - keep_time_key true - diff --git a/build/windows/installer/conf/fluent.conf b/build/windows/installer/conf/fluent.conf index 741e5ce19..a78ac58fa 100644 --- a/build/windows/installer/conf/fluent.conf +++ b/build/windows/installer/conf/fluent.conf @@ -11,31 +11,6 @@ @log_level debug - - @type tail - path "#{ENV['AZMON_LOG_TAIL_PATH']}" - exclude_path "#{ENV['AZMON_CLUSTER_LOG_TAIL_EXCLUDE_PATH']}" - pos_file /var/opt/microsoft/fluent/fluentd-containers.log.pos - tag oms.container.log.la - @log_level trace - path_key tailed_path - limit_recently_modified 5m - # if the container runtime is non docker then this will be updated to fluent-cri-parser.conf during container startup - @include fluent-docker-parser.conf - - - - @type tail - path /var/log/containers/omsagent*.log - pos_file /opt/microsoft/fluent/omsagent-fluentd-containers.log.pos - tag oms.container.log.flbplugin - @log_level trace - path_key tailed_path - read_from_head true - # if the container runtime is non docker then this will be updated to fluent-cri-parser.conf during container startup - @include fluent-docker-parser.conf - - #custom_metrics_mdm filter plugin @type cadvisor2mdm @@ -44,23 +19,6 @@ @log_level info - - @type grep - - key stream - pattern "#{ENV['AZMON_LOG_EXCLUSION_REGEX_PATTERN']}" - - - - - @type record_transformer - # fluent-plugin-record-modifier more light-weight but needs to be installed (dependency worth it?) 
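(Context for this change: the container-log tail sources and filters removed from fluent.conf in this commit are replaced by the tail inputs added to fluent-bit.conf earlier in the patch, whose ${...} placeholders are rendered at container start by td-agent-bit-conf-customizer.rb from the FBIT_* settings. Below is a rough shell stand-in for that substitution step; the real customizer is Ruby, and the option names and default values shown here are assumptions based on the config above.)

# hypothetical stand-in for the placeholder rendering done by the customizer
conf=/etc/fluent-bit/fluent-bit.conf
sed -i \
  -e "s|\${SERVICE_FLUSH_INTERVAL}|Flush ${FBIT_SERVICE_FLUSH_INTERVAL:-15}|" \
  -e "s|\${TAIL_MEM_BUF_LIMIT}|Mem_Buf_Limit ${FBIT_TAIL_MEM_BUF_LIMIT:-10}m|" \
  -e "s|\${TAIL_BUFFER_CHUNK_SIZE}|Buffer_Chunk_Size ${FBIT_TAIL_BUFFER_CHUNK_SIZE:-1}m|" \
  -e "s|\${TAIL_BUFFER_MAX_SIZE}|Buffer_Max_Size ${FBIT_TAIL_BUFFER_MAX_SIZE:-1}m|" \
  "$conf"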
- remove_keys tailed_path - - filepath ${record["tailed_path"]} - - - @type mdm @log_level debug @@ -77,29 +35,3 @@ retry_mdm_post_wait_minutes 30 - - - @type forward - send_timeout 60s - recover_wait 10s - hard_timeout 60s - heartbeat_type none - ignore_network_errors_at_startup true - - name logaggregationserver - host 127.0.0.1 - port 25230 - weight 60 - - - - overflow_action throw_exception - chunk_limit_size 32k - queued_chunks_limit_size 256 - flush_interval 1 - flush_thread_interval 0.5 - flush_thread_burst_interval 0.01 - flush_thread_count 4 - retry_forever true - - diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 5bddfc604..80a1b5b1d 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -44,7 +44,7 @@ chmod 777 /opt/telegraf wget -qO - https://packages.fluentbit.io/fluentbit.key | sudo apt-key add - sudo echo "deb https://packages.fluentbit.io/ubuntu/xenial xenial main" >> /etc/apt/sources.list sudo apt-get update -sudo apt-get install td-agent-bit=1.6.8 -y +sudo apt-get install td-agent-bit=1.7.8 -y # install ruby2.6 sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F5DA5F09C3173AA6 diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index a1a843196..152f2313b 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -783,6 +783,12 @@ spec: cpu: 200m memory: 600Mi env: + - name: FBIT_SERVICE_FLUSH_INTERVAL + value: "15" + - name: FBIT_TAIL_BUFFER_CHUNK_SIZE + value: "1" + - name: FBIT_TAIL_BUFFER_MAX_SIZE + value: "1" # azure devops pipeline uses AKS_RESOURCE_ID and AKS_REGION hence ensure to uncomment these - name: AKS_RESOURCE_ID value: "VALUE_AKS_RESOURCE_ID_VALUE" diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 41ad7e7ba..671a89246 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -57,10 +57,8 @@ COPY ./omsagentwindows/out_oms.so /opt/omsagentwindows/out_oms.so # copy fluent, fluent-bit and out_oms conf files COPY ./omsagentwindows/installer/conf/fluent.conf /etc/fluent/ -# copy fluent docker and cri parser conf files -COPY ./omsagentwindows/installer/conf/fluent-cri-parser.conf /etc/fluent/ -COPY ./omsagentwindows/installer/conf/fluent-docker-parser.conf /etc/fluent/ COPY ./omsagentwindows/installer/conf/fluent-bit.conf /etc/fluent-bit +COPY ./omsagentwindows/installer/conf/azm-containers-parser.conf /etc/fluent-bit/ COPY ./omsagentwindows/installer/conf/out_oms.conf /etc/omsagentwindows # copy telegraf conf file diff --git a/kubernetes/windows/Dockerfile-dev-image b/kubernetes/windows/Dockerfile-dev-image index 35aa83bd9..c38889f7b 100644 --- a/kubernetes/windows/Dockerfile-dev-image +++ b/kubernetes/windows/Dockerfile-dev-image @@ -19,10 +19,8 @@ COPY ./omsagentwindows/out_oms.so /opt/omsagentwindows/out_oms.so # copy fluent, fluent-bit and out_oms conf files COPY ./omsagentwindows/installer/conf/fluent.conf /etc/fluent/ -# copy fluent docker and cri parser conf files -COPY ./omsagentwindows/installer/conf/fluent-cri-parser.conf /etc/fluent/ -COPY ./omsagentwindows/installer/conf/fluent-docker-parser.conf /etc/fluent/ COPY ./omsagentwindows/installer/conf/fluent-bit.conf /etc/fluent-bit +COPY ./omsagentwindows/installer/conf/azm-containers-parser.conf /etc/fluent-bit/ COPY ./omsagentwindows/installer/conf/out_oms.conf /etc/omsagentwindows # copy telegraf conf file diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 3cbc11e20..733ddb408 100644 --- a/kubernetes/windows/main.ps1 +++ 
b/kubernetes/windows/main.ps1 @@ -297,6 +297,13 @@ function Set-EnvironmentVariables { # run config parser ruby /opt/omsagentwindows/scripts/ruby/tomlparser.rb .\setenv.ps1 + + #Parse the configmap to set the right environment variables for agent config. + ruby /opt/omsagentwindows/scripts/ruby/tomlparser-agent-config.rb + .\setagentenv.ps1 + + #Replace placeholders in fluent-bit.conf + ruby /opt/omsagentwindows/scripts/ruby/td-agent-bit-conf-customizer.rb # run mdm config parser ruby /opt/omsagentwindows/scripts/ruby/tomlparser-mdm-metrics-config.rb @@ -418,18 +425,18 @@ function Get-ContainerRuntime { function Start-Fluent-Telegraf { - # Run fluent-bit service first so that we do not miss any logs being forwarded by the fluentd service and telegraf service. + $containerRuntime = Get-ContainerRuntime + + # Run fluent-bit service first so that we do not miss any logs being forwarded by the telegraf service. # Run fluent-bit as a background job. Switch this to a windows service once fluent-bit supports natively running as a windows service Start-Job -ScriptBlock { Start-Process -NoNewWindow -FilePath "C:\opt\fluent-bit\bin\fluent-bit.exe" -ArgumentList @("-c", "C:\etc\fluent-bit\fluent-bit.conf", "-e", "C:\opt\omsagentwindows\out_oms.so") } - $containerRuntime = Get-ContainerRuntime - #register fluentd as a service and start # there is a known issues with win32-service https://github.com/chef/win32-service/issues/70 if (![string]::IsNullOrEmpty($containerRuntime) -and [string]$containerRuntime.StartsWith('docker') -eq $false) { # change parser from docker to cri if the container runtime is not docker Write-Host "changing parser from Docker to CRI since container runtime : $($containerRuntime) and which is non-docker" - (Get-Content -Path C:/etc/fluent/fluent.conf -Raw) -replace 'fluent-docker-parser.conf', 'fluent-cri-parser.conf' | Set-Content C:/etc/fluent/fluent.conf + (Get-Content -Path C:/etc/fluent-bit/fluent-bit.conf -Raw) -replace 'docker', 'cri' | Set-Content C:/etc/fluent-bit/fluent-bit.conf } # Start telegraf only in sidecar scraping mode diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index 3e47b7eb2..8742fba8b 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -1,8 +1,3 @@ -# -################# Dangerous to use appveyor links - the builds are removed after 6 months -# -#ARG FLUENTBIT_URL=https://ci.appveyor.com/api/buildjobs/37lho3xf8j5i6crj/artifacts/build%2Ftd-agent-bit-1.4.0-win64.zip - Write-Host ('Creating folder structure') New-Item -Type Directory -Path /installation -ErrorAction SilentlyContinue @@ -21,7 +16,7 @@ Write-Host ('Creating folder structure') Write-Host ('Installing Fluent Bit'); try { - $fluentBitUri='https://github.com/microsoft/OMS-docker/releases/download/winakslogagent/td-agent-bit-1.4.0-win64.zip' + $fluentBitUri='https://fluentbit.io/releases/1.7/td-agent-bit-1.7.8-win64.zip' Invoke-WebRequest -Uri $fluentBitUri -OutFile /installation/td-agent-bit.zip Expand-Archive -Path /installation/td-agent-bit.zip -Destination /installation/fluent-bit Move-Item -Path /installation/fluent-bit/*/* -Destination /opt/fluent-bit/ -ErrorAction SilentlyContinue From 5b9988cab76c4f78af17dc240de4e08a489a1e97 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 21 Jan 2022 15:04:00 -0800 Subject: [PATCH 183/301] default to port 10250 & containerd for linux agent (#699) * default to port 10250 & containerd * fix pr feedback --- kubernetes/linux/main.sh | 29 +++++++++++++++++------------ 1 file 
changed, 17 insertions(+), 12 deletions(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index a9184ab53..980c15586 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -206,7 +206,7 @@ if [ -e "/etc/omsagent-secret/WSID" ]; then echo "export MDSD_PROXY_USERNAME=$MDSD_PROXY_USERNAME" >> ~/.bashrc export MDSD_PROXY_PASSWORD_FILE=/opt/microsoft/docker-cimprov/proxy_password echo "export MDSD_PROXY_PASSWORD_FILE=$MDSD_PROXY_PASSWORD_FILE" >> ~/.bashrc - + #TODO: Compression + proxy creates a deserialization error in ODS. This needs a fix in MDSD export MDSD_ODS_COMPRESSION_LEVEL=0 echo "export MDSD_ODS_COMPRESSION_LEVEL=$MDSD_ODS_COMPRESSION_LEVEL" >> ~/.bashrc @@ -425,19 +425,24 @@ fi #Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request echo "Making wget request to cadvisor endpoint with port 10250" -#Defaults to use port 10255 -cAdvisorIsSecure=false -RET_CODE=`wget --server-response https://$NODE_IP:10250/stats/summary --no-check-certificate --header="Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" 2>&1 | awk '/^ HTTP/{print $2}'` -if [ $RET_CODE -eq 200 ]; then - cAdvisorIsSecure=true +#Defaults to use secure port: 10250 +cAdvisorIsSecure=true +RET_CODE=$(wget --server-response https://$NODE_IP:10250/stats/summary --no-check-certificate --header="Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" 2>&1 | awk '/^ HTTP/{print $2}') +if [ -z "$RET_CODE" ] || [ $RET_CODE -ne 200 ]; then + echo "Making wget request to cadvisor endpoint with port 10255 since failed with port 10250" + RET_CODE=$(wget --server-response http://$NODE_IP:10255/stats/summary 2>&1 | awk '/^ HTTP/{print $2}') + if [ ! -z "$RET_CODE" ] && [ $RET_CODE -eq 200 ]; then + cAdvisorIsSecure=false + fi fi -# default to docker since this is default in AKS as of now and change to containerd once this becomes default in AKS -export CONTAINER_RUNTIME="docker" +# default to containerd since this is common default in AKS and non-AKS +export CONTAINER_RUNTIME="containerd" export NODE_NAME="" + if [ "$cAdvisorIsSecure" = true ]; then - echo "Wget request using port 10250 succeeded. Using 10250" + echo "Using port 10250" export IS_SECURE_CADVISOR_PORT=true echo "export IS_SECURE_CADVISOR_PORT=true" >> ~/.bashrc export CADVISOR_METRICS_URL="https://$NODE_IP:10250/metrics" @@ -445,7 +450,7 @@ if [ "$cAdvisorIsSecure" = true ]; then echo "Making curl request to cadvisor endpoint /pods with port 10250 to get the configured container runtime on kubelet" podWithValidContainerId=$(curl -s -k -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" https://$NODE_IP:10250/pods | jq -R 'fromjson? | [ .items[] | select( any(.status.phase; contains("Running")) ) ] | .[0]') else - echo "Wget request using port 10250 failed. Using port 10255" + echo "Using port 10255" export IS_SECURE_CADVISOR_PORT=false echo "export IS_SECURE_CADVISOR_PORT=false" >> ~/.bashrc export CADVISOR_METRICS_URL="http://$NODE_IP:10255/metrics" @@ -460,10 +465,10 @@ if [ ! 
-z "$podWithValidContainerId" ]; then # convert to lower case so that everywhere else can be used in lowercase containerRuntime=$(echo $containerRuntime | tr "[:upper:]" "[:lower:]") nodeName=$(echo $nodeName | tr "[:upper:]" "[:lower:]") - # update runtime only if its not empty, not null and not startswith docker + # use default container runtime if obtained runtime value is either empty or null if [ -z "$containerRuntime" -o "$containerRuntime" == null ]; then echo "using default container runtime as $CONTAINER_RUNTIME since got containeRuntime as empty or null" - elif [[ $containerRuntime != docker* ]]; then + else export CONTAINER_RUNTIME=$containerRuntime fi From 4c460c6509b58cb942a2f8a17e6176453dbaf7cc Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Mon, 24 Jan 2022 10:24:48 -0800 Subject: [PATCH 184/301] Updating pod annotation for latest agent version (#697) --- kubernetes/omsagent.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 152f2313b..5a77f3563 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -357,7 +357,7 @@ spec: component: oms-agent tier: node annotations: - agentVersion: "1.10.0.1" + agentVersion: "azure-mdsd-1.14.2" dockerProviderVersion: "16.0.0-0" schema-versions: "v1" spec: @@ -596,7 +596,7 @@ spec: labels: rsName: "omsagent-rs" annotations: - agentVersion: "1.10.0.1" + agentVersion: "azure-mdsd-1.14.2" dockerProviderVersion: "16.0.0-0" schema-versions: "v1" spec: @@ -765,7 +765,7 @@ spec: component: oms-agent-win tier: node-win annotations: - agentVersion: "1.10.0.1" + agentVersion: "0.0.0-0" dockerProviderVersion: "16.0.0-0" schema-versions: "v1" spec: From f2c2904b38117971030b8f3fddcb7ea3bdc10aa2 Mon Sep 17 00:00:00 2001 From: bragi92 Date: Wed, 26 Jan 2022 00:07:59 +0530 Subject: [PATCH 185/301] fix windows build failure due to msys2 version (#700) * fix windows build failure due to msys2 version * 20211130.0.0 --- kubernetes/windows/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 671a89246..55bedf7f5 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -10,7 +10,7 @@ ARG IMAGE_TAG=win-ciprod10132021 RUN powershell -Command "Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" # Fluentd depends on cool.io whose fat gem is only available for Ruby < 2.5, so need to specify --platform ruby when install Ruby > 2.5 and install msys2 to get dev tools RUN choco install -y ruby --version 2.6.5.1 --params "'/InstallDir:C:\ruby26'" \ -&& choco install -y msys2 --version 20210604.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ +&& choco install -y msys2 --version 20211130.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ && choco install -y vim # gangams - optional MSYS2 update via ridk failing in merged docker file so skipping that since we dont need optional update From 78440cfb0ea8674565fc6fc5fdf907da88082f89 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Thu, 27 Jan 2022 16:25:06 -0800 Subject: [PATCH 186/301] Jan agent tasks (#698) --- .github/workflows/pr-checker.yml | 2 +- build/linux/installer/conf/telegraf.conf | 2 +- charts/azuremonitor-containers/values.yaml | 2 +- kubernetes/linux/setup.sh | 7 ++++--- kubernetes/omsagent.yaml | 2 +- kubernetes/windows/setup.ps1 | 2 +- 6 files changed, 9 
insertions(+), 8 deletions(-) diff --git a/.github/workflows/pr-checker.yml b/.github/workflows/pr-checker.yml index 8a7e542b3..723f22dc7 100644 --- a/.github/workflows/pr-checker.yml +++ b/.github/workflows/pr-checker.yml @@ -56,7 +56,7 @@ jobs: format: 'table' severity: 'CRITICAL,HIGH' vuln-type: 'os,library' - skip-dirs: '/opt,/usr/sbin' + skip-dirs: '/usr/sbin' exit-code: '1' timeout: '5m0s' WINDOWS-build: diff --git a/build/linux/installer/conf/telegraf.conf b/build/linux/installer/conf/telegraf.conf index 0e4824e70..b0a8730c6 100644 --- a/build/linux/installer/conf/telegraf.conf +++ b/build/linux/installer/conf/telegraf.conf @@ -425,7 +425,7 @@ # Below due to Bug - https://github.com/influxdata/telegraf/issues/5615 # ORDER matters here!! - i.e the below should be the LAST modifier [inputs.disk.tagdrop] - path = ["/var/lib/kubelet*", "/dev/termination-log", "/var/log", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/etc/kubernetes/host", "/var/lib/docker/containers", "/etc/config/settings"] + path = ["/var/lib/kubelet*", "/dev/termination-log", "/var/log", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/etc/kubernetes/host", "/var/lib/docker/containers", "/etc/config/settings", "/run/host/containerd/io.containerd.runtime.v2.task/k8s.io/*"] # Read metrics about memory usage diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 3ca313d38..d5d7ad2e1 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -178,7 +178,7 @@ omsagent: memory: 750Mi daemonsetwindows: limits: - cpu: 200m + cpu: 500m memory: 600Mi deployment: requests: diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 80a1b5b1d..872ac99cf 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -30,10 +30,10 @@ sudo apt-get install jq=1.5+dfsg-2 -y #used to setcaps for ruby process to read /proc/env sudo apt-get install libcap2-bin -y -wget https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0_linux_amd64.tar.gz -tar -zxvf telegraf-1.18.0_linux_amd64.tar.gz +wget https://dl.influxdata.com/telegraf/releases/telegraf-1.20.3_linux_amd64.tar.gz +tar -zxvf telegraf-1.20.3_linux_amd64.tar.gz -mv /opt/telegraf-1.18.0/usr/bin/telegraf /opt/telegraf +mv /opt/telegraf-1.20.3/usr/bin/telegraf /opt/telegraf chmod 777 /opt/telegraf @@ -61,6 +61,7 @@ rm -f $TMPDIR/docker-cimprov*.sh rm -f $TMPDIR/azure-mdsd*.deb rm -f $TMPDIR/mdsd.xml rm -f $TMPDIR/envmdsd +rm -f $TMPDIR/telegraf-*.tar.gz # remove build dependencies sudo apt-get remove ruby2.6-dev gcc make -y diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 5a77f3563..248276a08 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -780,7 +780,7 @@ spec: imagePullPolicy: IfNotPresent resources: limits: - cpu: 200m + cpu: 500m memory: 600Mi env: - name: FBIT_SERVICE_FLUSH_INTERVAL diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index 8742fba8b..857f9f690 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -31,7 +31,7 @@ Write-Host ('Finished Installing Fluentbit') Write-Host ('Installing Telegraf'); try { - $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0_windows_amd64.zip' + $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.20.3_windows_amd64.zip' Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf.zip Expand-Archive -Path /installation/telegraf.zip -Destination 
/installation/telegraf Move-Item -Path /installation/telegraf/*/* -Destination /opt/telegraf/ -ErrorAction SilentlyContinue From 3dce72f090a5e866e4d688f73f50594b8dde3b9b Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 27 Jan 2022 17:02:33 -0800 Subject: [PATCH 187/301] remove v1 fallback hidden option (#705) --- source/plugins/go/src/oms.go | 277 +++++++++++++++++------------------ 1 file changed, 136 insertions(+), 141 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index ee221a60b..8c7695346 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -21,9 +21,10 @@ import ( "github.com/google/uuid" "github.com/tinylib/msgp/msgp" - lumberjack "gopkg.in/natefinch/lumberjack.v2" "Docker-Provider/source/plugins/go/src/extension" + lumberjack "gopkg.in/natefinch/lumberjack.v2" + "github.com/Azure/azure-kusto-go/kusto/ingest" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -85,7 +86,6 @@ const WindowsContainerLogPluginConfFilePath = "/etc/omsagentwindows/out_oms.conf // IPName const IPName = "ContainerInsights" - const defaultContainerInventoryRefreshInterval = 60 const kubeMonAgentConfigEventFlushInterval = 60 @@ -102,9 +102,6 @@ const ContainerLogsV2Route = "v2" const ContainerLogsADXRoute = "adx" -//fallback option v1 route i.e. ODS direct if required in any case -const ContainerLogsV1Route = "v1" - //container logs schema (v2=ContainerLogsV2 table in LA, anything else ContainerLogs table in LA. This is applicable only if Container logs route is NOT ADX) const ContainerLogV2SchemaVersion = "v2" @@ -252,29 +249,29 @@ type DataItemLAv1 struct { // DataItemLAv2 == ContainerLogV2 table in LA // Please keep the names same as destination column names, to avoid transforming one to another in the pipeline type DataItemLAv2 struct { - TimeGenerated string `json:"TimeGenerated"` - Computer string `json:"Computer"` - ContainerId string `json:"ContainerId"` - ContainerName string `json:"ContainerName"` - PodName string `json:"PodName"` - PodNamespace string `json:"PodNamespace"` - LogMessage string `json:"LogMessage"` - LogSource string `json:"LogSource"` + TimeGenerated string `json:"TimeGenerated"` + Computer string `json:"Computer"` + ContainerId string `json:"ContainerId"` + ContainerName string `json:"ContainerName"` + PodName string `json:"PodName"` + PodNamespace string `json:"PodNamespace"` + LogMessage string `json:"LogMessage"` + LogSource string `json:"LogSource"` //PodLabels string `json:"PodLabels"` } // DataItemADX == ContainerLogV2 table in ADX type DataItemADX struct { - TimeGenerated string `json:"TimeGenerated"` - Computer string `json:"Computer"` - ContainerId string `json:"ContainerId"` - ContainerName string `json:"ContainerName"` - PodName string `json:"PodName"` - PodNamespace string `json:"PodNamespace"` - LogMessage string `json:"LogMessage"` - LogSource string `json:"LogSource"` + TimeGenerated string `json:"TimeGenerated"` + Computer string `json:"Computer"` + ContainerId string `json:"ContainerId"` + ContainerName string `json:"ContainerName"` + PodName string `json:"PodName"` + PodNamespace string `json:"PodNamespace"` + LogMessage string `json:"LogMessage"` + LogSource string `json:"LogSource"` //PodLabels string `json:"PodLabels"` - AzureResourceId string `json:"AzureResourceId"` + AzureResourceId string `json:"AzureResourceId"` } // telegraf metric DataItem represents the object corresponding to the json that is sent by fluentbit tail plugin @@ 
-299,15 +296,15 @@ type InsightsMetricsBlob struct { // ContainerLogBlob represents the object corresponding to the payload that is sent to the ODS end point type ContainerLogBlobLAv1 struct { - DataType string `json:"DataType"` - IPName string `json:"IPName"` + DataType string `json:"DataType"` + IPName string `json:"IPName"` DataItems []DataItemLAv1 `json:"DataItems"` } // ContainerLogBlob represents the object corresponding to the payload that is sent to the ODS end point type ContainerLogBlobLAv2 struct { - DataType string `json:"DataType"` - IPName string `json:"IPName"` + DataType string `json:"DataType"` + IPName string `json:"IPName"` DataItems []DataItemLAv2 `json:"DataItems"` } @@ -361,6 +358,7 @@ const ( // DataType to be used as enum per data type socket client creation type DataType int + const ( // DataType to be used as enum per data type socket client creation ContainerLogV2 DataType = iota @@ -628,12 +626,12 @@ func flushKubeMonAgentEventRecords() { Log(message) SendException(message) } else { - msgPackEntry := MsgPackEntry{ + msgPackEntry := MsgPackEntry{ Record: stringMap, } - msgPackEntries = append(msgPackEntries, msgPackEntry) - } - } + msgPackEntries = append(msgPackEntries, msgPackEntry) + } + } } } @@ -670,8 +668,8 @@ func flushKubeMonAgentEventRecords() { msgPackEntry := MsgPackEntry{ Record: stringMap, } - msgPackEntries = append(msgPackEntries, msgPackEntry) - } + msgPackEntries = append(msgPackEntries, msgPackEntry) + } } } } @@ -713,18 +711,18 @@ func flushKubeMonAgentEventRecords() { } else { if err := json.Unmarshal(jsonBytes, &stringMap); err != nil { message := fmt.Sprintf("Error while UnMarshalling json bytes to stringmap: %s", err.Error()) - Log(message) - SendException(message) + Log(message) + SendException(message) } else { msgPackEntry := MsgPackEntry{ Record: stringMap, - } - msgPackEntries = append(msgPackEntries, msgPackEntry) + } + msgPackEntries = append(msgPackEntries, msgPackEntry) } } } } - if (IsWindows == false && len(msgPackEntries) > 0) { //for linux, mdsd route + if IsWindows == false && len(msgPackEntries) > 0 { //for linux, mdsd route if IsAADMSIAuthMode == true && strings.HasPrefix(MdsdKubeMonAgentEventsTagName, MdsdOutputStreamIdTagPrefix) == false { Log("Info::mdsd::obtaining output stream id for data type: %s", KubeMonAgentEventDataType) MdsdKubeMonAgentEventsTagName = extension.GetInstance(FLBLogger, ContainerType).GetOutputStreamId(KubeMonAgentEventDataType) @@ -757,7 +755,7 @@ func flushKubeMonAgentEventRecords() { } else { numRecords := len(msgPackEntries) Log("FlushKubeMonAgentEventRecords::Info::Successfully flushed %d records that was %d bytes in %s", numRecords, bts, elapsed) - // Send telemetry to AppInsights resource + // Send telemetry to AppInsights resource SendEvent(KubeMonAgentEventsFlushedEvent, telemetryDimensions) } } else { @@ -788,8 +786,8 @@ func flushKubeMonAgentEventRecords() { if IsAADMSIAuthMode == true { IngestionAuthTokenUpdateMutex.Lock() - ingestionAuthToken := ODSIngestionAuthToken - IngestionAuthTokenUpdateMutex.Unlock() + ingestionAuthToken := ODSIngestionAuthToken + IngestionAuthTokenUpdateMutex.Unlock() if ingestionAuthToken == "" { Log("Error::ODS Ingestion Auth Token is empty. 
Please check error log.") } @@ -910,77 +908,77 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int var msgPackEntries []MsgPackEntry var i int start := time.Now() - var elapsed time.Duration + var elapsed time.Duration for i = 0; i < len(laMetrics); i++ { - var interfaceMap map[string]interface{} - stringMap := make(map[string]string) - jsonBytes, err := json.Marshal(*laMetrics[i]) - if err != nil { - message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling json %q", err) + var interfaceMap map[string]interface{} + stringMap := make(map[string]string) + jsonBytes, err := json.Marshal(*laMetrics[i]) + if err != nil { + message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling json %q", err) + Log(message) + SendException(message) + return output.FLB_OK + } else { + if err := json.Unmarshal(jsonBytes, &interfaceMap); err != nil { + message := fmt.Sprintf("Error while UnMarshalling json bytes to interfaceMap: %s", err.Error()) Log(message) SendException(message) return output.FLB_OK } else { - if err := json.Unmarshal(jsonBytes, &interfaceMap); err != nil { - message := fmt.Sprintf("Error while UnMarshalling json bytes to interfaceMap: %s", err.Error()) - Log(message) - SendException(message) - return output.FLB_OK - } else { - for key, value := range interfaceMap { - strKey := fmt.Sprintf("%v", key) - strValue := fmt.Sprintf("%v", value) - stringMap[strKey] = strValue - } - msgPackEntry := MsgPackEntry{ - Record: stringMap, - } - msgPackEntries = append(msgPackEntries, msgPackEntry) + for key, value := range interfaceMap { + strKey := fmt.Sprintf("%v", key) + strValue := fmt.Sprintf("%v", value) + stringMap[strKey] = strValue + } + msgPackEntry := MsgPackEntry{ + Record: stringMap, } + msgPackEntries = append(msgPackEntries, msgPackEntry) } + } } - if (len(msgPackEntries) > 0) { - if IsAADMSIAuthMode == true && (strings.HasPrefix(MdsdInsightsMetricsTagName, MdsdOutputStreamIdTagPrefix) == false) { - Log("Info::mdsd::obtaining output stream id for InsightsMetricsDataType since Log Analytics AAD MSI Auth Enabled") - MdsdInsightsMetricsTagName = extension.GetInstance(FLBLogger, ContainerType).GetOutputStreamId(InsightsMetricsDataType) - } - msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdInsightsMetricsTagName, msgPackEntries) + if len(msgPackEntries) > 0 { + if IsAADMSIAuthMode == true && (strings.HasPrefix(MdsdInsightsMetricsTagName, MdsdOutputStreamIdTagPrefix) == false) { + Log("Info::mdsd::obtaining output stream id for InsightsMetricsDataType since Log Analytics AAD MSI Auth Enabled") + MdsdInsightsMetricsTagName = extension.GetInstance(FLBLogger, ContainerType).GetOutputStreamId(InsightsMetricsDataType) + } + msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdInsightsMetricsTagName, msgPackEntries) + if MdsdInsightsMetricsMsgpUnixSocketClient == nil { + Log("Error::mdsd::mdsd connection does not exist. re-connecting ...") + CreateMDSDClient(InsightsMetrics, ContainerType) if MdsdInsightsMetricsMsgpUnixSocketClient == nil { - Log("Error::mdsd::mdsd connection does not exist. re-connecting ...") - CreateMDSDClient(InsightsMetrics, ContainerType) - if MdsdInsightsMetricsMsgpUnixSocketClient == nil { - Log("Error::mdsd::Unable to create mdsd client for insights metrics. 
Please check error log.") - ContainerLogTelemetryMutex.Lock() - defer ContainerLogTelemetryMutex.Unlock() - InsightsMetricsMDSDClientCreateErrors += 1 - return output.FLB_RETRY - } - } - - deadline := 10 * time.Second - MdsdInsightsMetricsMsgpUnixSocketClient.SetWriteDeadline(time.Now().Add(deadline)) //this is based of clock time, so cannot reuse - bts, er := MdsdInsightsMetricsMsgpUnixSocketClient.Write(msgpBytes) - - elapsed = time.Since(start) - - if er != nil { - Log("Error::mdsd::Failed to write to mdsd %d records after %s. Will retry ... error : %s", len(msgPackEntries), elapsed, er.Error()) - UpdateNumTelegrafMetricsSentTelemetry(0, 1, 0) - if MdsdInsightsMetricsMsgpUnixSocketClient != nil { - MdsdInsightsMetricsMsgpUnixSocketClient.Close() - MdsdInsightsMetricsMsgpUnixSocketClient = nil - } - + Log("Error::mdsd::Unable to create mdsd client for insights metrics. Please check error log.") ContainerLogTelemetryMutex.Lock() defer ContainerLogTelemetryMutex.Unlock() InsightsMetricsMDSDClientCreateErrors += 1 return output.FLB_RETRY - } else { - numTelegrafMetricsRecords := len(msgPackEntries) - UpdateNumTelegrafMetricsSentTelemetry(numTelegrafMetricsRecords, 0, 0) - Log("Success::mdsd::Successfully flushed %d telegraf metrics records that was %d bytes to mdsd in %s ", numTelegrafMetricsRecords, bts, elapsed) } + } + + deadline := 10 * time.Second + MdsdInsightsMetricsMsgpUnixSocketClient.SetWriteDeadline(time.Now().Add(deadline)) //this is based of clock time, so cannot reuse + bts, er := MdsdInsightsMetricsMsgpUnixSocketClient.Write(msgpBytes) + + elapsed = time.Since(start) + + if er != nil { + Log("Error::mdsd::Failed to write to mdsd %d records after %s. Will retry ... error : %s", len(msgPackEntries), elapsed, er.Error()) + UpdateNumTelegrafMetricsSentTelemetry(0, 1, 0) + if MdsdInsightsMetricsMsgpUnixSocketClient != nil { + MdsdInsightsMetricsMsgpUnixSocketClient.Close() + MdsdInsightsMetricsMsgpUnixSocketClient = nil + } + + ContainerLogTelemetryMutex.Lock() + defer ContainerLogTelemetryMutex.Unlock() + InsightsMetricsMDSDClientCreateErrors += 1 + return output.FLB_RETRY + } else { + numTelegrafMetricsRecords := len(msgPackEntries) + UpdateNumTelegrafMetricsSentTelemetry(numTelegrafMetricsRecords, 0, 0) + Log("Success::mdsd::Successfully flushed %d telegraf metrics records that was %d bytes to mdsd in %s ", numTelegrafMetricsRecords, bts, elapsed) + } } } else { // for windows, ODS direct @@ -1117,12 +1115,12 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { stringMap = make(map[string]string) //below id & name are used by latency telemetry in both v1 & v2 LA schemas id := "" - name := "" + name := "" logEntry := ToString(record["log"]) logEntryTimeStamp := ToString(record["time"]) //ADX Schema & LAv2 schema are almost the same (except resourceId) - if (ContainerLogSchemaV2 == true || ContainerLogsRouteADX == true) { + if ContainerLogSchemaV2 == true || ContainerLogsRouteADX == true { stringMap["Computer"] = Computer stringMap["ContainerId"] = containerID stringMap["ContainerName"] = containerName @@ -1171,29 +1169,29 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { stringMap["AzureResourceId"] = "" } dataItemADX = DataItemADX{ - TimeGenerated: stringMap["TimeGenerated"], - Computer: stringMap["Computer"], - ContainerId: stringMap["ContainerId"], - ContainerName: stringMap["ContainerName"], - PodName: stringMap["PodName"], - PodNamespace: stringMap["PodNamespace"], - LogMessage: stringMap["LogMessage"], - LogSource: 
stringMap["LogSource"], - AzureResourceId: stringMap["AzureResourceId"], + TimeGenerated: stringMap["TimeGenerated"], + Computer: stringMap["Computer"], + ContainerId: stringMap["ContainerId"], + ContainerName: stringMap["ContainerName"], + PodName: stringMap["PodName"], + PodNamespace: stringMap["PodNamespace"], + LogMessage: stringMap["LogMessage"], + LogSource: stringMap["LogSource"], + AzureResourceId: stringMap["AzureResourceId"], } //ADX dataItemsADX = append(dataItemsADX, dataItemADX) } else { - if (ContainerLogSchemaV2 == true) { + if ContainerLogSchemaV2 == true { dataItemLAv2 = DataItemLAv2{ - TimeGenerated: stringMap["TimeGenerated"], - Computer: stringMap["Computer"], - ContainerId: stringMap["ContainerId"], - ContainerName: stringMap["ContainerName"], - PodName: stringMap["PodName"], - PodNamespace: stringMap["PodNamespace"], - LogMessage: stringMap["LogMessage"], - LogSource: stringMap["LogSource"], + TimeGenerated: stringMap["TimeGenerated"], + Computer: stringMap["Computer"], + ContainerId: stringMap["ContainerId"], + ContainerName: stringMap["ContainerName"], + PodName: stringMap["PodName"], + PodNamespace: stringMap["PodNamespace"], + LogMessage: stringMap["LogMessage"], + LogSource: stringMap["LogSource"], } //ODS-v2 schema dataItemsLAv2 = append(dataItemsLAv2, dataItemLAv2) @@ -1211,10 +1209,10 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { Image: stringMap["Image"], Name: stringMap["Name"], } - //ODS-v1 schema - dataItemsLAv1 = append(dataItemsLAv1, dataItemLAv1) - name = stringMap["Name"] - id = stringMap["Id"] + //ODS-v1 schema + dataItemsLAv1 = append(dataItemsLAv1, dataItemLAv1) + name = stringMap["Name"] + id = stringMap["Id"] } } @@ -1364,18 +1362,18 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { numContainerLogRecords = len(dataItemsADX) Log("Success::ADX::Successfully wrote %d container log records to ADX in %s", numContainerLogRecords, elapsed) - } else if ((ContainerLogSchemaV2 == true && len(dataItemsLAv2) > 0) || len(dataItemsLAv1) > 0) { //ODS + } else if (ContainerLogSchemaV2 == true && len(dataItemsLAv2) > 0) || len(dataItemsLAv1) > 0 { //ODS var logEntry interface{} recordType := "" loglinesCount := 0 //schema v2 - if (len(dataItemsLAv2) > 0 && ContainerLogSchemaV2 == true) { + if len(dataItemsLAv2) > 0 && ContainerLogSchemaV2 == true { logEntry = ContainerLogBlobLAv2{ DataType: ContainerLogV2DataType, IPName: IPName, DataItems: dataItemsLAv2} - loglinesCount = len(dataItemsLAv2) - recordType = "ContainerLogV2" + loglinesCount = len(dataItemsLAv2) + recordType = "ContainerLogV2" } else { //schema v1 if len(dataItemsLAv1) > 0 { @@ -1383,8 +1381,8 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { DataType: ContainerLogDataType, IPName: IPName, DataItems: dataItemsLAv1} - loglinesCount = len(dataItemsLAv1) - recordType = "ContainerLog" + loglinesCount = len(dataItemsLAv1) + recordType = "ContainerLog" } } @@ -1416,7 +1414,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { return output.FLB_RETRY } // add authorization header to the req - req.Header.Set("Authorization", "Bearer "+ingestionAuthToken) + req.Header.Set("Authorization", "Bearer "+ingestionAuthToken) } resp, err := HTTPClient.Do(req) @@ -1444,7 +1442,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { numContainerLogRecords = loglinesCount Log("PostDataHelper::Info::Successfully flushed %d %s records to ODS in %s", numContainerLogRecords, recordType, 
elapsed) - } + } ContainerLogTelemetryMutex.Lock() defer ContainerLogTelemetryMutex.Unlock() @@ -1558,7 +1556,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { Log("Container Type %s", ContainerType) osType := os.Getenv("OS_TYPE") - IsWindows = false + IsWindows = false // Linux if strings.Compare(strings.ToLower(osType), "windows") != 0 { Log("Reading configuration for Linux from %s", pluginConfPath) @@ -1703,7 +1701,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { ContainerLogsRouteADX = false if strings.Compare(ContainerLogsRoute, ContainerLogsADXRoute) == 0 { - // Try to read the ADX database name from environment variables. Default to DefaultAdsDatabaseName if not set. + // Try to read the ADX database name from environment variables. Default to DefaultAdsDatabaseName if not set. // This SHOULD be set by tomlparser.rb so it's a highly unexpected event if it isn't. // It should be set by the logic in tomlparser.rb EVEN if ADX logging isn't enabled AdxDatabaseName := strings.TrimSpace(os.Getenv("AZMON_ADX_DATABASE_NAME")) @@ -1747,10 +1745,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { fmt.Fprintf(os.Stdout, "Routing container logs thru %s route...\n", ContainerLogsADXRoute) } } else if strings.Compare(strings.ToLower(osType), "windows") != 0 { //for linux, oneagent will be default route - ContainerLogsRouteV2 = true //default is mdsd route - if strings.Compare(ContainerLogsRoute, ContainerLogsV1Route) == 0 { - ContainerLogsRouteV2 = false //fallback option when hiddensetting set - } + ContainerLogsRouteV2 = true //default is mdsd route Log("Routing container logs thru %s route...", ContainerLogsRoute) fmt.Fprintf(os.Stdout, "Routing container logs thru %s route... \n", ContainerLogsRoute) } @@ -1768,14 +1763,14 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { Log("Creating MDSD clients for KubeMonAgentEvents & InsightsMetrics") CreateMDSDClient(KubeMonAgentEvents, ContainerType) CreateMDSDClient(InsightsMetrics, ContainerType) - } + } ContainerLogSchemaVersion := strings.TrimSpace(strings.ToLower(os.Getenv("AZMON_CONTAINER_LOG_SCHEMA_VERSION"))) Log("AZMON_CONTAINER_LOG_SCHEMA_VERSION:%s", ContainerLogSchemaVersion) - ContainerLogSchemaV2 = false //default is v1 schema + ContainerLogSchemaV2 = false //default is v1 schema - if strings.Compare(ContainerLogSchemaVersion, ContainerLogV2SchemaVersion) == 0 && ContainerLogsRouteADX != true { + if strings.Compare(ContainerLogSchemaVersion, ContainerLogV2SchemaVersion) == 0 && ContainerLogsRouteADX != true { ContainerLogSchemaV2 = true Log("Container logs schema=%s", ContainerLogV2SchemaVersion) fmt.Fprintf(os.Stdout, "Container logs schema=%s... 
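
// With the hidden v1 fallback removed by this patch, the effective routing
// decision reduces to the small amount of logic sketched below: ADX only when
// explicitly configured, otherwise the mdsd (v2) route on Linux, and the
// ContainerLogV2 schema honored only when the route is not ADX. This is an
// illustrative reduction of the surrounding code, not a drop-in replacement.
package main

import (
	"fmt"
	"strings"
)

type logRouting struct {
	RouteADX bool // ship container logs to ADX
	RouteV2  bool // ship container logs through mdsd (linux default)
	SchemaV2 bool // use the ContainerLogV2 table schema
}

func resolveRouting(osType, configuredRoute, schemaVersion string) logRouting {
	r := logRouting{}
	isWindows := strings.EqualFold(osType, "windows")
	if strings.EqualFold(configuredRoute, "adx") {
		r.RouteADX = true
	} else if !isWindows {
		// linux: oneagent/mdsd is the default and only non-ADX route now
		r.RouteV2 = true
	}
	// v2 schema applies only when logs are not routed to ADX
	r.SchemaV2 = !r.RouteADX && strings.EqualFold(schemaVersion, "v2")
	return r
}

func main() {
	fmt.Printf("%+v\n", resolveRouting("linux", "v2", "v2"))  // {RouteADX:false RouteV2:true SchemaV2:true}
	fmt.Printf("%+v\n", resolveRouting("linux", "adx", "v2")) // {RouteADX:true RouteV2:false SchemaV2:false}
}
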
\n", ContainerLogV2SchemaVersion) @@ -1801,15 +1796,15 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { if ContainerLogSchemaV2 == true { MdsdContainerLogTagName = MdsdContainerLogV2SourceName } else { - MdsdContainerLogTagName = MdsdContainerLogSourceName - } + MdsdContainerLogTagName = MdsdContainerLogSourceName + } MdsdInsightsMetricsTagName = MdsdInsightsMetricsSourceName - MdsdKubeMonAgentEventsTagName = MdsdKubeMonAgentEventsSourceName + MdsdKubeMonAgentEventsTagName = MdsdKubeMonAgentEventsSourceName Log("ContainerLogsRouteADX: %v, IsWindows: %v, IsAADMSIAuthMode = %v \n", ContainerLogsRouteADX, IsWindows, IsAADMSIAuthMode) if !ContainerLogsRouteADX && IsWindows && IsAADMSIAuthMode { Log("defaultIngestionAuthTokenRefreshIntervalSeconds = %d \n", defaultIngestionAuthTokenRefreshIntervalSeconds) - IngestionAuthTokenRefreshTicker = time.NewTicker(time.Second * time.Duration(defaultIngestionAuthTokenRefreshIntervalSeconds)) + IngestionAuthTokenRefreshTicker = time.NewTicker(time.Second * time.Duration(defaultIngestionAuthTokenRefreshIntervalSeconds)) go refreshIngestionAuthToken() } } From 2726d01655055de56ce1c25fc8ece671427e1a4b Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 27 Jan 2022 18:40:11 -0800 Subject: [PATCH 188/301] collect telemetry containerlog records with emptystamp (#703) * collect telemetry containerlog records with emptystamp * collect telemetry containerlog records with emptystamp --- source/plugins/go/src/oms.go | 4 ++++ source/plugins/go/src/telemetry.go | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 8c7695346..fbee1dd75 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -1229,6 +1229,10 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { maxLatencyContainer = name + "=" + id } } + } else { + ContainerLogTelemetryMutex.Lock() + ContainerLogRecordCountWithEmptyTimeStamp += 1 + ContainerLogTelemetryMutex.Unlock() } } diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index 31818dbb3..b344f4ac8 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -50,6 +50,8 @@ var ( ContainerLogsSendErrorsToADXFromFluent float64 //Tracks the number of ADX client create errors for containerlogs (uses ContainerLogTelemetryTicker) ContainerLogsADXClientCreateErrors float64 + //Tracks the number of container log records with empty Timestamp (uses ContainerLogTelemetryTicker) + ContainerLogRecordCountWithEmptyTimeStamp float64 //Tracks the number of OSM namespaces and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) OSMNamespaceCount int //Tracks whether monitor kubernetes pods is set to true and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) @@ -82,6 +84,7 @@ const ( metricNameErrorCountKubeMonEventsMDSDClientCreateError = "KubeMonEventsMDSDClientCreateErrorsCount" metricNameErrorCountContainerLogsSendErrorsToADXFromFluent = "ContainerLogs2ADXSendErrorCount" metricNameErrorCountContainerLogsADXClientCreateError = "ContainerLogsADXClientCreateErrorCount" + metricNameContainerLogRecordCountWithEmptyTimeStamp = "ContainerLogRecordCountWithEmptyTimeStamp" defaultTelemetryPushIntervalSeconds = 300 @@ -125,6 +128,7 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { promMonitorPodsNamespaceLength := PromMonitorPodsNamespaceLength promMonitorPodsLabelSelectorLength := 
PromMonitorPodsLabelSelectorLength promMonitorPodsFieldSelectorLength := PromMonitorPodsFieldSelectorLength + containerLogRecordCountWithEmptyTimeStamp := ContainerLogRecordCountWithEmptyTimeStamp TelegrafMetricsSentCount = 0.0 TelegrafMetricsSendErrorCount = 0.0 @@ -142,6 +146,7 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { ContainerLogsADXClientCreateErrors = 0.0 InsightsMetricsMDSDClientCreateErrors = 0.0 KubeMonEventsMDSDClientCreateErrors = 0.0 + ContainerLogRecordCountWithEmptyTimeStamp = 0.0 ContainerLogTelemetryMutex.Unlock() if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 { @@ -222,6 +227,9 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { if kubeMonEventsMDSDClientCreateErrors > 0.0 { TelemetryClient.Track(appinsights.NewMetricTelemetry(metricNameErrorCountKubeMonEventsMDSDClientCreateError, kubeMonEventsMDSDClientCreateErrors)) } + if ContainerLogRecordCountWithEmptyTimeStamp > 0.0 { + TelemetryClient.Track(appinsights.NewMetricTelemetry(metricNameContainerLogRecordCountWithEmptyTimeStamp, containerLogRecordCountWithEmptyTimeStamp)) + } start = time.Now() } From 28599b36376abbb69d11ace0689577280a2a923d Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Fri, 28 Jan 2022 13:59:31 -0800 Subject: [PATCH 189/301] Fixing telegraf bug for placeholder name (#706) --- kubernetes/windows/main.ps1 | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 733ddb408..f5fab4edd 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -52,23 +52,29 @@ function Set-EnvironmentVariables { if ($domain -eq "opinsights.azure.com") { $cloud_environment = "azurepubliccloud" $mcs_endpoint = "monitor.azure.com" - } elseif ($domain -eq "opinsights.azure.cn") { + } + elseif ($domain -eq "opinsights.azure.cn") { $cloud_environment = "azurechinacloud" $mcs_endpoint = "monitor.azure.cn" - } elseif ($domain -eq "opinsights.azure.us") { + } + elseif ($domain -eq "opinsights.azure.us") { $cloud_environment = "azureusgovernmentcloud" $mcs_endpoint = "monitor.azure.us" - } elseif ($domain -eq "opinsights.azure.eaglex.ic.gov") { + } + elseif ($domain -eq "opinsights.azure.eaglex.ic.gov") { $cloud_environment = "usnat" $mcs_endpoint = "monitor.azure.eaglex.ic.gov" - } elseif ($domain -eq "opinsights.azure.microsoft.scloud") { + } + elseif ($domain -eq "opinsights.azure.microsoft.scloud") { $cloud_environment = "ussec" $mcs_endpoint = "monitor.azure.microsoft.scloud" - } else { + } + else { Write-Host "Invalid or Unsupported domain name $($domain). EXITING....." exit 1 } - } else { + } + else { Write-Host "Domain name either null or empty. EXITING....." 
exit 1 } @@ -490,6 +496,11 @@ function Start-Telegraf { Write-Host "Failed to set environment variable NODE_IP for target 'machine' since it is either null or empty" } + $hostName = [System.Environment]::GetEnvironmentVariable("HOSTNAME", "process") + Write-Host "nodename: $($hostName)" + Write-Host "replacing nodename in telegraf config" + (Get-Content "C:\etc\telegraf\telegraf.conf").replace('placeholder_hostname', $hostName) | Set-Content "C:\etc\telegraf\telegraf.conf" + Write-Host "Installing telegraf service" C:\opt\telegraf\telegraf.exe --service install --config "C:\etc\telegraf\telegraf.conf" @@ -589,14 +600,16 @@ if (![string]::IsNullOrEmpty($requiresCertBootstrap) -and ` $isAADMSIAuth = [System.Environment]::GetEnvironmentVariable("USING_AAD_MSI_AUTH") if (![string]::IsNullOrEmpty($isAADMSIAuth) -and $isAADMSIAuth.ToLower() -eq 'true') { Write-Host "skipping agent onboarding via cert since AAD MSI Auth configured" -} else { +} +else { Generate-Certificates Test-CertificatePath } + Start-Fluent-Telegraf # List all powershell processes running. This should have main.ps1 and filesystemwatcher.ps1 Get-WmiObject Win32_process | Where-Object { $_.Name -match 'powershell' } | Format-Table -Property Name, CommandLine, ProcessId #check if fluentd service is running -Get-Service fluentdwinaks +Get-Service fluentdwinaks \ No newline at end of file From 7452ee2767dabc54b7b3a787a3127b94a14fd0ae Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 28 Jan 2022 14:19:09 -0800 Subject: [PATCH 190/301] Gangams/jan 2022 release tasks 3 (#702) * add telemetry related to windows containers records * add telemetry related to windows containers records * containercount telemetry * add explicit exit code in ps scripts * node count telemetry * telemetry for win cirecord 64KB or more * metric to track wintelegraf metrics with tags 64kb * metric to track wintelegraf metrics with tags 64kb * fix pr feedback --- build/windows/Makefile.ps1 | 20 ++-- .../build-and-publish-dev-docker-image.ps1 | 8 +- .../build-and-publish-docker-image.ps1 | 8 +- .../dockerbuild/build-dev-base-image.ps1 | 6 +- .../windows/install-build-pre-requisites.ps1 | 16 ++-- .../aks/mdmonboarding/mdm_onboarding.ps1 | 22 ++--- .../mdmonboarding/mdm_onboarding_atscale.ps1 | 22 ++--- .../kubernetes/AddMonitoringWorkspaceTags.ps1 | 28 +++--- .../onboarding/managed/disable-monitoring.ps1 | 38 ++++---- .../onboarding/managed/enable-monitoring.ps1 | 40 ++++---- scripts/troubleshoot/TroubleshootError.ps1 | 86 ++++++++--------- .../TroubleshootError_AcsEngine.ps1 | 54 +++++------ .../TroubleshootError_nonAzureK8s.ps1 | 68 ++++++------- source/plugins/go/src/oms.go | 17 ++-- source/plugins/go/src/telemetry.go | 8 ++ source/plugins/ruby/constants.rb | 12 ++- source/plugins/ruby/in_cadvisor_perf.rb | 12 +-- source/plugins/ruby/in_kube_nodes.rb | 38 ++++---- source/plugins/ruby/in_kube_podinventory.rb | 96 +++++++++++++------ 19 files changed, 330 insertions(+), 269 deletions(-) diff --git a/build/windows/Makefile.ps1 b/build/windows/Makefile.ps1 index 9f3c438b0..52abbb071 100644 --- a/build/windows/Makefile.ps1 +++ b/build/windows/Makefile.ps1 @@ -13,21 +13,21 @@ Write-Host("current script dir : " + $currentdir + " ") if ($false -eq (Test-Path -Path $currentdir)) { Write-Host("Invalid current dir : " + $currentdir + " ") -ForegroundColor Red - exit + exit 1 } $builddir = Split-Path -Path $currentdir Write-Host("builddir dir : " + $builddir + " ") if ($false -eq (Test-Path -Path $builddir)) { Write-Host("Invalid build dir : " + $builddir + " 
") -ForegroundColor Red - exit + exit 1 } $versionFilePath = Join-Path -Path $builddir -child "version" Write-Host("versionFilePath : " + $versionFilePath + " ") if ($false -eq (Test-Path -Path $versionFilePath)) { Write-Host("Version file path incorrect or doesnt exist : " + $versionFilePath + " ") -ForegroundColor Red - exit + exit 1 } # read the version info @@ -36,7 +36,7 @@ foreach($line in Get-Content -Path $versionFilePath) { $parts = $line.split("=") if ($parts.length -lt 2 ) { Write-Host("Invalid content in version file : " + $versionFilePath + " ") -ForegroundColor Red - exit + exit 1 } switch ($parts[0]) { "CONTAINER_BUILDVERSION_MAJOR" { $BuildVersionMajor = $parts[1] } @@ -57,7 +57,7 @@ if ([string]::IsNullOrEmpty($BuildVersionMajor) -or [string]::IsNullOrEmpty($BuildVersionDate) -or [string]::IsNullOrEmpty($BuildVersionStatus)) { Write-Host("Expected version info doesnt exist in this version file : " + $versionFilePath + " ") -ForegroundColor Red - exit + exit 1 } # build version format will be [major].[minior].[patch]-[revision] $buildVersionString = $BuildVersionMajor + "." + $BuildVersionMinor + "." + $BuildVersionPatch + "-" + $BuildVersionBuildNR @@ -68,7 +68,7 @@ $certsrcdir = Join-Path -Path $builddir -ChildPath "windows\installer\certificat Write-Host("certsrc dir : " + $certsrcdir + " ") if ($false -eq (Test-Path -Path $certsrcdir)) { Write-Host("Invalid certificate generator source dir : " + $certsrcdir + " ") -ForegroundColor Red - exit + exit 1 } Write-Host("set the cerificate generator source code directory : " + $certsrcdir + " ...") Set-Location -Path $certsrcdir @@ -100,13 +100,13 @@ Write-Host("Successfully published certificate generator code binaries") -Foregr $certreleasebinpath = Join-Path -PATH $certsrcdir -ChildPath "bin\Release\$dotnetcoreframework\win10-x64\publish\*.*" if ($false -eq (Test-Path -Path $certreleasebinpath)) { Write-Host("certificate release bin path doesnt exist : " + $certreleasebinpath + " ") -ForegroundColor Red - exit + exit 1 } $rootdir = Split-Path -Path $builddir if ($false -eq (Test-Path -Path $rootdir)) { Write-Host("Invalid docker provider root source dir : " + $rootdir + " ") -ForegroundColor Red - exit + exit 1 } $publishdir = Join-Path -Path $rootdir -ChildPath "kubernetes\windows\omsagentwindows" @@ -128,7 +128,7 @@ $outomsgoplugindir = Join-Path -Path $rootdir -ChildPath "source\plugins\go\src" Write-Host("Building Out_OMS go plugin code...") if ($false -eq (Test-Path -Path $outomsgoplugindir)) { Write-Host("Invalid Out oms go plugin code dir : " + $outomsgoplugindir + " ") -ForegroundColor Red - exit + exit 1 } Set-Location -Path $outomsgoplugindir @@ -178,7 +178,7 @@ if (Test-Path -Path $livenessprobeexepath){ Write-Host("livenessprobe.exe exists which indicates cpp build step succeeded") -ForegroundColor Green } else { Write-Host("livenessprobe.exe doesnt exist which indicates cpp build step failed") -ForegroundColor Red - exit + exit 1 } $installerdir = Join-Path -Path $builddir -ChildPath "common\installer" diff --git a/kubernetes/windows/dockerbuild/build-and-publish-dev-docker-image.ps1 b/kubernetes/windows/dockerbuild/build-and-publish-dev-docker-image.ps1 index 0fde7f379..b87132218 100644 --- a/kubernetes/windows/dockerbuild/build-and-publish-dev-docker-image.ps1 +++ b/kubernetes/windows/dockerbuild/build-and-publish-dev-docker-image.ps1 @@ -15,18 +15,18 @@ Write-Host("current script dir : " + $currentdir + " ") if ($false -eq (Test-Path -Path $currentdir)) { Write-Host("Invalid current dir : " + $currentdir + " 
") -ForegroundColor Red - exit + exit 1 } if ([string]::IsNullOrEmpty($image)) { Write-Host "Image parameter shouldnt be null or empty" -ForegroundColor Red - exit + exit 1 } $imageparts = $image.split(":") if (($imageparts.Length -ne 2)){ Write-Host "Image not in valid format. Expected format should be /:" -ForegroundColor Red - exit + exit 1 } $imagetag = $imageparts[1].ToLower() @@ -48,7 +48,7 @@ $dockerFileDir = Split-Path -Path $currentdir Write-Host("builddir dir : " + $dockerFileDir + " ") if ($false -eq (Test-Path -Path $dockerFileDir)) { Write-Host("Invalid dockerFile Dir : " + $dockerFileDir + " ") -ForegroundColor Red - exit + exit 1 } Write-Host "changing directory to DockerFile dir: $dockerFileDir" diff --git a/kubernetes/windows/dockerbuild/build-and-publish-docker-image.ps1 b/kubernetes/windows/dockerbuild/build-and-publish-docker-image.ps1 index dbcfa6097..c1f655882 100644 --- a/kubernetes/windows/dockerbuild/build-and-publish-docker-image.ps1 +++ b/kubernetes/windows/dockerbuild/build-and-publish-docker-image.ps1 @@ -15,18 +15,18 @@ Write-Host("current script dir : " + $currentdir + " ") if ($false -eq (Test-Path -Path $currentdir)) { Write-Host("Invalid current dir : " + $currentdir + " ") -ForegroundColor Red - exit + exit 1 } if ([string]::IsNullOrEmpty($image)) { Write-Host "Image parameter shouldnt be null or empty" -ForegroundColor Red - exit + exit 1 } $imageparts = $image.split(":") if (($imageparts.Length -ne 2)){ Write-Host "Image not in valid format. Expected format should be /:" -ForegroundColor Red - exit + exit 1 } $imagetag = $imageparts[1].ToLower() @@ -48,7 +48,7 @@ $dockerFileDir = Split-Path -Path $currentdir Write-Host("builddir dir : " + $dockerFileDir + " ") if ($false -eq (Test-Path -Path $dockerFileDir)) { Write-Host("Invalid dockerFile Dir : " + $dockerFileDir + " ") -ForegroundColor Red - exit + exit 1 } Write-Host "changing directory to DockerFile dir: $dockerFileDir" diff --git a/kubernetes/windows/dockerbuild/build-dev-base-image.ps1 b/kubernetes/windows/dockerbuild/build-dev-base-image.ps1 index 142e20c3f..4b17239d2 100644 --- a/kubernetes/windows/dockerbuild/build-dev-base-image.ps1 +++ b/kubernetes/windows/dockerbuild/build-dev-base-image.ps1 @@ -1,6 +1,6 @@ <# .DESCRIPTION - Builds the Docker Image locally for the server core ltsc base and installs dependencies + Builds the Docker Image locally for the server core ltsc base and installs dependencies #> @@ -9,7 +9,7 @@ Write-Host("current script dir : " + $currentdir + " ") if ($false -eq (Test-Path -Path $currentdir)) { Write-Host("Invalid current dir : " + $currentdir + " ") -ForegroundColor Red - exit + exit 1 } Write-Host "start:Building the cert generator and out oms code via Makefile.ps1" @@ -20,7 +20,7 @@ $dockerFileDir = Split-Path -Path $currentdir Write-Host("builddir dir : " + $dockerFileDir + " ") if ($false -eq (Test-Path -Path $dockerFileDir)) { Write-Host("Invalid dockerFile Dir : " + $dockerFileDir + " ") -ForegroundColor Red - exit + exit 1 } Write-Host "changing directory to DockerFile dir: $dockerFileDir" diff --git a/scripts/build/windows/install-build-pre-requisites.ps1 b/scripts/build/windows/install-build-pre-requisites.ps1 index 7f1c9b54f..1ea316798 100755 --- a/scripts/build/windows/install-build-pre-requisites.ps1 +++ b/scripts/build/windows/install-build-pre-requisites.ps1 @@ -2,7 +2,7 @@ function Install-Go { $tempDir = $env:TEMP if ($false -eq (Test-Path -Path $tempDir)) { Write-Host("Invalid TEMP dir PATH : " + $tempDir + " ") -ForegroundColor Red - exit + exit 
1 } $tempGo = Join-Path -Path $tempDir -ChildPath "gotemp" @@ -10,7 +10,7 @@ function Install-Go { New-Item -Path $tempGo -ItemType "directory" -Force -ErrorAction Stop if ($false -eq (Test-Path -Path $tempGo)) { Write-Host("Invalid tempGo : " + $tempGo + " ") -ForegroundColor Red - exit + exit 1 } $url = "https://dl.google.com/go/go1.15.14.windows-amd64.msi" @@ -35,7 +35,7 @@ function Build-Dependencies { $tempDir = $env:TEMP if ($false -eq (Test-Path -Path $tempDir)) { Write-Host("Invalid TEMP dir PATH : " + $tempDir + " ") -ForegroundColor Red - exit + exit 1 } $tempDependencies = Join-Path -Path $tempDir -ChildPath "gcctemp" @@ -43,7 +43,7 @@ function Build-Dependencies { New-Item -Path $tempDependencies -ItemType "directory" -Force -ErrorAction Stop if ($false -eq (Test-Path -Path $tempDependencies)) { Write-Host("Invalid temp Dir : " + $tempDependencies + " ") -ForegroundColor Red - exit + exit 1 } @@ -82,7 +82,7 @@ function Install-DotNetCoreSDK() { $tempDir = $env:TEMP if ($false -eq (Test-Path -Path $tempDir)) { Write-Host("Invalid TEMP dir : " + $tempDir + " ") -ForegroundColor Red - exit + exit 1 } $dotNetSdkTemp = Join-Path -Path $tempDir -ChildPath "dotNetSdk" @@ -90,7 +90,7 @@ function Install-DotNetCoreSDK() { New-Item -Path $dotNetSdkTemp -ItemType "directory" -Force -ErrorAction Stop if ($false -eq (Test-Path -Path $dotNetSdkTemp)) { Write-Host("Invalid dotNetSdkTemp : " + $tempDir + " ") -ForegroundColor Red - exit + exit 1 } $url = "https://download.visualstudio.microsoft.com/download/pr/4e88f517-196e-4b17-a40c-2692c689661d/eed3f5fca28262f764d8b650585a7278/dotnet-sdk-3.1.301-win-x64.exe" @@ -110,7 +110,7 @@ function Install-Docker() { $tempDir = $env:TEMP if ($false -eq (Test-Path -Path $tempDir)) { Write-Host("Invalid TEMP dir PATH : " + $tempDir + " ") -ForegroundColor Red - exit + exit 1 } $dockerTemp = Join-Path -Path $tempDir -ChildPath "docker" @@ -118,7 +118,7 @@ function Install-Docker() { New-Item -Path $dockerTemp -ItemType "directory" -Force -ErrorAction Stop if ($false -eq (Test-Path -Path $dockerTemp)) { Write-Host("Invalid dockerTemp : " + $tempDir + " ") -ForegroundColor Red - exit + exit 1 } $url = "https://download.docker.com/win/stable/Docker%20Desktop%20Installer.exe" diff --git a/scripts/onboarding/aks/mdmonboarding/mdm_onboarding.ps1 b/scripts/onboarding/aks/mdmonboarding/mdm_onboarding.ps1 index dcf73f098..a5d95c31e 100644 --- a/scripts/onboarding/aks/mdmonboarding/mdm_onboarding.ps1 +++ b/scripts/onboarding/aks/mdmonboarding/mdm_onboarding.ps1 @@ -39,7 +39,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azAksModule) -or ($null -eq $az else { Write-Host("Please run the script as an administrator") -ForegroundColor Red Stop-Transcript - exit + exit 1 } @@ -66,7 +66,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azAksModule) -or ($null -eq $az } catch { Write-Host("Close other powershell logins and try installing the latest modules forAz.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } } @@ -77,7 +77,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azAksModule) -or ($null -eq $az } catch { Write-Host("Close other powershell logins and try installing the latest modules forAz.Accounts in a new powershell window: eg. 
'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } } @@ -88,7 +88,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azAksModule) -or ($null -eq $az } catch { Write-Host("Close other powershell logins and try installing the latest modules for Az.Aks in a new powershell window: eg. 'Install-Module Az.Aks -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } } @@ -103,7 +103,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azAksModule) -or ($null -eq $az Write-Host("Could not import Az.Resources...") -ForegroundColor Red Write-Host("Close other powershell logins and try installing the latest modules for Az.Resources in a new powershell window: eg. 'Install-Module Az.Resources -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } if ($null -eq $azAccountModule) { @@ -114,7 +114,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azAksModule) -or ($null -eq $az Write-Host("Could not import Az.Accounts...") -ForegroundColor Red Write-Host("Close other powershell logins and try installing the latest modules for Az.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } if ($null -eq $azAksModule) { @@ -124,7 +124,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azAksModule) -or ($null -eq $az catch { Write-Host("Could not import Az.Aks... Please reinstall this Module") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -132,7 +132,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azAksModule) -or ($null -eq $az 2 { Write-Host("") Stop-Transcript - exit + exit 1 } } } @@ -161,7 +161,7 @@ if ($account.Account -eq $null) { Write-Host("Could not select subscription with ID : " + $SubscriptionId + ". Please make sure the ID you entered is correct and you have access to the cluster" ) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } } else { @@ -181,7 +181,7 @@ else { Write-Host("Could not select subscription with ID : " + $SubscriptionId + ". Please make sure the ID you entered is correct and you have access to the cluster" ) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } } } @@ -196,7 +196,7 @@ if ($notPresent) { Write-Host("Could not find Aks cluster. Please make sure that specified cluster exists: '" + $clusterName + "'is correct and you have access to the cluster") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } Write-Host("Successfully checked specified cluster exists details...") -ForegroundColor Green diff --git a/scripts/onboarding/aks/mdmonboarding/mdm_onboarding_atscale.ps1 b/scripts/onboarding/aks/mdmonboarding/mdm_onboarding_atscale.ps1 index a791bb18e..32311ca61 100644 --- a/scripts/onboarding/aks/mdmonboarding/mdm_onboarding_atscale.ps1 +++ b/scripts/onboarding/aks/mdmonboarding/mdm_onboarding_atscale.ps1 @@ -30,7 +30,7 @@ if (($null -eq $azAccountModule) -or ( $null -eq $azAksModule ) -or ($null -eq $ else { Write-Host("Please run the script as an administrator") -ForegroundColor Red Stop-Transcript - exit + exit 1 } @@ -57,7 +57,7 @@ if (($null -eq $azAccountModule) -or ( $null -eq $azAksModule ) -or ($null -eq $ } catch { Write-Host("Close other powershell logins and try installing the latest modules forAz.Accounts in a new powershell window: eg. 
'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } } @@ -68,7 +68,7 @@ if (($null -eq $azAccountModule) -or ( $null -eq $azAksModule ) -or ($null -eq $ } catch { Write-Host("Close other powershell logins and try installing the latest modules forAz.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } } @@ -79,7 +79,7 @@ if (($null -eq $azAccountModule) -or ( $null -eq $azAksModule ) -or ($null -eq $ } catch { Write-Host("Close other powershell logins and try installing the latest modules for Az.Aks in a new powershell window: eg. 'Install-Module Az.Aks -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } } @@ -94,7 +94,7 @@ if (($null -eq $azAccountModule) -or ( $null -eq $azAksModule ) -or ($null -eq $ Write-Host("Could not import Az.Resources...") -ForegroundColor Red Write-Host("Close other powershell logins and try installing the latest modules for Az.Resources in a new powershell window: eg. 'Install-Module Az.Resources -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } if ($null -eq $azAccountModule) { @@ -105,7 +105,7 @@ if (($null -eq $azAccountModule) -or ( $null -eq $azAksModule ) -or ($null -eq $ Write-Host("Could not import Az.Accounts...") -ForegroundColor Red Write-Host("Close other powershell logins and try installing the latest modules for Az.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } if ($null -eq $azAksModule) { @@ -115,7 +115,7 @@ if (($null -eq $azAccountModule) -or ( $null -eq $azAksModule ) -or ($null -eq $ catch { Write-Host("Could not import Az.Aks... Please reinstall this Module") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -123,7 +123,7 @@ if (($null -eq $azAccountModule) -or ( $null -eq $azAksModule ) -or ($null -eq $ 2 { Write-Host("") Stop-Transcript - exit + exit 1 } } } @@ -152,7 +152,7 @@ if ($account.Account -eq $null) { Write-Host("Could not select subscription with ID : " + $SubscriptionId + ". Please make sure the ID you entered is correct and you have access to the cluster" ) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } } else { @@ -172,7 +172,7 @@ else { Write-Host("Could not select subscription with ID : " + $SubscriptionId + ". Please make sure the ID you entered is correct and you have access to the cluster" ) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } } } @@ -187,7 +187,7 @@ if ($notPresent) { Write-Host("Failed to get Aks clusters in specified subscription. 
Please make sure that you have access to the existing clusters") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } Write-Host("Successfully got all aks clusters ...") -ForegroundColor Green diff --git a/scripts/onboarding/aksengine/kubernetes/AddMonitoringWorkspaceTags.ps1 b/scripts/onboarding/aksengine/kubernetes/AddMonitoringWorkspaceTags.ps1 index 29f629878..a0965f960 100644 --- a/scripts/onboarding/aksengine/kubernetes/AddMonitoringWorkspaceTags.ps1 +++ b/scripts/onboarding/aksengine/kubernetes/AddMonitoringWorkspaceTags.ps1 @@ -64,7 +64,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule)) { else { Write-Host("Please run the script as an administrator") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -89,7 +89,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule)) { } catch { Write-Host("Close other powershell logins and try installing the latest modules for Az.Resources in a new powershell window: eg. 'Install-Module Az.Resources -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } try { Write-Host("Installing Az.Accounts...") @@ -97,7 +97,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule)) { } catch { Write-Host("Close other powershell logins and try installing the latest modules for Az.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } } @@ -109,7 +109,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule)) { Write-Host("Could not import Az.Resources ...") -ForegroundColor Red Write-Host("Close other powershell logins and try installing the latest modules for Az.Resources in a new powershell window: eg. 'Install-Module Az.Resources -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } try { Import-Module Az.Accounts @@ -117,14 +117,14 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule)) { catch { Write-Host("Could not import Az.Accounts... Please reinstall this Module") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } 2 { Write-Host("") Stop-Transcript - exit + exit 1 } } } @@ -138,7 +138,7 @@ if ($NameoftheCloud -like "AzureCloud" -or } else { Write-Host("Error: Monitoring not supported in this cloud: $NameoftheCloud") -ForegroundColor Red - exit + exit 1 } # @@ -151,7 +151,7 @@ if ($notPresent) { Write-Host("Could not find RG. 
Please make sure that the resource group name: '" + $ResourceGroupName + "'is correct and you have access to the aks-engine cluster") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } Write-Host("Successfully checked resource groups details...") -ForegroundColor Green @@ -179,20 +179,20 @@ foreach ($k8MasterVM in $k8sMasterVMsOrVMSSes) { } else { Write-Host("Resource group name: '" + $ResourceGroupName + "'is doesnt have the aks-engine resources") -ForegroundColor Red - exit + exit 1 } } if ($isKubernetesCluster -eq $false) { Write-Host("Resource group name: '" + $ResourceGroupName + "' doesnt have the aks-engine or acs-engine resources") -ForegroundColor Red - exit + exit 1 } # validate specified logAnalytics workspace exists or not $workspaceResource = Get-AzResource -ResourceId $LogAnalyticsWorkspaceResourceId if ($null -eq $workspaceResource) { Write-Host("Specified Log Analytics workspace ResourceId: '" + $LogAnalyticsWorkspaceResourceId + "' doesnt exist or don't have access to it") -ForegroundColor Red - exit + exit 1 } # @@ -202,11 +202,11 @@ foreach ($k8MasterVM in $k8sMasterVMsOrVMSSes) { $r = Get-AzResource -ResourceGroupName $ResourceGroupName -ResourceName $k8MasterVM.Name if ($null -eq $r) { Write-Host("Get-AzResource for Resource Group: " + $ResourceGroupName + "Resource Name :" + $k8MasterVM.Name + " failed" ) -ForegroundColor Red - exit + exit 1 } if ($null -eq $r.Tags) { Write-Host("K8s master VM should have the tags" ) -ForegroundColor Red - exit + exit 1 } if ($r.Tags.ContainsKey("logAnalyticsWorkspaceResourceId")) { $existingLogAnalyticsWorkspaceResourceId = $r.Tags["logAnalyticsWorkspaceResourceId"] @@ -225,7 +225,7 @@ foreach ($k8MasterVM in $k8sMasterVMsOrVMSSes) { $existingclusterName = $r.Tags["clusterName"] if ($existingclusterName -eq $ClusterName) { Write-Host("Ignoring attaching clusterName tag to K8s master VM :" + $k8MasterVM.Name + " since it has already with same tag value" ) -ForegroundColor Yellow - exit + exit 1 } Write-Host("K8s master VM :" + $k8MasterVM.Name + " has the existing tag for clusterName with different from specified one" ) -ForegroundColor Green $r.Tags.Remove("clusterName") diff --git a/scripts/onboarding/managed/disable-monitoring.ps1 b/scripts/onboarding/managed/disable-monitoring.ps1 index bcd135dba..8be60c50d 100644 --- a/scripts/onboarding/managed/disable-monitoring.ps1 +++ b/scripts/onboarding/managed/disable-monitoring.ps1 @@ -61,7 +61,7 @@ if ([string]::IsNullOrEmpty($azureCloudName) -eq $true) { } else { Write-Host("Specified Azure Cloud name is : $azureCloudName") Write-Host("Only supported Azure clouds are : AzureCloud and AzureUSGovernment") - exit + exit 1 } } @@ -89,7 +89,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - else { Write-Host("Please re-launch the script with elevated administrator") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -116,7 +116,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - } catch { Write-Host("Close other powershell logins and try installing the latest modules forAz.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } } @@ -127,7 +127,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - } catch { Write-Host("Close other powershell logins and try installing the latest modules forAz.Accounts in a new powershell window: eg. 
'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } } @@ -139,7 +139,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - } catch { Write-Host("Close other powershell logins and try installing the latest modules for Az.OperationalInsights in a new powershell window: eg. 'Install-Module Az.OperationalInsights -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } } @@ -154,7 +154,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - Write-Host("Could not import Az.Resources...") -ForegroundColor Red Write-Host("Close other powershell logins and try installing the latest modules for Az.Resources in a new powershell window: eg. 'Install-Module Az.Resources -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } if ($null -eq $azAccountModule) { @@ -165,7 +165,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - Write-Host("Could not import Az.Accounts...") -ForegroundColor Red Write-Host("Close other powershell logins and try installing the latest modules for Az.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -176,7 +176,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - catch { Write-Host("Could not import Az.OperationalInsights... Please reinstall this Module") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -184,14 +184,14 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - 2 { Write-Host("") Stop-Transcript - exit + exit 1 } } } if ([string]::IsNullOrEmpty($clusterResourceId)) { Write-Host("Specified Azure ClusterResourceId should not be NULL or empty") -ForegroundColor Red - exit + exit 1 } if ([string]::IsNullOrEmpty($kubeContext)) { @@ -211,7 +211,7 @@ if ($clusterResourceId.StartsWith("/") -eq $false) { if ($clusterResourceId.Split("/").Length -ne 9){ Write-Host("Provided Cluster Resource Id is not in expected format") -ForegroundColor Red - exit + exit 1 } if (($clusterResourceId.ToLower().Contains("microsoft.kubernetes/connectedclusters") -ne $true) -and @@ -219,7 +219,7 @@ if (($clusterResourceId.ToLower().Contains("microsoft.kubernetes/connectedcluste ($clusterResourceId.ToLower().Contains("microsoft.containerservice/managedclusters") -ne $true) ) { Write-Host("Provided cluster ResourceId is not supported cluster type: $clusterResourceId") -ForegroundColor Red - exit + exit 1 } if ($clusterResourceId.ToLower().Contains("microsoft.kubernetes/connectedclusters") -eq $true) { @@ -284,7 +284,7 @@ if ($null -eq $account.Account) { Write-Host("Could not select subscription with ID : " + $clusterSubscriptionId + ". Please make sure the ID you entered is correct and you have access to the cluster" ) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } } else { @@ -304,7 +304,7 @@ else { Write-Host("Could not select subscription with ID : " + $clusterSubscriptionId + ". 
Please make sure the ID you entered is correct and you have access to the cluster" ) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } } } @@ -314,7 +314,7 @@ Write-Host("Checking specified Azure Managed cluster resource exists and got acc $clusterResource = Get-AzResource -ResourceId $clusterResourceId if ($null -eq $clusterResource) { Write-Host("specified Azure Managed cluster resource id either you dont have access or doesnt exist") -ForegroundColor Red - exit + exit 1 } $clusterRegion = $clusterResource.Location.ToLower() @@ -323,7 +323,7 @@ if ($isArcK8sCluster -eq $true) { $clusterIdentity = $clusterResource.identity.type.ToString().ToLower() if ($clusterIdentity.Contains("systemassigned") -eq $false) { Write-Host("Identity of Azure Arc enabled Kubernetes cluster should be systemassigned but it has identity: $clusterIdentity") -ForegroundColor Red - exit + exit 1 } } @@ -345,7 +345,7 @@ try { $releases = helm list --filter $helmChartReleaseName if ($releases.Count -lt 2) { Write-Host("There is no existing release with name : $helmChartReleaseName") -ForegroundColor Yellow - exit + exit 1 } for($index =0 ; $index -lt $releases.Count ; $index ++ ) { @@ -360,7 +360,7 @@ try { $releases = helm list --filter $helmChartReleaseName --kube-context $kubeContext if ($releases.Count -lt 2) { Write-Host("There is no existing release with name : $helmChartReleaseName") -ForegroundColor Yellow - exit + exit 1 } for($index =0 ; $index -lt $releases.Count ; $index ++ ) { @@ -374,7 +374,7 @@ try { } catch { Write-Host ("Failed to delete Azure Monitor for containers HELM chart : '" + $Error[0] + "' ") -ForegroundColor Red - exit + exit 1 } Write-Host("Successfully disabled Azure Monitor for containers for cluster: $clusteResourceId") -ForegroundColor Green diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index e79ef2138..27bc2fd62 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -81,7 +81,7 @@ if ([string]::IsNullOrEmpty($azureCloudName) -eq $true) { } else { Write-Host("Specified Azure Cloud name is : $azureCloudName") Write-Host("Only supported azure clouds are : AzureCloud and AzureUSGovernment") - exit + exit 1 } } @@ -109,7 +109,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - else { Write-Host("Please re-launch the script with elevated administrator") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -136,7 +136,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - } catch { Write-Host("Close other powershell logins and try installing the latest modules forAz.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } } @@ -147,7 +147,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - } catch { Write-Host("Close other powershell logins and try installing the latest modules forAz.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } } @@ -159,7 +159,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - } catch { Write-Host("Close other powershell logins and try installing the latest modules for Az.OperationalInsights in a new powershell window: eg. 
'Install-Module Az.OperationalInsights -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } } @@ -174,7 +174,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - Write-Host("Could not import Az.Resources...") -ForegroundColor Red Write-Host("Close other powershell logins and try installing the latest modules for Az.Resources in a new powershell window: eg. 'Install-Module Az.Resources -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } if ($null -eq $azAccountModule) { @@ -185,7 +185,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - Write-Host("Could not import Az.Accounts...") -ForegroundColor Red Write-Host("Close other powershell logins and try installing the latest modules for Az.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -196,7 +196,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - catch { Write-Host("Could not import Az.OperationalInsights... Please reinstall this Module") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -204,14 +204,14 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - 2 { Write-Host("") Stop-Transcript - exit + exit 1 } } } if ([string]::IsNullOrEmpty($clusterResourceId)) { Write-Host("Specified Azure Arc enabled Kubernetes ClusterResourceId should not be NULL or empty") -ForegroundColor Red - exit + exit 1 } if ([string]::IsNullOrEmpty($kubeContext)) { @@ -232,7 +232,7 @@ if ($clusterResourceId.StartsWith("/") -eq $false) { if ($clusterResourceId.Split("/").Length -ne 9) { Write-Host("Provided Cluster Resource Id is not in expected format") -ForegroundColor Red - exit + exit 1 } if (($clusterResourceId.ToLower().Contains("microsoft.kubernetes/connectedclusters") -ne $true) -and @@ -240,7 +240,7 @@ if (($clusterResourceId.ToLower().Contains("microsoft.kubernetes/connectedcluste ($clusterResourceId.ToLower().Contains("microsoft.containerservice/managedclusters") -ne $true) ) { Write-Host("Provided cluster ResourceId is not supported cluster type: $clusterResourceId") -ForegroundColor Red - exit + exit 1 } if (([string]::IsNullOrEmpty($servicePrincipalClientId) -eq $false) -and @@ -305,7 +305,7 @@ if ($null -eq $account.Account) { Write-Host("Could not select subscription with ID : " + $clusterSubscriptionId + ". Please make sure the ID you entered is correct and you have access to the cluster" ) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } } else { @@ -325,7 +325,7 @@ else { Write-Host("Could not select subscription with ID : " + $clusterSubscriptionId + ". 
Please make sure the ID you entered is correct and you have access to the cluster" ) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } } } @@ -335,7 +335,7 @@ Write-Host("Checking specified Azure Managed cluster resource exists and got acc $clusterResource = Get-AzResource -ResourceId $clusterResourceId if ($null -eq $clusterResource) { Write-Host("specified Azure Managed cluster resource id either you dont have access or doesnt exist") -ForegroundColor Red - exit + exit 1 } $clusterRegion = $clusterResource.Location.ToLower() @@ -344,7 +344,7 @@ if ($isArcK8sCluster -eq $true) { $clusterIdentity = $clusterResource.identity.type.ToString().ToLower() if ($clusterIdentity.contains("systemassigned") -eq $false) { Write-Host("Identity of Azure Arc enabled Kubernetes cluster should be systemassigned but it has identity: $clusterIdentity") -ForegroundColor Red - exit + exit 1 } } @@ -450,7 +450,7 @@ else { Write-Host("using specified Log Analytics Workspace ResourceId: '" + $workspaceResourceId + "' ") if ([string]::IsNullOrEmpty($workspaceResourceId)) { Write-Host("Specified workspaceResourceId should not be NULL or empty") -ForegroundColor Red - exit + exit 1 } $workspaceResourceId = $workspaceResourceId.Trim() if ($workspaceResourceId.EndsWith("/")) { @@ -465,7 +465,7 @@ else { if (($workspaceResourceId.ToLower().Contains("microsoft.operationalinsights/workspaces") -ne $true) -or ($workspaceResourceId.Split("/").Length -ne 9)) { Write-Host("Provided workspace resource id should be in this format /subscriptions//resourceGroups//providers/Microsoft.OperationalInsights/workspaces/") -ForegroundColor Red - exit + exit 1 } $workspaceResourceParts = $workspaceResourceId.Split("/") @@ -482,7 +482,7 @@ else { $WorkspaceInformation = Get-AzOperationalInsightsWorkspace -ResourceGroupName $workspaceResourceGroup -Name $workspaceName -ErrorAction SilentlyContinue if ($null -eq $WorkspaceInformation) { Write-Host("Specified Log Analytics Workspace: '" + $workspaceName + "' in Resource Group: '" + $workspaceResourceGroup + "' in Subscription: '" + $workspaceSubscriptionId + "' does not exist") -ForegroundColor Red - exit + exit 1 } } @@ -520,7 +520,7 @@ try { } catch { Write-Host ("Failed to workspace details. 
Please validate whether you have Log Analytics Contributor role on the workspace error: '" + $Error[0] + "' ") -ForegroundColor Red - exit + exit 1 } diff --git a/scripts/troubleshoot/TroubleshootError.ps1 b/scripts/troubleshoot/TroubleshootError.ps1 index 4c2d95ac6..6d97c53d5 100644 --- a/scripts/troubleshoot/TroubleshootError.ps1 +++ b/scripts/troubleshoot/TroubleshootError.ps1 @@ -35,7 +35,7 @@ if (($null -eq $ClusterResourceId) -or ($ClusterResourceId.Split("/").Length -ne Write-Host("Resource Id Format for AKS cluster is : /subscriptions//resourceGroups//providers/Microsoft.ContainerService/managedClusters/") -ForegroundColor Red Write-Host("Resource Id Format for ARO cluster is : /subscriptions//resourceGroups//providers/Microsoft.ContainerService/openShiftManagedClusters/") -ForegroundColor Red Stop-Transcript - exit + exit 1 } $isClusterAndWorkspaceInDifferentSubs = $false @@ -70,7 +70,7 @@ if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAcco else { Write-Host("Please re-launch the script with elevated administrator") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -97,7 +97,7 @@ if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAcco catch { Write-Host("Close other powershell logins and try installing the latest modules for Az.ResourceGraph in a new powershell window: eg. 'Install-Module Az.ResourceGraph -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } if ($null -eq $azAksModule) { @@ -108,7 +108,7 @@ if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAcco catch { Write-Host("Close other powershell logins and try installing the latest modules for Az.Aks in a new powershell window: eg. 'Install-Module Az.Aks -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -120,7 +120,7 @@ if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAcco catch { Write-Host("Close other powershell logins and try installing the latest modules forAz.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -132,7 +132,7 @@ if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAcco catch { Write-Host("Close other powershell logins and try installing the latest modules forAz.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -145,7 +145,7 @@ if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAcco catch { Write-Host("Close other powershell logins and try installing the latest modules for Az.OperationalInsights in a new powershell window: eg. 'Install-Module Az.OperationalInsights -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -159,7 +159,7 @@ if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAcco Write-Host("Could not Import Az.ResourceGraph...") -ForegroundColor Red Write-Host("Close other powershell logins and try installing the latest modules for Az.ResourceGraph in a new powershell window: eg. 
'Install-Module Az.ResourceGraph -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -171,7 +171,7 @@ if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAcco Write-Host("Could not Import Az.Aks...") -ForegroundColor Red Write-Host("Close other powershell logins and try installing the latest modules for Az.Aks in a new powershell window: eg. 'Install-Module Az.Aks -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -183,7 +183,7 @@ if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAcco Write-Host("Could not import Az.Resources...") -ForegroundColor Red Write-Host("Close other powershell logins and try installing the latest modules for Az.Resources in a new powershell window: eg. 'Install-Module Az.Resources -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } if ($null -eq $azAccountModule) { @@ -194,7 +194,7 @@ if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAcco Write-Host("Could not import Az.Accounts...") -ForegroundColor Red Write-Host("Close other powershell logins and try installing the latest modules for Az.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -205,7 +205,7 @@ if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAcco catch { Write-Host("Could not import Az.OperationalInsights... Please reinstall this Module") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -213,7 +213,7 @@ if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAcco 2 { Write-Host("") Stop-Transcript - exit + exit 1 } } } @@ -277,7 +277,7 @@ if ($null -eq $account.Account) { Write-Host("Could not select subscription with ID : " + $ClusterSubscriptionId + ". Please make sure the SubscriptionId you entered is correct and you have access to the Subscription" ) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } } else { @@ -297,7 +297,7 @@ else { Write-Host("Could not select subscription with ID : " + $ClusterSubscriptionId + ". Please make sure the SubscriptionId you entered is correct and you have access to the Subscription" ) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } } } @@ -313,7 +313,7 @@ if ($notPresent) { Write-Host("Could not find RG. 
Please make sure that the resource group name: '" + $ResourceGroupName + "'is correct and you have access to the Resource Group") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } Write-Host("Successfully checked resource groups details...") -ForegroundColor Green @@ -327,7 +327,7 @@ try { Write-Host("Could not fetch cluster details: Please make sure that the '" + $ClusterType + "' Cluster name: '" + $ClusterName + "' is correct and you have access to the cluster") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } else { Write-Host("Successfully checked '" + $ClusterType + "' Cluster details...") -ForegroundColor Green @@ -342,7 +342,7 @@ try { Write-Host($AksOptInLink) -ForegroundColor Red; Write-Host(""); Stop-Transcript - exit + exit 1 } $omsagentconfig = $props.addonprofiles.omsagent.config; @@ -364,7 +364,7 @@ try { Write-Host("Could not fetch cluster details: Please make sure that the '" + $ClusterType + "' Cluster name: '" + $ClusterName + "' is correct and you have access to the cluster") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } $monitorProfile = $ResourceDetail.aroproperties.monitorprofile @@ -373,7 +373,7 @@ try { Write-Host($AksOptInLink) -ForegroundColor Red; Write-Host(""); Stop-Transcript - exit + exit 1 } $LogAnalyticsWorkspaceResourceID = $monitorProfile.workspaceresourceid @@ -385,7 +385,7 @@ catch { Write-Host("Could not fetch cluster details: Please make sure that the '" + $ClusterType + "' Cluster name: '" + $ClusterName + "' is correct and you have access to the cluster") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } @@ -511,7 +511,7 @@ if ($null -eq $LogAnalyticsWorkspaceResourceID) { } Write-Host("") Stop-Transcript - exit + exit 1 } else { @@ -532,7 +532,7 @@ else { Write-Host("Could not change to Workspace subscriptionId : '" + $workspaceSubscriptionId + "'." ) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } @@ -557,7 +557,7 @@ else { } Write-Host("") Stop-Transcript - exit + exit 1 } Write-Host("Successfully fetched workspace subcription details...") -ForegroundColor Green Write-Host("") @@ -581,7 +581,7 @@ else { Write-Host("Opt-in - " + $AksOptInLink) -ForegroundColor Red } Stop-Transcript - exit + exit 1 } Write-Host("Successfully fetched workspace resource group...") -ForegroundColor Green Write-Host("") @@ -610,7 +610,7 @@ else { } Write-Host("") Stop-Transcript - exit + exit 1 } $WorkspaceLocation = $WorkspaceInformation.Location @@ -619,7 +619,7 @@ else { Write-Host("Cannot fetch workspace location. Please try again...") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } $WorkspacePricingTier = $WorkspaceInformation.sku @@ -635,7 +635,7 @@ else { Write-Host("Failed to get the list of solutions onboarded to the workspace. Please make sure that it hasn't been deleted and you have access to it.") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } try { @@ -647,7 +647,7 @@ else { Write-Host("Failed to get ContainerInsights solution details from the workspace") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } $isSolutionOnboarded = $WorkspaceIPDetails.Enabled[$ContainerInsightsIndex] @@ -711,7 +711,7 @@ try { if ($WorkspaceUsage.CurrentValue -ge $WorkspaceUsage.Limit) { Write-Host("Workspace usage has reached or over the configured daily cap. 
Please increase the daily cap limits or wait for next reset interval") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } Write-Host("Workspace doesnt have daily cap configured") -ForegroundColor Green @@ -720,7 +720,7 @@ catch { Write-Host("Failed to get usage details of the workspace") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } @@ -757,7 +757,7 @@ if ("AKS" -eq $ClusterType ) { Write-Host($AksOptInLink) -ForegroundColor Red Write-Host($contactUSMessage) Stop-Transcript - exit + exit 1 } $rsPodStatus = $rsPod.status @@ -778,7 +778,7 @@ if ("AKS" -eq $ClusterType ) { Write-Host($AksOptInLink) -ForegroundColor Red Write-Host($contactUSMessage) Stop-Transcript - exit + exit 1 } Write-Host( "omsagent replicaset pod running OK.") -ForegroundColor Green @@ -786,7 +786,7 @@ if ("AKS" -eq $ClusterType ) { catch { Write-Host ("Failed to get omsagent replicatset pod info using kubectl get rs : '" + $Error[0] + "' ") -ForegroundColor Red Stop-Transcript - exit + exit 1 } Write-Host("Checking whether the omsagent daemonset pod running correctly ...") @@ -795,7 +795,7 @@ if ("AKS" -eq $ClusterType ) { if (($null -eq $ds) -or ($null -eq $ds.Items) -or ($ds.Items.Length -ne 1)) { Write-Host( "omsagent replicaset pod not scheduled or failed to schedule." + $contactUSMessage) Stop-Transcript - exit + exit 1 } $dsStatus = $ds.Items[0].status @@ -809,7 +809,7 @@ if ("AKS" -eq $ClusterType ) { Write-Host($dsStatus) Write-Host($contactUSMessage) Stop-Transcript - exit + exit 1 } Write-Host( "omsagent daemonset pod running OK.") -ForegroundColor Green @@ -817,7 +817,7 @@ if ("AKS" -eq $ClusterType ) { catch { Write-Host ("Failed to execute the script : '" + $Error[0] + "' ") -ForegroundColor Red Stop-Transcript - exit + exit 1 } Write-Host("Checking whether the omsagent heatlhservice running correctly ...") @@ -826,7 +826,7 @@ if ("AKS" -eq $ClusterType ) { if ($healthservice.Items.Length -ne 1) { Write-Host( "omsagent healthservice not scheduled or failed to schedule." + $contactUSMessage) Stop-Transcript - exit + exit 1 } Write-Host( "omsagent healthservice running OK.") -ForegroundColor Green @@ -834,7 +834,7 @@ if ("AKS" -eq $ClusterType ) { catch { Write-Host ("Failed to execute kubectl get services command : '" + $Error[0] + "' ") -ForegroundColor Red Stop-Transcript - exit + exit 1 } if ($isClusterAndWorkspaceInDifferentSubs) { @@ -851,7 +851,7 @@ if ("AKS" -eq $ClusterType ) { catch { Write-Host ("Failed to get workspace details. Please validate whether you have Log Analytics Contributor role on the workspace error: '" + $Error[0] + "' ") -ForegroundColor Red Stop-Transcript - exit + exit 1 } Write-Host("Checking whether the WorkspaceGuid and key matching with configured log analytics workspace ...") @@ -862,7 +862,7 @@ if ("AKS" -eq $ClusterType ) { if ((($workspaceGuidConfiguredOnAgent -eq $workspaceGUID) -and ($workspaceKeyConfiguredOnAgent -eq $workspacePrimarySharedKey)) -eq $false) { Write-Host ("Error - Log Analytics Workspace Guid and key configured on the agent not matching with details of the Workspace. 
Please verify and fix with the correct workspace Guid and Key") -ForegroundColor Red Stop-Transcript - exit + exit 1 } Write-Host("Workspace Guid and Key on the agent matching with the Workspace") -ForegroundColor Green @@ -870,7 +870,7 @@ if ("AKS" -eq $ClusterType ) { catch { Write-Host ("Failed to execute the script : '" + $Error[0] + "' ") -ForegroundColor Red Stop-Transcript - exit + exit 1 } Write-Host("Checking agent version...") @@ -885,7 +885,7 @@ if ("AKS" -eq $ClusterType ) { } catch { Write-Host ("Failed to execute the script : '" + $Error[0] + "' ") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } diff --git a/scripts/troubleshoot/TroubleshootError_AcsEngine.ps1 b/scripts/troubleshoot/TroubleshootError_AcsEngine.ps1 index 1f1e1ba5d..5662d3f79 100644 --- a/scripts/troubleshoot/TroubleshootError_AcsEngine.ps1 +++ b/scripts/troubleshoot/TroubleshootError_AcsEngine.ps1 @@ -45,7 +45,7 @@ if (($null -eq $azureRmProfileModule) -or ($null -eq $azureRmResourcesModule) -o else { Write-Host("Please run the script as an administrator") -ForegroundColor Red Stop-Transcript - exit + exit 1 } $message = "This script will try to install the latest versions of the following Modules : ` @@ -69,7 +69,7 @@ if (($null -eq $azureRmProfileModule) -or ($null -eq $azureRmResourcesModule) -o } catch { Write-Host("Close other powershell logins and try installing the latest modules for AzureRM.profile in a new powershell window: eg. 'Install-Module AzureRM.profile -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } try { Write-Host("Installing AzureRM.Resources...") @@ -77,7 +77,7 @@ if (($null -eq $azureRmProfileModule) -or ($null -eq $azureRmResourcesModule) -o } catch { Write-Host("Close other powershell logins and try installing the latest modules for AzureRM.Resoureces in a new powershell window: eg. 'Install-Module AzureRM.Resoureces -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } try { @@ -86,7 +86,7 @@ if (($null -eq $azureRmProfileModule) -or ($null -eq $azureRmResourcesModule) -o } catch { Write-Host("Close other powershell logins and try installing the latest modules for AzureRM.OperationalInsights in a new powershell window: eg. 'Install-Module AzureRM.OperationalInsights -Repository PSGallery -Force'") -ForegroundColor Red - exit + exit 1 } } 1 { @@ -97,7 +97,7 @@ if (($null -eq $azureRmProfileModule) -or ($null -eq $azureRmResourcesModule) -o Write-Host("Could not import AzureRM.profile...") -ForegroundColor Red Write-Host("Close other powershell logins and try installing the latest modules for AzureRM.profile in a new powershell window: eg. 'Install-Module AzureRM.profile -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } try { Import-Module AzureRM.Resources @@ -105,7 +105,7 @@ if (($null -eq $azureRmProfileModule) -or ($null -eq $azureRmResourcesModule) -o catch { Write-Host("Could not import AzureRM.Resources... Please reinstall this Module") -ForegroundColor Red Stop-Transcript - exit + exit 1 } try { Import-Module AzureRM.OperationalInsights @@ -113,7 +113,7 @@ if (($null -eq $azureRmProfileModule) -or ($null -eq $azureRmResourcesModule) -o catch { Write-Host("Could not import AzureRM.OperationalInsights... Please reinstall this Module") -ForegroundColor Red Stop-Transcript - exit + exit 1 } Write-Host("Running troubleshooting script... 
Please reinstall this Module") Write-Host("") @@ -121,7 +121,7 @@ if (($null -eq $azureRmProfileModule) -or ($null -eq $azureRmResourcesModule) -o 2 { Write-Host("") Stop-Transcript - exit + exit 1 } } } @@ -151,7 +151,7 @@ if ($null -eq $account.Account) { Write-Host("Could not select subscription with ID : " + $SubscriptionId + ". Please make sure the SubscriptionId you entered is correct and you have access to the Subscription" ) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } } else { @@ -171,7 +171,7 @@ else { Write-Host("Could not select subscription with ID : " + $SubscriptionId + ". Please make sure the SubscriptionId you entered is correct and you have access to the Subscription" ) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } } } @@ -187,7 +187,7 @@ if ($notPresent) { Write-Host("Could not find RG. Please make sure that the resource group name: '" + $ResourceGroupName + "'is correct and you have access to the Resource Group") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } Write-Host("Successfully checked resource groups details...") -ForegroundColor Green @@ -197,13 +197,13 @@ Write-Host("Successfully checked resource groups details...") -ForegroundColor G if ([string]::IsNullOrEmpty($KubeConfig)) { Write-Host("KubeConfig should not be NULL or empty") -ForegroundColor Red Stop-Transcript - exit + exit 1 } if ((Test-Path $KubeConfig -PathType Leaf) -ne $true) { Write-Host("provided KubeConfig path : '" + $KubeConfig + "' doesnt exist or you dont have read access") -ForegroundColor Red Stop-Transcript - exit + exit 1 } # @@ -249,13 +249,13 @@ foreach ($k8MasterVM in $k8sMasterVMsOrVMSSes) { } else { Write-Host("This Resource group : '" + $ResourceGroupName + "'does not have the AKS-engine or ACS-Engine Kubernetes resources") -ForegroundColor Red - exit + exit 1 } } if ($isKubernetesCluster -eq $false) { Write-Host("Monitoring only supported for AKS-Engine or ACS-Engine with Kubernetes") -ForegroundColor Red - exit + exit 1 } Write-Host("Successfully checked the AKS-Engine or ACS-Engine Kuberentes cluster resources in specified resource group") -ForegroundColor Green @@ -270,7 +270,7 @@ foreach ($k8MasterVM in $k8sMasterVMsOrVMSSes) { if ($null -eq $r) { Write-Host("Get-AzureRmResource for Resource Group: " + $ResourceGroupName + "Resource Name :" + $k8MasterVM.Name + " failed" ) -ForegroundColor Red - exit + exit 1 } if ($null -eq $r.Tags) { @@ -279,7 +279,7 @@ foreach ($k8MasterVM in $k8sMasterVMsOrVMSSes) { Write-Host("Please try to opt out of monitoring and opt-in using the following links:") -ForegroundColor Red Write-Host("Opt-out - " + $OptOutLink) -ForegroundColor Red Write-Host("Opt-in - " + $OptInLink) -ForegroundColor Red - exit + exit 1 } if ($r.Tags.ContainsKey("logAnalyticsWorkspaceResourceId")) { @@ -300,7 +300,7 @@ if ($null -eq $LogAnalyticsWorkspaceResourceID) { Write-Host("There is no existing logAnalyticsWorkspaceResourceId tag on AKS-Engine k8 master nodes or VMSSes so this indicates this cluster not enabled monitoring or tags have been removed" ) -ForegroundColor Red Write-Host("Please try to opt-in for monitoring using the following links:") -ForegroundColor Red Write-Host("Opt-in - " + $OptInLink) -ForegroundColor Red - exit + exit 1 } else { @@ -309,7 +309,7 @@ else { Write-Host("Please add the clusterName tag with the value of clusterName used during the omsagent agent onboarding. 
Refer below link for details:") -ForegroundColor Red Write-Host("Opt-in - " + $OptInLink) -ForegroundColor Red - exit + exit 1 } Write-Host("Configured LogAnalyticsWorkspaceResourceId: : '" + $LogAnalyticsWorkspaceResourceID + "' ") @@ -328,7 +328,7 @@ else { Write-Host("Could not change to Workspace subscriptionId : '" + $workspaceSubscriptionId + "'." ) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } @@ -347,7 +347,7 @@ else { Write-Host("Opt-in - " + $OptInLink) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } Write-Host("Successfully fetched workspace subcription details...") -ForegroundColor Green Write-Host("") @@ -364,7 +364,7 @@ else { Write-Host("Opt-out - " + $OptOutLink) -ForegroundColor Red Write-Host("Opt-in - " + $OptInLink) -ForegroundColor Red Stop-Transcript - exit + exit 1 } Write-Host("Successfully fetched workspace resource group...") -ForegroundColor Green Write-Host("") @@ -386,7 +386,7 @@ else { Write-Host("Opt-in - " + $OptInLink) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } $WorkspaceLocation = $WorkspaceInformation.Location @@ -396,7 +396,7 @@ else { Write-Host("Cannot fetch workspace location. Please try again...") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } $WorkspacePricingTier = $WorkspaceInformation.sku @@ -413,7 +413,7 @@ else { Write-Host("Failed to get the list of solutions onboarded to the workspace. Please make sure that it hasn't been deleted and you have access to it.") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } try { @@ -425,7 +425,7 @@ else { Write-Host("Failed to get ContainerInsights solution details from the workspace") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } $isSolutionOnboarded = $WorkspaceIPDetails.Enabled[$ContainerInsightsIndex] @@ -498,7 +498,7 @@ try { } catch { Write-Host ("Failed to execute the script : '" + $Error[0] + "' ") -ForegroundColor Red Stop-Transcript - exit + exit 1 } Write-Host("") diff --git a/scripts/troubleshoot/TroubleshootError_nonAzureK8s.ps1 b/scripts/troubleshoot/TroubleshootError_nonAzureK8s.ps1 index 14b080b23..76bbad16c 100644 --- a/scripts/troubleshoot/TroubleshootError_nonAzureK8s.ps1 +++ b/scripts/troubleshoot/TroubleshootError_nonAzureK8s.ps1 @@ -47,25 +47,25 @@ Write-Host("LogAnalyticsWorkspaceResourceId: : '" + $azureLogAnalyticsWorkspaceR if (($azureLogAnalyticsWorkspaceResourceId.ToLower().Contains("microsoft.operationalinsights/workspaces") -ne $true) -or ($azureLogAnalyticsWorkspaceResourceId.Split("/").Length -ne 9)) { Write-Host("Provided Azure Log Analytics resource id should be in this format /subscriptions//resourceGroups//providers/Microsoft.OperationalInsights/workspaces/") -ForegroundColor Red Stop-Transcript - exit + exit 1 } if ([string]::IsNullOrEmpty($kubeConfig)) { Write-Host("kubeConfig should not be NULL or empty") -ForegroundColor Red Stop-Transcript - exit + exit 1 } if ((Test-Path $kubeConfig -PathType Leaf) -ne $true) { Write-Host("provided kubeConfig path : '" + $kubeConfig + "' doesnt exist or you dont have read access") -ForegroundColor Red Stop-Transcript - exit + exit 1 } if ([string]::IsNullOrEmpty($clusterContextInKubeconfig)) { Write-Host("provide clusterContext should be valid context in the provided kubeconfig") -ForegroundColor Red Stop-Transcript - exit + exit 1 } # checks the all required Powershell modules exist and if not exists, request the user permission to install @@ -92,7 +92,7 @@ if (($null -eq $azAccountModule) -or 
($null -eq $azResourcesModule) -or ($null - else { Write-Host("Please re-launch the script with elevated administrator") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -120,7 +120,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - catch { Write-Host("Close other powershell logins and try installing the latest modules forAz.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -132,7 +132,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - catch { Write-Host("Close other powershell logins and try installing the latest modules forAz.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -145,7 +145,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - catch { Write-Host("Close other powershell logins and try installing the latest modules for Az.OperationalInsights in a new powershell window: eg. 'Install-Module Az.OperationalInsights -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -160,7 +160,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - Write-Host("Could not import Az.Resources...") -ForegroundColor Red Write-Host("Close other powershell logins and try installing the latest modules for Az.Resources in a new powershell window: eg. 'Install-Module Az.Resources -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } if ($null -eq $azAccountModule) { @@ -171,7 +171,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - Write-Host("Could not import Az.Accounts...") -ForegroundColor Red Write-Host("Close other powershell logins and try installing the latest modules for Az.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -182,7 +182,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - catch { Write-Host("Could not import Az.OperationalInsights... Please reinstall this Module") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -190,7 +190,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - 2 { Write-Host("") Stop-Transcript - exit + exit 1 } } } @@ -222,7 +222,7 @@ if ($null -eq $account.Account) { Write-Host("Could not select subscription with ID : " + $workspaceSubscriptionId + ". Please make sure the SubscriptionId you entered is correct and you have access to the cluster" ) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } } else { @@ -242,7 +242,7 @@ else { Write-Host("Could not select subscription with ID : " + $workspaceSubscriptionId + ". Please make sure the ID you entered is correct and you have access to the cluster" ) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } } } @@ -253,7 +253,7 @@ $workspaceResource = Get-AzResource -ResourceId $azureLogAnalyticsWorkspaceResou if ($null -eq $workspaceResource) { Write-Host("specified Azure Log Analytics resource id: " + $azureLogAnalyticsWorkspaceResourceId + ". 
either you dont have access or doesnt exist") -ForegroundColor Red Stop-Transcript - exit + exit 1 } # @@ -272,7 +272,7 @@ catch { Write-Host("Opt-in - " + $OptInLink) -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } $WorkspaceLocation = $WorkspaceInformation.Location @@ -281,7 +281,7 @@ if ($null -eq $WorkspaceLocation) { Write-Host("Cannot fetch workspace location. Please try again...") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } $WorkspacePricingTier = $WorkspaceInformation.sku @@ -297,7 +297,7 @@ catch { Write-Host("Failed to get the list of solutions onboarded to the workspace. Please make sure that it hasn't been deleted and you have access to it.") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } try { @@ -309,7 +309,7 @@ catch { Write-Host("Failed to get ContainerInsights solution details from the workspace") -ForegroundColor Red Write-Host("") Stop-Transcript - exit + exit 1 } $isSolutionOnboarded = $WorkspaceIPDetails.Enabled[$ContainerInsightsIndex] @@ -317,7 +317,7 @@ if ($isSolutionOnboarded) { if ($WorkspacePricingTier -eq "Free") { Write-Host("Pricing tier of the configured LogAnalytics workspace is Free so you may need to upgrade to pricing tier to non-Free") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } else { @@ -356,13 +356,13 @@ else { Write-Host ("Template deployment failed with an error: '" + $Error[0] + "' ") -ForegroundColor Red Write-Host($contactUSMessage) -ForegroundColor Red Stop-Transcript - exit + exit 1 } } else { Write-Host("The container health solution isn't onboarded to your cluster. This required for the monitoring to work.") -ForegroundColor Red Stop-Transcript - exit + exit 1 } } @@ -382,7 +382,7 @@ try { if ($null -eq $rsPod) { Write-Host( "omsagent replicaset pod not scheduled or failed to scheduled." + $contactUSMessage) -ForegroundColor Red Stop-Transcript - exit + exit 1 } $rsPodStatus = $rsPod.status if ((($rsPodStatus.availableReplicas -eq 1) -and @@ -393,7 +393,7 @@ try { Write-Host($rsPodStatus) Write-Host($contactUSMessage) Stop-Transcript - exit + exit 1 } Write-Host( "omsagent replicaset pod running OK.") -ForegroundColor Green @@ -401,7 +401,7 @@ try { catch { Write-Host ("Failed to get omsagent replicatset pod info using kubectl get rs : '" + $Error[0] + "' ") -ForegroundColor Red Stop-Transcript - exit + exit 1 } Write-Host("Checking whether the omsagent daemonset pod running correctly ...") @@ -410,7 +410,7 @@ try { if ($ds.Items.Length -ne 1) { Write-Host( "omsagent replicaset pod not scheduled or failed to schedule." + $contactUSMessage) -ForegroundColor Red Stop-Transcript - exit + exit 1 } $dsStatus = $ds.Items[0].status @@ -424,7 +424,7 @@ try { Write-Host($rsPodStatus) Write-Host($contactUSMessage) Stop-Transcript - exit + exit 1 } Write-Host( "omsagent daemonset pod running OK.") -ForegroundColor Green @@ -432,7 +432,7 @@ try { catch { Write-Host ("Failed to execute the script : '" + $Error[0] + "' ") -ForegroundColor Red Stop-Transcript - exit + exit 1 } Write-Host("Checking whether the omsagent heatlhservice running correctly ...") @@ -441,7 +441,7 @@ try { if ($healthservice.Items.Length -ne 1) { Write-Host( "omsagent healthservice not scheduled or failed to schedule." 
+ $contactUSMessage) Stop-Transcript - exit + exit 1 } Write-Host( "omsagent healthservice pod running OK.") -ForegroundColor Green @@ -449,7 +449,7 @@ try { catch { Write-Host ("Failed to execute kubectl get services command : '" + $Error[0] + "' ") -ForegroundColor Red Stop-Transcript - exit + exit 1 } Write-Host("Retrieving WorkspaceGUID and WorkspacePrimaryKey of the workspace : " + $WorkspaceInformation.Name) @@ -462,7 +462,7 @@ try { catch { Write-Host ("Failed to workspace details. Please validate whether you have Log Analytics Contributor role on the workspace error: '" + $Error[0] + "' ") -ForegroundColor Red Stop-Transcript - exit + exit 1 } Write-Host("Checking whether the WorkspaceGuid and key matching with configured log analytics workspace ...") @@ -473,7 +473,7 @@ try { if ((($workspaceGuidConfiguredOnAgent -eq $workspaceGUID) -and ($workspaceKeyConfiguredOnAgent -eq $workspacePrimarySharedKey)) -eq $false) { Write-Host ("Error - Log Analytics Workspace Guid and key configured on the agent not matching with details of the Workspace. Please verify and fix with the correct workspace Guid and Key") -ForegroundColor Red Stop-Transcript - exit + exit 1 } Write-Host("Workspace Guid and Key on the agent matching with the Workspace") -ForegroundColor Green @@ -481,7 +481,7 @@ try { catch { Write-Host ("Failed to execute the script : '" + $Error[0] + "' ") -ForegroundColor Red Stop-Transcript - exit + exit 1 } Write-Host("Checking agent version...") @@ -497,7 +497,7 @@ try { catch { Write-Host ("Failed to execute the script : '" + $Error[0] + "' ") -ForegroundColor Red Stop-Transcript - exit + exit 1 } Write-Host("resetting cluster context back, what it was before") diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index fbee1dd75..407ab3611 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -964,7 +964,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int if er != nil { Log("Error::mdsd::Failed to write to mdsd %d records after %s. Will retry ... error : %s", len(msgPackEntries), elapsed, er.Error()) - UpdateNumTelegrafMetricsSentTelemetry(0, 1, 0) + UpdateNumTelegrafMetricsSentTelemetry(0, 1, 0, 0) if MdsdInsightsMetricsMsgpUnixSocketClient != nil { MdsdInsightsMetricsMsgpUnixSocketClient.Close() MdsdInsightsMetricsMsgpUnixSocketClient = nil @@ -976,7 +976,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int return output.FLB_RETRY } else { numTelegrafMetricsRecords := len(msgPackEntries) - UpdateNumTelegrafMetricsSentTelemetry(numTelegrafMetricsRecords, 0, 0) + UpdateNumTelegrafMetricsSentTelemetry(numTelegrafMetricsRecords, 0, 0, 0) Log("Success::mdsd::Successfully flushed %d telegraf metrics records that was %d bytes to mdsd in %s ", numTelegrafMetricsRecords, bts, elapsed) } } @@ -985,9 +985,13 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int var metrics []laTelegrafMetric var i int + numWinMetricsWithTagsSize64KBorMore := 0 for i = 0; i < len(laMetrics); i++ { metrics = append(metrics, *laMetrics[i]) + if len(*&laMetrics[i].Tags) >= (64 * 1024) { + numWinMetricsWithTagsSize64KBorMore += 1 + } } laTelegrafMetrics := InsightsMetricsBlob{ @@ -1039,7 +1043,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int if err != nil { message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:(retriable) when sending %v metrics. 
duration:%v err:%q \n", len(laMetrics), elapsed, err.Error()) Log(message) - UpdateNumTelegrafMetricsSentTelemetry(0, 1, 0) + UpdateNumTelegrafMetricsSentTelemetry(0, 1, 0, 0) return output.FLB_RETRY } @@ -1048,7 +1052,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int Log("PostTelegrafMetricsToLA::Error:(retriable) RequestID %s Response Status %v Status Code %v", reqID, resp.Status, resp.StatusCode) } if resp != nil && resp.StatusCode == 429 { - UpdateNumTelegrafMetricsSentTelemetry(0, 1, 1) + UpdateNumTelegrafMetricsSentTelemetry(0, 1, 1, 0) } return output.FLB_RETRY } @@ -1056,18 +1060,19 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int defer resp.Body.Close() numMetrics := len(laMetrics) - UpdateNumTelegrafMetricsSentTelemetry(numMetrics, 0, 0) + UpdateNumTelegrafMetricsSentTelemetry(numMetrics, 0, 0, numWinMetricsWithTagsSize64KBorMore) Log("PostTelegrafMetricsToLA::Info:Successfully flushed %v records in %v", numMetrics, elapsed) } return output.FLB_OK } -func UpdateNumTelegrafMetricsSentTelemetry(numMetricsSent int, numSendErrors int, numSend429Errors int) { +func UpdateNumTelegrafMetricsSentTelemetry(numMetricsSent int, numSendErrors int, numSend429Errors int, numWinMetricswith64KBorMoreSize int) { ContainerLogTelemetryMutex.Lock() TelegrafMetricsSentCount += float64(numMetricsSent) TelegrafMetricsSendErrorCount += float64(numSendErrors) TelegrafMetricsSend429ErrorCount += float64(numSend429Errors) + WinTelegrafMetricsCountWithTagsSize64KBorMore += float64(numWinMetricswith64KBorMoreSize) ContainerLogTelemetryMutex.Unlock() } diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index b344f4ac8..b4f8ab89d 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -32,6 +32,8 @@ var ( TelemetryClient appinsights.TelemetryClient // ContainerLogTelemetryTicker sends telemetry periodically ContainerLogTelemetryTicker *time.Ticker + //Tracks the number of windows telegraf metrics count with Tags size 64KB or more between telemetry ticker periods (uses ContainerLogTelemetryTicker) + WinTelegrafMetricsCountWithTagsSize64KBorMore float64 //Tracks the number of telegraf metrics sent successfully between telemetry ticker periods (uses ContainerLogTelemetryTicker) TelegrafMetricsSentCount float64 //Tracks the number of send errors between telemetry ticker periods (uses ContainerLogTelemetryTicker) @@ -78,6 +80,7 @@ const ( metricNameNumberofTelegrafMetricsSentSuccessfully = "TelegrafMetricsSentCount" metricNameNumberofSendErrorsTelegrafMetrics = "TelegrafMetricsSendErrorCount" metricNameNumberofSend429ErrorsTelegrafMetrics = "TelegrafMetricsSend429ErrorCount" + metricNameNumberofWinTelegrafMetricsWithTagsSize64KBorMore = "WinTelegrafMetricsCountWithTagsSize64KBorMore" metricNameErrorCountContainerLogsSendErrorsToMDSDFromFluent = "ContainerLogs2MdsdSendErrorCount" metricNameErrorCountContainerLogsMDSDClientCreateError = "ContainerLogsMdsdClientCreateErrorCount" metricNameErrorCountInsightsMetricsMDSDClientCreateError = "InsightsMetricsMDSDClientCreateErrorsCount" @@ -117,6 +120,7 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { telegrafMetricsSentCount := TelegrafMetricsSentCount telegrafMetricsSendErrorCount := TelegrafMetricsSendErrorCount telegrafMetricsSend429ErrorCount := TelegrafMetricsSend429ErrorCount + winTelegrafMetricsCountWithTagsSize64KBorMore := WinTelegrafMetricsCountWithTagsSize64KBorMore 
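The extra fourth argument threads a per-flush count of telegraf metrics whose serialized Tags payload is 64 KB or larger into the shared, mutex-guarded telemetry counters. A minimal, self-contained Go sketch of that counting pattern follows; the names (updateTelemetry, oversizedTagsCount, maxFieldSizeBytes) are illustrative only, not the plugin's actual identifiers.

    package main

    import (
    	"fmt"
    	"sync"
    )

    const maxFieldSizeBytes = 64 * 1024 // 64 KB threshold used for the telemetry counter

    var (
    	counterMutex       sync.Mutex
    	metricsSentCount   float64
    	oversizedTagsCount float64
    )

    // updateTelemetry mirrors the mutex-guarded counter update pattern.
    func updateTelemetry(sent int, oversizedTags int) {
    	counterMutex.Lock()
    	defer counterMutex.Unlock()
    	metricsSentCount += float64(sent)
    	oversizedTagsCount += float64(oversizedTags)
    }

    func main() {
    	// Count metrics whose tag payload meets or exceeds the 64 KB threshold.
    	tagPayloads := []string{"small", string(make([]byte, maxFieldSizeBytes))}
    	oversized := 0
    	for _, tags := range tagPayloads {
    		if len(tags) >= maxFieldSizeBytes {
    			oversized++
    		}
    	}
    	updateTelemetry(len(tagPayloads), oversized)
    	fmt.Printf("sent=%.0f oversizedTags=%.0f\n", metricsSentCount, oversizedTagsCount)
    }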
containerLogsSendErrorsToMDSDFromFluent := ContainerLogsSendErrorsToMDSDFromFluent containerLogsMDSDClientCreateErrors := ContainerLogsMDSDClientCreateErrors containerLogsSendErrorsToADXFromFluent := ContainerLogsSendErrorsToADXFromFluent @@ -133,6 +137,7 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { TelegrafMetricsSentCount = 0.0 TelegrafMetricsSendErrorCount = 0.0 TelegrafMetricsSend429ErrorCount = 0.0 + WinTelegrafMetricsCountWithTagsSize64KBorMore = 0.0 FlushedRecordsCount = 0.0 FlushedRecordsSize = 0.0 FlushedRecordsTimeTaken = 0.0 @@ -227,6 +232,9 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { if kubeMonEventsMDSDClientCreateErrors > 0.0 { TelemetryClient.Track(appinsights.NewMetricTelemetry(metricNameErrorCountKubeMonEventsMDSDClientCreateError, kubeMonEventsMDSDClientCreateErrors)) } + if winTelegrafMetricsCountWithTagsSize64KBorMore > 0.0 { + TelemetryClient.Track(appinsights.NewMetricTelemetry(metricNameNumberofWinTelegrafMetricsWithTagsSize64KBorMore, winTelegrafMetricsCountWithTagsSize64KBorMore)) + } if ContainerLogRecordCountWithEmptyTimeStamp > 0.0 { TelemetryClient.Track(appinsights.NewMetricTelemetry(metricNameContainerLogRecordCountWithEmptyTimeStamp, containerLogRecordCountWithEmptyTimeStamp)) } diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index 69da56488..b9516c2ce 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -110,7 +110,7 @@ class Constants CONTAINER_INVENTORY_DATA_TYPE = "CONTAINER_INVENTORY_BLOB" CONTAINER_NODE_INVENTORY_DATA_TYPE = "CONTAINER_NODE_INVENTORY_BLOB" PERF_DATA_TYPE = "LINUX_PERF_BLOB" - INSIGHTS_METRICS_DATA_TYPE = "INSIGHTS_METRICS_BLOB" + INSIGHTS_METRICS_DATA_TYPE = "INSIGHTS_METRICS_BLOB" KUBE_SERVICES_DATA_TYPE = "KUBE_SERVICES_BLOB" KUBE_POD_INVENTORY_DATA_TYPE = "KUBE_POD_INVENTORY_BLOB" KUBE_NODE_INVENTORY_DATA_TYPE = "KUBE_NODE_INVENTORY_BLOB" @@ -119,17 +119,21 @@ class Constants KUBE_MON_AGENT_EVENTS_DATA_TYPE = "KUBE_MON_AGENT_EVENTS_BLOB" KUBE_HEALTH_DATA_TYPE = "KUBE_HEALTH_BLOB" CONTAINERLOGV2_DATA_TYPE = "CONTAINERINSIGHTS_CONTAINERLOGV2" - CONTAINERLOG_DATA_TYPE = "CONTAINER_LOG_BLOB" + CONTAINERLOG_DATA_TYPE = "CONTAINER_LOG_BLOB" #ContainerInsights Extension (AMCS) CI_EXTENSION_NAME = "ContainerInsights" - CI_EXTENSION_VERSION = "1" + CI_EXTENSION_VERSION = "1" #Current CI extension config size is ~5KB and going with 20KB to handle any future scenarios CI_EXTENSION_CONFIG_MAX_BYTES = 20480 - ONEAGENT_FLUENT_SOCKET_NAME = "/var/run/mdsd/default_fluent.socket" + ONEAGENT_FLUENT_SOCKET_NAME = "/var/run/mdsd/default_fluent.socket" #Tag prefix for output stream EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX = "dcr-" LINUX_LOG_PATH = $in_unit_test.nil? ? "/var/opt/microsoft/docker-cimprov/log/" : "./" WINDOWS_LOG_PATH = $in_unit_test.nil? ? 
"/etc/omsagentwindows/" : "./" + + #This is for telemetry to track if any of the windows customer has any of the field size >= 64KB + #To evaluate switching to Windows AMA 64KB impacts any existing customers + MAX_RECORD_OR_FIELD_SIZE_FOR_TELEMETRY = 65536 end diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index 862e88e44..aba24ecc2 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -64,12 +64,12 @@ def enumerate() begin eventStream = Fluent::MultiEventStream.new insightsMetricsEventStream = Fluent::MultiEventStream.new - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, metricTime: batchTime ) + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, metricTime: batchTime) metricData.each do |record| eventStream.add(time, record) if record end - if ExtensionUtils.isAADMSIAuthMode() + if ExtensionUtils.isAADMSIAuthMode() && !@@isWindows.nil? && @@isWindows == false $log.info("in_cadvisor_perf::enumerate: AAD AUTH MSI MODE") if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) @tag = ExtensionUtils.getOutputStreamId(Constants::PERF_DATA_TYPE) @@ -77,7 +77,7 @@ def enumerate() if @insightsmetricstag.nil? || !@insightsmetricstag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) @insightsmetricstag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) end - $log.info("in_cadvisor_perf::enumerate: using perf tag -#{@tag} @ #{Time.now.utc.iso8601}") + $log.info("in_cadvisor_perf::enumerate: using perf tag -#{@tag} @ #{Time.now.utc.iso8601}") $log.info("in_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsmetricstag} @ #{Time.now.utc.iso8601}") end router.emit_stream(@tag, eventStream) if eventStream @@ -95,9 +95,9 @@ def enumerate() containerGPUusageInsightsMetricsDataItems = [] containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, metricTime: batchTime)) - containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| - insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord - end + containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| + insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord + end router.emit_stream(@insightsmetricstag, insightsMetricsEventStream) if insightsMetricsEventStream router.emit_stream(@mdmtag, insightsMetricsEventStream) if insightsMetricsEventStream diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index a32a32769..abbfe94a1 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -1,17 +1,17 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -require 'fluent/plugin/input' +require "fluent/plugin/input" module Fluent::Plugin class Kube_nodeInventory_Input < Input Fluent::Plugin.register_input("kube_nodes", self) - def initialize (kubernetesApiClient=nil, - applicationInsightsUtility=nil, - extensionUtils=nil, - env=nil, - telemetry_flush_interval=nil) + def initialize(kubernetesApiClient = nil, + applicationInsightsUtility = nil, + extensionUtils = nil, + env = nil, + telemetry_flush_interval = nil) super() require "yaml" @@ -36,8 +36,7 @@ def initialize (kubernetesApiClient=nil, @@promConfigMountPath = "/etc/config/settings/prometheus-data-collection-settings" @@osmConfigMountPath = "/etc/config/osm-settings/osm-metric-collection-configuration" 
@@AzStackCloudFileName = "/etc/kubernetes/host/azurestackcloud.json" - - + @@rsPromInterval = @env["TELEMETRY_RS_PROM_INTERVAL"] @@rsPromFieldPassCount = @env["TELEMETRY_RS_PROM_FIELDPASS_LENGTH"] @@rsPromFieldDropCount = @env["TELEMETRY_RS_PROM_FIELDDROP_LENGTH"] @@ -119,6 +118,7 @@ def enumerate nodeInventory = nil currentTime = Time.now batchTime = currentTime.utc.iso8601 + nodeCount = 0 @nodesAPIE2ELatencyMs = 0 @nodeInventoryE2EProcessingLatencyMs = 0 @@ -138,7 +138,7 @@ def enumerate if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) @tag = @extensionUtils.getOutputStreamId(Constants::KUBE_NODE_INVENTORY_DATA_TYPE) end - $log.info("in_kube_nodes::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_nodes::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_nodes::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_nodes::enumerate: using containernodeinventory tag -#{@ContainerNodeInventoryTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_nodes::enumerate: using kubenodeinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") @@ -155,6 +155,7 @@ def enumerate nodesAPIChunkEndTime = (Time.now.to_f * 1000).to_i @nodesAPIE2ELatencyMs = (nodesAPIChunkEndTime - nodesAPIChunkStartTime) if (!nodeInventory.nil? && !nodeInventory.empty? && nodeInventory.key?("items") && !nodeInventory["items"].nil? && !nodeInventory["items"].empty?) + nodeCount += nodeInventory["items"].length $log.info("in_kube_nodes::enumerate : number of node items :#{nodeInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") parse_and_emit_records(nodeInventory, batchTime) else @@ -168,6 +169,7 @@ def enumerate nodesAPIChunkEndTime = (Time.now.to_f * 1000).to_i @nodesAPIE2ELatencyMs = @nodesAPIE2ELatencyMs + (nodesAPIChunkEndTime - nodesAPIChunkStartTime) if (!nodeInventory.nil? && !nodeInventory.empty? && nodeInventory.key?("items") && !nodeInventory["items"].nil? && !nodeInventory["items"].empty?) 
+ nodeCount += nodeInventory["items"].length $log.info("in_kube_nodes::enumerate : number of node items :#{nodeInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") parse_and_emit_records(nodeInventory, batchTime) else @@ -181,6 +183,7 @@ def enumerate if (timeDifferenceInMinutes >= @TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) @applicationInsightsUtility.sendMetricTelemetry("NodeInventoryE2EProcessingLatencyMs", @nodeInventoryE2EProcessingLatencyMs, {}) @applicationInsightsUtility.sendMetricTelemetry("NodesAPIE2ELatencyMs", @nodesAPIE2ELatencyMs, {}) + @applicationInsightsUtility.sendMetricTelemetry("NodeCount", nodeCount, {}) @@nodeInventoryLatencyTelemetryTimeTracker = DateTime.now.to_time.to_i end # Setting this to nil so that we dont hold memory until GC kicks in @@ -208,9 +211,9 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) nodeInventoryRecord = getNodeInventoryRecord(item, batchTime) eventStream.add(emitTime, nodeInventoryRecord) if nodeInventoryRecord if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && eventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE - $log.info("in_kube_node::parse_and_emit_records: number of node inventory records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_node::parse_and_emit_records: number of node inventory records emitted #{eventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@tag, eventStream) if eventStream - $log.info("in_kube_node::parse_and_emit_records: number of mdm node inventory records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_node::parse_and_emit_records: number of mdm node inventory records emitted #{eventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@MDMKubeNodeInventoryTag, eventStream) if eventStream if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) $log.info("kubeNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") @@ -223,7 +226,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) containerNodeInventoryEventStream.add(emitTime, containerNodeInventoryRecord) if containerNodeInventoryRecord if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && containerNodeInventoryEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE - $log.info("in_kube_node::parse_and_emit_records: number of container node inventory records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_node::parse_and_emit_records: number of container node inventory records emitted #{containerNodeInventoryEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@ContainerNodeInventoryTag, containerNodeInventoryEventStream) if containerNodeInventoryEventStream containerNodeInventoryEventStream = Fluent::MultiEventStream.new if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) @@ -272,7 +275,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) kubePerfEventStream.add(emitTime, metricRecord) if metricRecord end if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && kubePerfEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE - $log.info("in_kube_nodes::parse_and_emit_records: number of node perf metric records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_nodes::parse_and_emit_records: number of node perf metric records emitted #{kubePerfEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@kubeperfTag, kubePerfEventStream) if kubePerfEventStream kubePerfEventStream = Fluent::MultiEventStream.new if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) @@ -302,7 +305,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) insightsMetricsEventStream.add(emitTime, insightsMetricsRecord) if insightsMetricsRecord end if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && insightsMetricsEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE - $log.info("in_kube_nodes::parse_and_emit_records: number of GPU node perf metric records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_nodes::parse_and_emit_records: number of GPU node perf metric records emitted #{insightsMetricsEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@insightsMetricsTag, insightsMetricsEventStream) if insightsMetricsEventStream insightsMetricsEventStream = Fluent::MultiEventStream.new if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) @@ -572,12 +575,12 @@ def getNodeTelemetryProps(item) return properties end end # Kube_Node_Input + class NodeStatsCache # inner class for caching implementation (CPU and memory caching is handled the exact same way, so logic to do so is moved to a private inner class) # (to reduce code duplication) class NodeCache - - @@RECORD_TIME_TO_LIVE = 60*20 # units are seconds, so clear the cache every 20 minutes. + @@RECORD_TIME_TO_LIVE = 60 * 20 # units are seconds, so clear the cache every 20 minutes. 
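For context on the NodeCache being reformatted here: entries are stamped when added, and anything older than RECORD_TIME_TO_LIVE (60 * 20 seconds, i.e. 20 minutes) is dropped on the next cleanup pass. A rough Go sketch of that time-to-live eviction, with hypothetical names and a simple map-backed cache, not the actual Ruby implementation:

    package main

    import (
    	"fmt"
    	"time"
    )

    const recordTTL = 20 * time.Minute // mirrors RECORD_TIME_TO_LIVE (60 * 20 seconds)

    type nodeStatsCache struct {
    	values    map[string]float64
    	timeAdded map[string]time.Time
    }

    func newNodeStatsCache() *nodeStatsCache {
    	return &nodeStatsCache{values: map[string]float64{}, timeAdded: map[string]time.Time{}}
    }

    func (c *nodeStatsCache) set(node string, v float64) {
    	c.values[node] = v
    	c.timeAdded[node] = time.Now()
    }

    // cleanCache removes entries older than the TTL, analogous to NodeCache#clean_cache.
    func (c *nodeStatsCache) cleanCache(now time.Time) {
    	for node, added := range c.timeAdded {
    		if now.Sub(added) > recordTTL {
    			delete(c.values, node)
    			delete(c.timeAdded, node)
    		}
    	}
    }

    func main() {
    	c := newNodeStatsCache()
    	c.set("aks-nodepool1-0", 1.5)
    	c.cleanCache(time.Now().Add(30 * time.Minute)) // pretend 30 minutes have elapsed
    	fmt.Println(len(c.values))                     // 0: stale entry evicted
    }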
def initialize @cacheHash = {} @@ -622,7 +625,7 @@ def clean_cache() end end - nodes_to_remove.each {|node_name| + nodes_to_remove.each { |node_name| @cacheHash.delete(node_name) @timeAdded.delete(node_name) } @@ -630,7 +633,6 @@ def clean_cache() end end # NodeCache - @@cpuCache = NodeCache.new @@memCache = NodeCache.new diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 3f5f4f1cc..f979ef7c5 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -1,7 +1,7 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -require 'fluent/plugin/input' +require "fluent/plugin/input" module Fluent::Plugin require_relative "podinventory_to_mdm" @@ -12,7 +12,6 @@ class Kube_PodInventory_Input < Input @@MDMKubePodInventoryTag = "mdm.kubepodinventory" @@hostName = (OMS::Common.get_hostname) - def initialize super require "yaml" @@ -35,9 +34,16 @@ def initialize @PODS_EMIT_STREAM_BATCH_SIZE = 0 @podCount = 0 + @containerCount = 0 @serviceCount = 0 @controllerSet = Set.new [] @winContainerCount = 0 + @windowsNodeCount = 0 + @winContainerInventoryTotalSizeBytes = 0 + @winContainerCountWithInventoryRecordSize64KBOrMore = 0 + @winContainerCountWithEnvVarSize64KBOrMore = 0 + @winContainerCountWithPortsSize64KBOrMore = 0 + @winContainerCountWithCommandSize64KBOrMore = 0 @controllerData = {} @podInventoryE2EProcessingLatencyMs = 0 @podsAPIE2ELatencyMs = 0 @@ -100,9 +106,16 @@ def enumerate(podList = nil) podInventory = podList telemetryFlush = false @podCount = 0 + @containerCount = 0 @serviceCount = 0 @controllerSet = Set.new [] @winContainerCount = 0 + @winContainerInventoryTotalSizeBytes = 0 + @winContainerCountWithInventoryRecordSize64KBOrMore = 0 + @winContainerCountWithEnvVarSize64KBOrMore = 0 + @winContainerCountWithPortsSize64KBOrMore = 0 + @winContainerCountWithCommandSize64KBOrMore = 0 + @windowsNodeCount = 0 @controllerData = {} currentTime = Time.now batchTime = currentTime.utc.iso8601 @@ -110,27 +123,27 @@ def enumerate(podList = nil) @podInventoryE2EProcessingLatencyMs = 0 podInventoryStartTime = (Time.now.to_f * 1000).to_i if ExtensionUtils.isAADMSIAuthMode() - $log.info("in_kube_podinventory::enumerate: AAD AUTH MSI MODE") - if @kubeperfTag.nil? || !@kubeperfTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) - @kubeperfTag = ExtensionUtils.getOutputStreamId(Constants::PERF_DATA_TYPE) - end - if @kubeservicesTag.nil? || !@kubeservicesTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) - @kubeservicesTag = ExtensionUtils.getOutputStreamId(Constants::KUBE_SERVICES_DATA_TYPE) - end - if @containerInventoryTag.nil? || !@containerInventoryTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) - @containerInventoryTag = ExtensionUtils.getOutputStreamId(Constants::CONTAINER_INVENTORY_DATA_TYPE) - end - if @insightsMetricsTag.nil? || !@insightsMetricsTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) - @insightsMetricsTag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) - end - if @tag.nil? 
|| !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) - @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_POD_INVENTORY_DATA_TYPE) - end - $log.info("in_kube_podinventory::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") - $log.info("in_kube_podinventory::enumerate: using kubeservices tag -#{@kubeservicesTag} @ #{Time.now.utc.iso8601}") - $log.info("in_kube_podinventory::enumerate: using containerinventory tag -#{@containerInventoryTag} @ #{Time.now.utc.iso8601}") - $log.info("in_kube_podinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") - $log.info("in_kube_podinventory::enumerate: using kubepodinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: AAD AUTH MSI MODE") + if @kubeperfTag.nil? || !@kubeperfTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @kubeperfTag = ExtensionUtils.getOutputStreamId(Constants::PERF_DATA_TYPE) + end + if @kubeservicesTag.nil? || !@kubeservicesTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @kubeservicesTag = ExtensionUtils.getOutputStreamId(Constants::KUBE_SERVICES_DATA_TYPE) + end + if @containerInventoryTag.nil? || !@containerInventoryTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @containerInventoryTag = ExtensionUtils.getOutputStreamId(Constants::CONTAINER_INVENTORY_DATA_TYPE) + end + if @insightsMetricsTag.nil? || !@insightsMetricsTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @insightsMetricsTag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) + end + if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_POD_INVENTORY_DATA_TYPE) + end + $log.info("in_kube_podinventory::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using kubeservices tag -#{@kubeservicesTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using containerinventory tag -#{@containerInventoryTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using kubepodinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") end # Get services first so that we dont need to make a call for very chunk @@ -202,11 +215,24 @@ def enumerate(podList = nil) telemetryProperties["PODS_EMIT_STREAM_BATCH_SIZE"] = @PODS_EMIT_STREAM_BATCH_SIZE ApplicationInsightsUtility.sendCustomEvent("KubePodInventoryHeartBeatEvent", telemetryProperties) ApplicationInsightsUtility.sendMetricTelemetry("PodCount", @podCount, {}) + ApplicationInsightsUtility.sendMetricTelemetry("ContainerCount", @containerCount, {}) ApplicationInsightsUtility.sendMetricTelemetry("ServiceCount", @serviceCount, {}) telemetryProperties["ControllerData"] = @controllerData.to_json ApplicationInsightsUtility.sendMetricTelemetry("ControllerCount", @controllerSet.length, telemetryProperties) if @winContainerCount > 0 telemetryProperties["ClusterWideWindowsContainersCount"] = @winContainerCount + telemetryProperties["WindowsNodeCount"] = @windowsNodeCount + telemetryProperties["ClusterWideWindowsContainerInventoryTotalSizeKB"] = @winContainerInventoryTotalSizeBytes / 1024 + telemetryProperties["WindowsContainerCountWithInventoryRecordSize64KBorMore"] = 
@winContainerCountWithInventoryRecordSize64KBOrMore + if @winContainerCountWithEnvVarSize64KBOrMore > 0 + telemetryProperties["WinContainerCountWithEnvVarSize64KBOrMore"] = @winContainerCountWithEnvVarSize64KBOrMore + end + if @winContainerCountWithPortsSize64KBOrMore > 0 + telemetryProperties["WinContainerCountWithPortsSize64KBOrMore"] = @winContainerCountWithPortsSize64KBOrMore + end + if @winContainerCountWithCommandSize64KBOrMore > 0 + telemetryProperties["WinContainerCountWithCommandSize64KBOrMore"] = @winContainerCountWithCommandSize64KBOrMore + end ApplicationInsightsUtility.sendCustomEvent("WindowsContainerInventoryEvent", telemetryProperties) end ApplicationInsightsUtility.sendMetricTelemetry("PodInventoryE2EProcessingLatencyMs", @podInventoryE2EProcessingLatencyMs, telemetryProperties) @@ -236,6 +262,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc podInventory["items"].each do |item| #podInventory block start # pod inventory records podInventoryRecords = getPodInventoryRecords(item, serviceRecords, batchTime) + @containerCount += podInventoryRecords.length podInventoryRecords.each do |record| if !record.nil? eventStream.add(emitTime, record) if record @@ -249,6 +276,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc if !item["spec"]["nodeName"].nil? nodeName = item["spec"]["nodeName"] end + @windowsNodeCount = winNodes.length if (!nodeName.empty? && (winNodes.include? nodeName)) clusterCollectEnvironmentVar = ENV["AZMON_CLUSTER_COLLECT_ENV_VAR"] #Generate ContainerInventory records for windows nodes so that we can get image and image tag in property panel @@ -258,13 +286,27 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc containerInventoryRecords.each do |cirecord| if !cirecord.nil? containerInventoryStream.add(emitTime, cirecord) if cirecord + ciRecordSize = cirecord.to_s.length + @winContainerInventoryTotalSizeBytes += ciRecordSize + if ciRecordSize >= Constants::MAX_RECORD_OR_FIELD_SIZE_FOR_TELEMETRY + @winContainerCountWithInventoryRecordSize64KBOrMore += 1 + end + if !cirecord["EnvironmentVar"].nil? && !cirecord["EnvironmentVar"].empty? && cirecord["EnvironmentVar"].length >= Constants::MAX_RECORD_OR_FIELD_SIZE_FOR_TELEMETRY + @winContainerCountWithEnvVarSize64KBOrMore += 1 + end + if !cirecord["Ports"].nil? && !cirecord["Ports"].empty? && cirecord["Ports"].length >= Constants::MAX_RECORD_OR_FIELD_SIZE_FOR_TELEMETRY + @winContainerCountWithPortsSize64KBOrMore += 1 + end + if !cirecord["Command"].nil? && !cirecord["Command"].empty? && cirecord["Command"].length >= Constants::MAX_RECORD_OR_FIELD_SIZE_FOR_TELEMETRY + @winContainerCountWithCommandSize64KBOrMore += 1 + end end end end end if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && eventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE - $log.info("in_kube_podinventory::parse_and_emit_records: number of pod inventory records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::parse_and_emit_records: number of pod inventory records emitted #{eventStream.count} @ #{Time.now.utc.iso8601}") if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) $log.info("kubePodInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") end @@ -284,7 +326,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc end if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && kubePerfEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE - $log.info("in_kube_podinventory::parse_and_emit_records: number of container perf records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::parse_and_emit_records: number of container perf records emitted #{kubePerfEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@kubeperfTag, kubePerfEventStream) if kubePerfEventStream if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) $log.info("kubeContainerPerfEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") @@ -303,7 +345,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc end if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && insightsMetricsEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE - $log.info("in_kube_podinventory::parse_and_emit_records: number of GPU insights metrics records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::parse_and_emit_records: number of GPU insights metrics records emitted #{insightsMetricsEventStream.count} @ #{Time.now.utc.iso8601}") if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) $log.info("kubePodInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") end @@ -368,7 +410,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc kubeServiceRecord["ClusterName"] = KubernetesApiClient.getClusterName kubeServicesEventStream.add(emitTime, kubeServiceRecord) if kubeServiceRecord if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && kubeServicesEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE - $log.info("in_kube_podinventory::parse_and_emit_records: number of service records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::parse_and_emit_records: number of service records emitted #{kubeServicesEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@kubeservicesTag, kubeServicesEventStream) if kubeServicesEventStream kubeServicesEventStream = Fluent::MultiEventStream.new if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) From bfc41a4f12a5972dc0a74dfabee22c561dadd71f Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 28 Jan 2022 17:33:56 -0800 Subject: [PATCH 191/301] Gangams/jan 2022 release tasks 2 (#701) * mdsd proc cpu and memory telemetry * write ai logs to file and telemetry for mdsd proc * write ai logs to file and telemetry for mdsd proc * write ai logs to file and telemetry for mdsd proc * fix pr feedback * use name_prefix * remove mdsd telemetry changes * remove mdsd telemetry changes * remove mdsd telemetry changes --- .../conf/telegraf-prom-side-car.conf | 53 ++++++++++++++++++- build/linux/installer/conf/telegraf-rs.conf | 52 ++++++++++++++++-- build/linux/installer/conf/telegraf.conf | 38 ++++++++++--- .../installer/datafiles/base_container.data | 4 +- .../ruby/ApplicationInsightsUtility.rb | 15 ++++-- .../channel/asynchronous_sender.rb | 13 ++--- .../channel/sender_base.rb | 31 ++++++----- .../channel/synchronous_sender.rb | 5 +- 8 files changed, 173 insertions(+), 38 deletions(-) diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index 1b6bab9f9..a94150fad 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -111,6 +111,26 @@ data_format = "json" namedrop = ["agent_telemetry", "file"] +# [[outputs.application_insights]] +# ## Instrumentation key of the Application Insights resource. +# instrumentation_key = "$TELEMETRY_APPLICATIONINSIGHTS_KEY" + +# ## Timeout for closing (default: 5s). +# # timeout = "5s" + +# ## Enable additional diagnostic logging. +# enable_diagnostic_logging = false + +# ## Context Tag Sources add Application Insights context tags to a tag value. 
+# ## +# ## For list of allowed context tag keys see: +# ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go +# # [outputs.application_insights.context_tag_sources] +# # "ai.cloud.role" = "kubernetes_container_name" +# # "ai.cloud.roleInstance" = "kubernetes_pod_name" +# namepass = ["t.azm.ms/agent_telemetry"] + #tagdrop = ["nodeName"] + ############################################################################### # PROCESSOR PLUGINS # ############################################################################### @@ -119,9 +139,23 @@ [processors.converter.fields] float = ["*"] +############################################################################### +# AGGREGATOR PLUGINS # +############################################################################### +# [[aggregators.quantile]] +# period = "30m" +# drop_original = true +# quantiles = [0.95] +# algorithm = "t-digest" +# compression = 100.0 +# namepass = ["agent_telemetry"] + +############################################################################### +# INPUT PLUGINS # +############################################################################### # Dummy plugin to test out toml parsing happens properly [[inputs.file]] - interval = "24h" + interval = "24h" files = ["test.json"] data_format = "json" @@ -166,3 +200,20 @@ $AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER ## OSM Prometheus configuration $AZMON_TELEGRAF_OSM_PROM_PLUGINS + +# [[inputs.procstat]] +# name_prefix="t.azm.ms/" +# exe = "mdsd" +# interval = "60s" +# pid_finder = "native" +# pid_tag = true +# name_override = "agent_telemetry" +# fieldpass = ["cpu_usage", "memory_rss"] +# [inputs.procstat.tags] +# Computer = "$NODE_NAME" +# AgentVersion = "$AGENT_VERSION" +# ControllerType = "$CONTROLLER_TYPE" +# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" +# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" +# Region = "$TELEMETRY_AKS_REGION" +# ContainerType = "$CONTAINER_TYPE" diff --git a/build/linux/installer/conf/telegraf-rs.conf b/build/linux/installer/conf/telegraf-rs.conf index 5de35d82c..72fc25451 100644 --- a/build/linux/installer/conf/telegraf-rs.conf +++ b/build/linux/installer/conf/telegraf-rs.conf @@ -124,6 +124,26 @@ namedrop = ["agent_telemetry", "file"] #tagdrop = ["AgentVersion","AKS_RESOURCE_ID", "ACS_RESOURCE_NAME", "Region","ClusterName","ClusterType", "Computer", "ControllerType"] +# [[outputs.application_insights]] +# ## Instrumentation key of the Application Insights resource. +# instrumentation_key = "$TELEMETRY_APPLICATIONINSIGHTS_KEY" + +# ## Timeout for closing (default: 5s). +# # timeout = "5s" + +# ## Enable additional diagnostic logging. +# enable_diagnostic_logging = false + +# ## Context Tag Sources add Application Insights context tags to a tag value. 
+# ## +# ## For list of allowed context tag keys see: +# ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go +# # [outputs.application_insights.context_tag_sources] +# # "ai.cloud.role" = "kubernetes_container_name" +# # "ai.cloud.roleInstance" = "kubernetes_pod_name" +# namepass = ["t.azm.ms/agent_telemetry"] + #tagdrop = ["nodeName"] + ############################################################################### # PROCESSOR PLUGINS # ############################################################################### @@ -293,6 +313,13 @@ ############################################################################### # AGGREGATOR PLUGINS # ############################################################################### +# [[aggregators.quantile]] +# period = "30m" +# drop_original = true +# quantiles = [0.95] +# algorithm = "t-digest" +# compression = 100.0 +# namepass = ["agent_telemetry"] # # Keep the aggregate basicstats of each metric passing through. # [[aggregators.basicstats]] @@ -369,7 +396,7 @@ # report_active = true # fieldpass = ["usage_active","cluster","node","host","device"] # taginclude = ["cluster","cpu","node"] - + # Read metrics about disk usage by mount point @@ -377,7 +404,7 @@ ## By default stats will be gathered for all mount points. ## Set mount_points will restrict the stats to only the specified mount points. # mount_points = ["/"] - + ## Ignore mount points by filesystem type. # ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] # fieldpass = ["free", "used", "used_percent"] @@ -520,7 +547,7 @@ # Dummy plugin to test out toml parsing happens properly [[inputs.file]] - interval = "24h" + interval = "24h" files = ["test.json"] data_format = "json" @@ -530,10 +557,10 @@ ## An array of urls to scrape metrics from. urls = $AZMON_TELEGRAF_CUSTOM_PROM_URLS - + ## An array of Kubernetes services to scrape metrics from. kubernetes_services = $AZMON_TELEGRAF_CUSTOM_PROM_K8S_SERVICES - + ## Scrape Kubernetes pods for the following prometheus annotations: ## - prometheus.io/scrape: Enable scraping for this pod ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to @@ -648,3 +675,18 @@ $AZMON_TELEGRAF_OSM_PROM_PLUGINS #[inputs.prometheus.tagpass] # operation_type = ["create_container", "remove_container", "pull_image"] +# [[inputs.procstat]] +# name_prefix="t.azm.ms/" +# exe = "mdsd" +# interval = "60s" +# pid_finder = "native" +# pid_tag = true +# name_override = "agent_telemetry" +# fieldpass = ["cpu_usage", "memory_rss"] +# [inputs.procstat.tags] +# Computer = "$NODE_NAME" +# AgentVersion = "$AGENT_VERSION" +# ControllerType = "$CONTROLLER_TYPE" +# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" +# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" +# Region = "$TELEMETRY_AKS_REGION" diff --git a/build/linux/installer/conf/telegraf.conf b/build/linux/installer/conf/telegraf.conf index b0a8730c6..9f213e3e8 100644 --- a/build/linux/installer/conf/telegraf.conf +++ b/build/linux/installer/conf/telegraf.conf @@ -158,6 +158,26 @@ namepass = ["container.azm.ms/disk"] #fieldpass = ["used_percent"] +# [[outputs.application_insights]] +# ## Instrumentation key of the Application Insights resource. +# instrumentation_key = "$TELEMETRY_APPLICATIONINSIGHTS_KEY" + +# ## Timeout for closing (default: 5s). +# # timeout = "5s" + +# ## Enable additional diagnostic logging. 
+# enable_diagnostic_logging = false + + ## Context Tag Sources add Application Insights context tags to a tag value. + ## + ## For list of allowed context tag keys see: + ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go + # [outputs.application_insights.context_tag_sources] + # "ai.cloud.role" = "kubernetes_container_name" + # "ai.cloud.roleInstance" = "kubernetes_pod_name" + # namepass = ["agent_telemetry"] + #tagdrop = ["nodeName"] + ############################################################################### # PROCESSOR PLUGINS # ############################################################################### @@ -328,7 +348,13 @@ ############################################################################### # AGGREGATOR PLUGINS # ############################################################################### - +# [[aggregators.quantile]] +# period = "30m" +# drop_original = true +# quantiles = [0.95] +# algorithm = "t-digest" +# compression = 100.0 +# namepass = ["t.azm.ms/agent_telemetry"] # # Keep the aggregate basicstats of each metric passing through. # [[aggregators.basicstats]] # ## General Aggregator Arguments: @@ -407,7 +433,7 @@ # Dummy plugin to test out toml parsing happens properly [[inputs.file]] - interval = "24h" + interval = "24h" files = ["test.json"] data_format = "json" @@ -550,14 +576,14 @@ #fieldpass = ["numContainers", "numContainersRunning", "numContainersStopped", "numContainersPaused", "numContainerImages"] # taginclude = ["nodeName"] -#[[inputs.procstat]] -# #name_prefix="t.azm.ms/" +# [[inputs.procstat]] +# name_prefix="t.azm.ms/" # exe = "mdsd" -# interval = "10s" +# interval = "60s" # pid_finder = "native" # pid_tag = true # name_override = "agent_telemetry" -# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] +# fieldpass = ["cpu_usage", "memory_rss"] # [inputs.procstat.tags] # Computer = "$NODE_NAME" # AgentVersion = "$AGENT_VERSION" diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index 985c73a17..9fc7ce08f 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -286,6 +286,8 @@ chmod 666 /var/opt/microsoft/docker-cimprov/log/fluent_forward_failed.log touch /var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log chmod 666 /var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log +touch /var/opt/microsoft/docker-cimprov/log/appinsights_error.log +chmod 666 /var/opt/microsoft/docker-cimprov/log/appinsights_error.log touch /var/opt/microsoft/docker-cimprov/log/fluentd.log chmod 666 /var/opt/microsoft/docker-cimprov/log/fluentd.log @@ -309,7 +311,7 @@ if ${{PERFORMING_UPGRADE_NOT}}; then rmdir /etc/opt/microsoft/docker-cimprov/conf 2> /dev/null rmdir /etc/opt/microsoft/docker-cimprov 2> /dev/null rmdir /etc/opt/microsoft 2> /dev/null - rmdir /etc/opt 2> /dev/null + rmdir /etc/opt 2> /dev/null fi %Preinstall_0 diff --git a/source/plugins/ruby/ApplicationInsightsUtility.rb b/source/plugins/ruby/ApplicationInsightsUtility.rb index 7691304a6..eb143c4ba 100644 --- a/source/plugins/ruby/ApplicationInsightsUtility.rb +++ b/source/plugins/ruby/ApplicationInsightsUtility.rb @@ -98,6 +98,13 @@ def initializeUtility() elsif !encodedAppInsightsKey.nil? 
decodedAppInsightsKey = Base64.decode64(encodedAppInsightsKey) + if @@isWindows + logPath = "/etc/omsagentwindows/appinsights_error.log" + else + logPath = "/var/opt/microsoft/docker-cimprov/log/appinsights_error.log" + end + aiLogger = Logger.new(logPath, 1, 2 * 1024 * 1024) + #override ai endpoint if its available otherwise use default. if appInsightsEndpoint && !appInsightsEndpoint.nil? && !appInsightsEndpoint.empty? $log.info("AppInsightsUtility: Telemetry client uses overrided endpoint url : #{appInsightsEndpoint}") @@ -105,20 +112,20 @@ def initializeUtility() #telemetrySynchronousQueue = ApplicationInsights::Channel::SynchronousQueue.new(telemetrySynchronousSender) #telemetryChannel = ApplicationInsights::Channel::TelemetryChannel.new nil, telemetrySynchronousQueue if !isProxyConfigured - sender = ApplicationInsights::Channel::AsynchronousSender.new appInsightsEndpoint + sender = ApplicationInsights::Channel::AsynchronousSender.new appInsightsEndpoint, aiLogger else $log.info("AppInsightsUtility: Telemetry client uses provided proxy configuration since proxy configured") - sender = ApplicationInsights::Channel::AsynchronousSender.new appInsightsEndpoint, @@proxy + sender = ApplicationInsights::Channel::AsynchronousSender.new appInsightsEndpoint, aiLogger, @@proxy end queue = ApplicationInsights::Channel::AsynchronousQueue.new sender channel = ApplicationInsights::Channel::TelemetryChannel.new nil, queue @@Tc = ApplicationInsights::TelemetryClient.new decodedAppInsightsKey, channel else if !isProxyConfigured - sender = ApplicationInsights::Channel::AsynchronousSender.new + sender = ApplicationInsights::Channel::AsynchronousSender.new nil, aiLogger else $log.info("AppInsightsUtility: Telemetry client uses provided proxy configuration since proxy configured") - sender = ApplicationInsights::Channel::AsynchronousSender.new nil, @@proxy + sender = ApplicationInsights::Channel::AsynchronousSender.new nil, aiLogger, @@proxy end queue = ApplicationInsights::Channel::AsynchronousQueue.new sender channel = ApplicationInsights::Channel::TelemetryChannel.new nil, queue diff --git a/source/plugins/ruby/lib/application_insights/channel/asynchronous_sender.rb b/source/plugins/ruby/lib/application_insights/channel/asynchronous_sender.rb index 4786aa1d9..df2138b3a 100644 --- a/source/plugins/ruby/lib/application_insights/channel/asynchronous_sender.rb +++ b/source/plugins/ruby/lib/application_insights/channel/asynchronous_sender.rb @@ -1,5 +1,5 @@ -require_relative 'sender_base' -require 'thread' +require_relative "sender_base" +require "thread" module ApplicationInsights module Channel @@ -17,12 +17,13 @@ module Channel # If no queue items are found for {#send_time} seconds, the worker thread # will shut down (and {#start} will need to be called again). class AsynchronousSender < SenderBase - SERVICE_ENDPOINT_URI = 'https://dc.services.visualstudio.com/v2/track' + SERVICE_ENDPOINT_URI = "https://dc.services.visualstudio.com/v2/track" # Initializes a new instance of the class. # @param [String] service_endpoint_uri the address of the service to send + # @param [Logger] instance of the logger to write the logs (optional) # @param [Hash] proxy server configuration to send (optional) # telemetry data to. - def initialize(service_endpoint_uri = SERVICE_ENDPOINT_URI, proxy = {}) + def initialize(service_endpoint_uri = SERVICE_ENDPOINT_URI, logger = nil, proxy = {}) # callers which requires proxy dont require to maintain service endpoint uri which potentially can change if service_endpoint_uri.nil? 
|| service_endpoint_uri.empty? service_endpoint_uri = SERVICE_ENDPOINT_URI @@ -33,7 +34,7 @@ def initialize(service_endpoint_uri = SERVICE_ENDPOINT_URI, proxy = {}) @lock_work_thread = Mutex.new @work_thread = nil @start_notification_processed = true - super service_endpoint_uri, proxy + super service_endpoint_uri, logger, proxy end # The time span in seconds at which the the worker thread will check the @@ -130,7 +131,7 @@ def run rescue Exception => e # Make sure work_thread sets to nil when it terminates abnormally @work_thread = nil - @logger.error('application_insights') { "Asynchronous sender work thread terminated abnormally: #{e.to_s}" } + @logger.error("application_insights") { "Asynchronous sender work thread terminated abnormally: #{e.to_s}" } end end end diff --git a/source/plugins/ruby/lib/application_insights/channel/sender_base.rb b/source/plugins/ruby/lib/application_insights/channel/sender_base.rb index bedbae4ee..a6b7966db 100644 --- a/source/plugins/ruby/lib/application_insights/channel/sender_base.rb +++ b/source/plugins/ruby/lib/application_insights/channel/sender_base.rb @@ -1,9 +1,9 @@ -require 'yajl/json_gem' -require 'net/http' -require 'openssl' -require 'stringio' -require 'zlib' -require 'logger' +require "yajl/json_gem" +require "net/http" +require "openssl" +require "stringio" +require "zlib" +require "logger" module ApplicationInsights module Channel @@ -16,13 +16,18 @@ module Channel class SenderBase # Initializes a new instance of the class. # @param [String] service_endpoint_uri the address of the service to send + # @param [Logger] instance of the logger to write the logs # @param [Hash] proxy server configuration to send (optional) # telemetry data to. - def initialize(service_endpoint_uri, proxy = {}) + def initialize(service_endpoint_uri, logger, proxy = {}) @service_endpoint_uri = service_endpoint_uri @queue = nil @send_buffer_size = 100 - @logger = Logger.new(STDOUT) + if !logger.nil? && !logger.empty? + @logger = logger + else + @logger = Logger.new(STDOUT) + end @proxy = proxy end @@ -53,9 +58,9 @@ def initialize(service_endpoint_uri, proxy = {}) def send(data_to_send) uri = URI(@service_endpoint_uri) headers = { - 'Accept' => 'application/json', - 'Content-Type' => 'application/json; charset=utf-8', - 'Content-Encoding' => 'gzip' + "Accept" => "application/json", + "Content-Type" => "application/json; charset=utf-8", + "Content-Encoding" => "gzip", } request = Net::HTTP::Post.new(uri.path, headers) @@ -69,7 +74,7 @@ def send(data_to_send) else http = Net::HTTP.new(uri.hostname, uri.port, @proxy[:addr], @proxy[:port], @proxy[:user], @proxy[:pass]) end - if uri.scheme.downcase == 'https' + if uri.scheme.downcase == "https" http.use_ssl = true http.verify_mode = OpenSSL::SSL::VERIFY_PEER end @@ -78,7 +83,7 @@ def send(data_to_send) http.finish if http.started? if !response.kind_of? 
Net::HTTPSuccess - @logger.warn('application_insights') { "Failed to send data: #{response.message}" } + @logger.warn("application_insights") { "Failed to send data: #{response.message}" } end end diff --git a/source/plugins/ruby/lib/application_insights/channel/synchronous_sender.rb b/source/plugins/ruby/lib/application_insights/channel/synchronous_sender.rb index 597e97b9e..2bb212026 100644 --- a/source/plugins/ruby/lib/application_insights/channel/synchronous_sender.rb +++ b/source/plugins/ruby/lib/application_insights/channel/synchronous_sender.rb @@ -8,14 +8,15 @@ class SynchronousSender < SenderBase SERVICE_ENDPOINT_URI = "https://dc.services.visualstudio.com/v2/track" # Initializes a new instance of the class. # @param [String] service_endpoint_uri the address of the service to send + # @param [Logger] instance of the logger to write the logs (optional) # @param [Hash] proxy server configuration to send (optional) # telemetry data to. - def initialize(service_endpoint_uri = SERVICE_ENDPOINT_URI, proxy = {}) + def initialize(service_endpoint_uri = SERVICE_ENDPOINT_URI, logger = nil, proxy = {}) # callers which requires proxy dont require to maintain service endpoint uri which potentially can change if service_endpoint_uri.nil? || service_endpoint_uri.empty? service_endpoint_uri = SERVICE_ENDPOINT_URI end - super service_endpoint_uri, proxy + super service_endpoint_uri, logger, proxy end end end From ec2b09f72843a65e5fc08ba3f1a42e4860ac46a7 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 31 Jan 2022 13:57:35 -0800 Subject: [PATCH 192/301] release updates for ciprod01312022 & win-ciprod01312022release (#707) * release updates for ciprod01312022 release * release updates for ciprod01312022 release * fix pr feedback --- ReleaseNotes.md | 35 ++++++++++++++++++++++ charts/azuremonitor-containers/values.yaml | 4 +-- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 9 +++--- kubernetes/windows/Dockerfile | 2 +- 5 files changed, 43 insertions(+), 9 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index c8a147044..67f144608 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,41 @@ additional questions or comments. 
Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 1/31/2022 - +##### Version microsoft/oms:ciprod01312022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01312022 (linux) +##### Version microsoft/oms:win-ciprod01312022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod01312022 (windows) +##### Code change log +- Linux Agent + - Configurable DB name via configmap for ADX (default DB name:containerinsights) + - Default to cAdvisor port to 10250 and container runtime to Containerd + - Update AgentVersion annotation in yamls (omsagent and chart) with released MDSD agent version + - Incresing windows agent CPU limits from 200m to 500m + - Ignore new disk path that comes from containerd starting with k8s version >= 1.19.x, which was adding unnecessary InsightsMetrics logs and increasing cost + - Route the AI SDK logs to log file instead of stdout + - Telemetry to collect ContainerLog Records with empty Timestamp + - FluentBit version upgrade from 1.6.8 to 1.7.8 +- Windows Agent + - Update to use FluentBit for container log collection and removed FluentD dependency for container log collection + - Telemetry to track if any of the variable fields of windows container inventory records has field size >= 64KB + - Add windows os check in in_cadvisor_perf plugin to avoid making call in MDSD in MSI auth mode + - Bug fix for placeholder_hostname in telegraf metrics + - FluentBit version upgrade from 1.4.0 to 1.7.8 +- Common + - Upgrade FluentD gem version from 1.12.2 to 1.14.2 + - Upgrade Telegraf version from 1.18.0 to 1.20.3 + - Fix for exception in node allocatable + - Telemetry to track nodeCount & containerCount +- Other changes + - Updates to Arc K8s Extension ARM Onboarding templates with GA API version + - Added ARM Templates for MSI Based Onboarding for AKS + - Conformance test updates relates to sidecar container + - Troubelshooting script to detect issues related to Arc K8s Extension onboarding + - Remove the dependency SP for CDPX since configured to use MSI + - Linux Agent Image build improvements + - Update msys2 version to fix windows agent build + - Add explicit exit code 1 across all the PS scripts + + ### 10/13/2021 - ##### Version microsoft/oms:ciprod10132021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10132021 (linux) ##### Version microsoft/oms:win-ciprod10132021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10132021 (windows) diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index d5d7ad2e1..0456eb625 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -21,8 +21,8 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod10132021" - tagWindows: "win-ciprod10132021" + tag: "ciprod01312022" + tagWindows: "win-ciprod01312022" pullPolicy: IfNotPresent dockerProviderVersion: "16.0.0-0" agentVersion: "1.10.0.1" diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 9164abc9c..f3a9efd7a 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -17,7 +17,7 @@ ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python-ctypes init-system-helpers net-tools rsyslog cron vim dmidecode apt-transport-https gnupg && rm -rf /var/lib/apt/lists/* COPY setup.sh 
main.sh defaultpromenvvariables defaultpromenvvariables-rs defaultpromenvvariables-sidecar mdsd.xml envmdsd logrotate.conf $tmpdir/ -ARG IMAGE_TAG=ciprod10132021 +ARG IMAGE_TAG=ciprod01312022 ENV AGENT_VERSION ${IMAGE_TAG} WORKDIR ${tmpdir} diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 248276a08..28c8803c6 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10132021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01312022" imagePullPolicy: IfNotPresent resources: limits: @@ -454,7 +454,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode # - name: omsagent-prometheus - # image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10132021" + # image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01312022" # imagePullPolicy: IfNotPresent # resources: # limits: @@ -603,7 +603,7 @@ spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10132021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01312022" imagePullPolicy: IfNotPresent resources: limits: @@ -776,7 +776,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10132021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod01312022" imagePullPolicy: IfNotPresent resources: limits: @@ -933,4 +933,3 @@ spec: names: plural: healthstates kind: HealthState - \ No newline at end of file diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 55bedf7f5..6a2785e69 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod10132021 +ARG IMAGE_TAG=win-ciprod01312022 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement From 5f41c5e85d582db2bf1c6297d3718382481cc90e Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 31 Jan 2022 17:02:14 -0800 Subject: [PATCH 193/301] fix logger exception (#709) --- .../ruby/lib/application_insights/channel/sender_base.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/plugins/ruby/lib/application_insights/channel/sender_base.rb b/source/plugins/ruby/lib/application_insights/channel/sender_base.rb index a6b7966db..e5a4dea62 100644 --- a/source/plugins/ruby/lib/application_insights/channel/sender_base.rb +++ b/source/plugins/ruby/lib/application_insights/channel/sender_base.rb @@ -23,7 +23,7 @@ def initialize(service_endpoint_uri, logger, proxy = {}) @service_endpoint_uri = service_endpoint_uri @queue = nil @send_buffer_size = 100 - if !logger.nil? && !logger.empty? + if !logger.nil? 
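A minimal illustrative sketch (not from the repository) of the logger-injection pattern this fix settles on: Ruby's stdlib Logger does not respond to empty?, so the earlier guard `!logger.nil? && !logger.empty?` raised NoMethodError whenever a Logger instance was injected; reducing the check to a nil? test keeps the injected file logger and still falls back to STDOUT. The helper name and file path below are assumptions for illustration only.

    require "logger"

    # Fall back to STDOUT only when no logger is injected.
    # (Calling logger.empty? here would raise NoMethodError on a Logger instance.)
    def resolve_logger(logger = nil)
      logger.nil? ? Logger.new(STDOUT) : logger
    end

    # Example: a rotating 2 MB file logger, similar to the appinsights_error.log
    # logger that ApplicationInsightsUtility passes down to the senders.
    ai_logger = Logger.new("/tmp/appinsights_error.log", 1, 2 * 1024 * 1024)
    sender_logger = resolve_logger(ai_logger)
    sender_logger.warn("application_insights") { "Failed to send data: request timed out" }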
@logger = logger else @logger = Logger.new(STDOUT) From 7ff6c02351534d74b1344de09070f6b8d0f1e5a9 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 2 Feb 2022 14:09:39 -0800 Subject: [PATCH 194/301] Gangams/chart version update for jan release (#710) * chart updates for jan2022 release * add missing agentversion annotations --- charts/azuremonitor-containers/Chart.yaml | 2 +- .../templates/omsagent-daemonset-windows.yaml | 8 +++++++- .../templates/omsagent-daemonset.yaml | 2 +- .../templates/omsagent-deployment.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 3 ++- 5 files changed, 12 insertions(+), 5 deletions(-) diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 4dd6623bf..38d2e30ec 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.9.0 +version: 2.9.1 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index 78831aa10..cc995b3c2 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -21,7 +21,7 @@ spec: labels: dsName: "omsagent-ds" annotations: - agentVersion: {{ .Values.omsagent.image.tagWindows }} + agentVersion: {{ .Values.omsagent.winAgentVersion }} dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . | sha256sum }} @@ -53,6 +53,12 @@ spec: resources: {{ toYaml .Values.omsagent.resources.daemonsetwindows | indent 9 }} env: + - name: FBIT_SERVICE_FLUSH_INTERVAL + value: "15" + - name: FBIT_TAIL_BUFFER_CHUNK_SIZE + value: "1" + - name: FBIT_TAIL_BUFFER_MAX_SIZE + value: "1" {{- if ne .Values.omsagent.env.clusterId "" }} - name: AKS_RESOURCE_ID value: {{ .Values.omsagent.env.clusterId | quote }} diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index 8e5513f91..ba3ad7c0b 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -21,7 +21,7 @@ spec: labels: dsName: "omsagent-ds" annotations: - agentVersion: {{ .Values.omsagent.image.tag }} + agentVersion: {{ .Values.omsagent.agentVersion }} dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . 
| sha256sum }} diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index 1eaf7f652..26b0ccca0 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -22,7 +22,7 @@ spec: labels: rsName: "omsagent-rs" annotations: - agentVersion: {{ .Values.omsagent.image.tag }} + agentVersion: {{ .Values.omsagent.agentVersion }} dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . | sha256sum }} diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 0456eb625..aa4c6bcf2 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -25,7 +25,8 @@ omsagent: tagWindows: "win-ciprod01312022" pullPolicy: IfNotPresent dockerProviderVersion: "16.0.0-0" - agentVersion: "1.10.0.1" + agentVersion: "azure-mdsd-1.14.2" + winAgentVersion: "0.0.0-0" # there is no base agent version for windows agent # The priority used by the omsagent priority class for the daemonset pods # Note that this is not execution piority - it is scheduling priority, as From d6e9272a23f8c033505b6231bd28bcf8f748b438 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 2 Feb 2022 18:05:05 -0800 Subject: [PATCH 195/301] fix agentversion annotation issue in chart (#712) --- .../templates/omsagent-daemonset-windows.yaml | 2 +- .../azuremonitor-containers/templates/omsagent-daemonset.yaml | 2 +- .../azuremonitor-containers/templates/omsagent-deployment.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index cc995b3c2..b581a324a 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -21,7 +21,7 @@ spec: labels: dsName: "omsagent-ds" annotations: - agentVersion: {{ .Values.omsagent.winAgentVersion }} + agentVersion: {{ .Values.omsagent.image.winAgentVersion }} dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . | sha256sum }} diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index ba3ad7c0b..153395727 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -21,7 +21,7 @@ spec: labels: dsName: "omsagent-ds" annotations: - agentVersion: {{ .Values.omsagent.agentVersion }} + agentVersion: {{ .Values.omsagent.image.agentVersion }} dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . 
| sha256sum }} diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index 26b0ccca0..a7ea8b097 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -22,7 +22,7 @@ spec: labels: rsName: "omsagent-rs" annotations: - agentVersion: {{ .Values.omsagent.agentVersion }} + agentVersion: {{ .Values.omsagent.image.agentVersion }} dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . | sha256sum }} From 918ad11717771e2c4c885b8ce318f419207d5601 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Fri, 11 Feb 2022 15:29:11 -0800 Subject: [PATCH 196/301] adx bug + misc (#714) * fix golang dependencies * fix adx bug * exclude telegraf * fix space * include both * exclude files specifically --- .github/workflows/pr-checker.yml | 4 +- source/plugins/go/src/go.mod | 8 ++- source/plugins/go/src/go.sum | 87 ++++++++++++++++++-------------- source/plugins/go/src/oms.go | 2 +- source/plugins/go/src/utils.go | 1 + 5 files changed, 56 insertions(+), 46 deletions(-) diff --git a/.github/workflows/pr-checker.yml b/.github/workflows/pr-checker.yml index 723f22dc7..40221b21a 100644 --- a/.github/workflows/pr-checker.yml +++ b/.github/workflows/pr-checker.yml @@ -56,9 +56,11 @@ jobs: format: 'table' severity: 'CRITICAL,HIGH' vuln-type: 'os,library' - skip-dirs: '/usr/sbin' + #[vishwa] - Fix telegraf & test all for next release - see work item #https://msazure.visualstudio.com/InfrastructureInsights/_workitems/edit/13322134 + skip-files: '/usr/sbin/telegraf,/opt/telegraf' exit-code: '1' timeout: '5m0s' + ignore-unfixed: true WINDOWS-build: runs-on: windows-latest steps: diff --git a/source/plugins/go/src/go.mod b/source/plugins/go/src/go.mod index 58e668597..3957a2b74 100644 --- a/source/plugins/go/src/go.mod +++ b/source/plugins/go/src/go.mod @@ -3,13 +3,11 @@ module Docker-Provider/source/plugins/go/src go 1.14 require ( - github.com/Azure/azure-kusto-go v0.3.2 - github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 - github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect - github.com/dnaeon/go-vcr v1.2.0 // indirect + github.com/Azure/azure-kusto-go v0.5.2 + github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 github.com/fluent/fluent-bit-go v0.0.0-20171103221316-c4a158a6e3a7 github.com/golang/mock v1.4.1 - github.com/google/uuid v1.1.2 + github.com/google/uuid v1.2.0 github.com/microsoft/ApplicationInsights-Go v0.4.3 github.com/philhofer/fwd v1.1.1 // indirect github.com/tinylib/msgp v1.1.2 diff --git a/source/plugins/go/src/go.sum b/source/plugins/go/src/go.sum index ad9e40089..ef157f9e9 100644 --- a/source/plugins/go/src/go.sum +++ b/source/plugins/go/src/go.sum @@ -23,54 +23,42 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c h1:5eeuG0BHx1+DHeT3AP+ISKZ2ht1UjGhm581ljqYpVeQ= code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-kusto-go v0.3.2 
h1:XpS9co6GvEDl2oICF9HsjEsQVwEpRK6wbNWb9Z+uqsY= -github.com/Azure/azure-kusto-go v0.3.2/go.mod h1:wd50n4qlsSxh+G4f80t+Fnl2ShK9AcXD+lMOstiKuYo= +github.com/Azure/azure-kusto-go v0.5.2 h1:6kFVZp4iyz8YFTuxrIdivAXVcEs5wNKTVK5gai+E8pk= +github.com/Azure/azure-kusto-go v0.5.2/go.mod h1:2xOhBxRcHyyNifFHmNMcqYL6AMdhyrUHCkEJkrZ+EI4= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= -github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo= -github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= -github.com/Azure/azure-sdk-for-go v44.1.0+incompatible h1:l1UGvaaoMCUwVGUauvHzeB4t+Y0yPX5iJwBhzc0LqyE= -github.com/Azure/azure-sdk-for-go v44.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o= -github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= +github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= +github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-sdk-for-go v61.2.0+incompatible h1:sSormXkfW0ov1vh6ihTBRQxdfg73fPqkccl50GbR9iM= +github.com/Azure/azure-sdk-for-go v61.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= +github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd h1:b3wyxBl3vvr15tUAziPBPK354y+LSdfPCpex5oBttHo= github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= -github.com/Azure/go-autorest/autorest v0.10.0 h1:mvdtztBqcL8se7MdrUweNieTNi4kfNG6GOJuurQJpuY= -github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.12 h1:gI8ytXbxMfI+IVbI9mP2JGCTXIuhHLgRlvQ9X4PsnHE= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest v0.11.24 h1:1fIGgHKqVm54KIPT+q8Zmd1QlVsmHqeUGso5qm2BqqE= 
+github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk= -github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= -github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= -github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ= +github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= 
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= @@ -89,10 +77,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= -github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= +github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= @@ -120,8 +106,13 @@ github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL9 github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -169,6 +160,8 @@ github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= 
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= @@ -190,6 +183,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -199,8 +194,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= +github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/microsoft/ApplicationInsights-Go v0.4.3 h1:gBuy5rM3o6Zo69QTkq1Ens8wx6sVf+mpgMjjfayiRcw= github.com/microsoft/ApplicationInsights-Go v0.4.3/go.mod h1:ih0t3h84PdzV1qGeUs89o9wL8eCuwf24M7TZp/nyqXk= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -267,12 +262,13 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 
h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -317,6 +313,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -327,6 +324,9 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -353,6 +353,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -363,10 +364,14 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073 h1:8qxJSnu+7dRq6upnbntrmriWByIakBuct5OM/MdQC1M= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -380,6 +385,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -481,6 +488,8 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 407ab3611..99329ac6f 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -1713,7 +1713,7 @@ func InitializePlugin(pluginConfPath string, 
agentVersion string) { // Try to read the ADX database name from environment variables. Default to DefaultAdsDatabaseName if not set. // This SHOULD be set by tomlparser.rb so it's a highly unexpected event if it isn't. // It should be set by the logic in tomlparser.rb EVEN if ADX logging isn't enabled - AdxDatabaseName := strings.TrimSpace(os.Getenv("AZMON_ADX_DATABASE_NAME")) + AdxDatabaseName = strings.TrimSpace(os.Getenv("AZMON_ADX_DATABASE_NAME")) // Check the len of the provided name for database and use default if 0, just to be sure if len(AdxDatabaseName) == 0 { diff --git a/source/plugins/go/src/utils.go b/source/plugins/go/src/utils.go index 61c6898d7..23353a372 100644 --- a/source/plugins/go/src/utils.go +++ b/source/plugins/go/src/utils.go @@ -192,6 +192,7 @@ func CreateADXClient() { //log.Fatalf("Unable to create ADX connection %s", err.Error()) } else { Log("Successfully created ADX Client. Creating Ingestor...") + Log("AdxDatabaseName=%s", AdxDatabaseName) ingestor, ingestorErr := ingest.New(client, AdxDatabaseName, "ContainerLogV2") if ingestorErr != nil { Log("Error::mdsd::Unable to create ADX ingestor %s", ingestorErr.Error()) From 2dbc4f12b7653495342605efce6a2beaa7401bbb Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Tue, 15 Feb 2022 10:21:44 -0800 Subject: [PATCH 197/301] fix build break (#715) * fix build break * update all places --- ...cial.all_tag.all_phase.all_config.ci_prod.yml | 2 +- .pipelines/pipeline.user.linux.yml | 2 +- README.md | 4 ++-- .../build/linux/install-build-pre-requisites.sh | 16 ++++++++-------- .../windows/install-build-pre-requisites.ps1 | 2 +- 5 files changed, 13 insertions(+), 13 deletions(-) mode change 100755 => 100644 scripts/build/windows/install-build-pre-requisites.ps1 diff --git a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml index 97390298c..9ea82d428 100644 --- a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml +++ b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml @@ -5,7 +5,7 @@ environment: version: '16.04' runtime: provider: 'appcontainer' - image: 'cdpxlinux.azurecr.io/user/azure-monitor/container-insights:1.0' + image: 'cdpxlinux.azurecr.io/user/azure-monitor/container-insights:latest' version: name: 'DockerProvider' diff --git a/.pipelines/pipeline.user.linux.yml b/.pipelines/pipeline.user.linux.yml index 4c39fad5a..80b1b0687 100644 --- a/.pipelines/pipeline.user.linux.yml +++ b/.pipelines/pipeline.user.linux.yml @@ -5,7 +5,7 @@ environment: version: '16.04' runtime: provider: 'appcontainer' - image: 'cdpxlinux.azurecr.io/user/azure-monitor/container-insights:1.0' + image: 'cdpxlinux.azurecr.io/user/azure-monitor/container-insights:latest' version: name: 'DockerProvider' diff --git a/README.md b/README.md index 3cd466bb9..7d42c75d6 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ Feel free to contact engineering team owners in case you have any questions abou ## Common - [Visual Studio Code](https://code.visualstudio.com/) for authoring -- [Go lang](https://golang.org/) for building go code. Go lang version 1.14.1. +- [Go lang](https://golang.org/) for building go code. Go lang version 1.15.14 (both Linux & Windows) > Note: If you are using WSL2, make sure you have cloned the code onto ubuntu not onto windows @@ -121,7 +121,7 @@ We recommend using [Visual Studio Code](https://code.visualstudio.com/) for auth ### Install Pre-requisites -1. 
Install go1.14.1, dotnet, powershell, docker and build dependencies to build go code for both Linux and Windows platforms +1. Install go1.15.14, dotnet, powershell, docker and build dependencies to build go code for both Linux and Windows platforms ``` bash ~/Docker-Provider/scripts/build/linux/install-build-pre-requisites.sh ``` diff --git a/scripts/build/linux/install-build-pre-requisites.sh b/scripts/build/linux/install-build-pre-requisites.sh index 717f12f76..385d6fffc 100644 --- a/scripts/build/linux/install-build-pre-requisites.sh +++ b/scripts/build/linux/install-build-pre-requisites.sh @@ -8,17 +8,17 @@ TEMP_DIR=temp-$RANDOM install_go_lang() { export goVersion="$(echo $(go version))" - if [[ $goVersion == *go1.14.1* ]] ; then - echo "found existing installation of go version 1.14.1 so skipping the installation of go" + if [[ $goVersion == *go1.15.14* ]] ; then + echo "found existing installation of go version 1.15.14 so skipping the installation of go" else - echo "installing go 1.14.1 version ..." - sudo curl -O https://dl.google.com/go/go1.14.1.linux-amd64.tar.gz - sudo tar -xvf go1.14.1.linux-amd64.tar.gz + echo "installing go 1.15.14 version ..." + sudo curl -O https://dl.google.com/go/go1.15.14.linux-amd64.tar.gz + sudo tar -xvf go1.15.14.linux-amd64.tar.gz sudo mv -f go /usr/local echo "set file permission for go bin" sudo chmod 777 /usr/local/go/bin - echo "installation of go 1.14.1 completed." - echo "installation of go 1.14.1 completed." + echo "installation of go 1.15.14 completed." + echo "installation of go 1.15.14 completed." fi } @@ -154,4 +154,4 @@ sudo rm -rf $TEMP_DIR # set go env vars install_go_env_vars -echo "installing build pre-requisites python, go 1.14.1, dotnet, powershell, build dependencies and docker completed" +echo "installing build pre-requisites python, go 1.15.14, dotnet, powershell, build dependencies and docker completed" diff --git a/scripts/build/windows/install-build-pre-requisites.ps1 b/scripts/build/windows/install-build-pre-requisites.ps1 old mode 100755 new mode 100644 index 1ea316798..750a7b18b --- a/scripts/build/windows/install-build-pre-requisites.ps1 +++ b/scripts/build/windows/install-build-pre-requisites.ps1 @@ -133,7 +133,7 @@ function Install-Docker() { Write-Host("installing docker for desktop completed") } -Write-Host "Install GO 1.14.1 version" +Write-Host "Install GO 1.15.14 version" Install-Go Write-Host "Install Build dependencies" Build-Dependencies From 7389a1ba1a3f2c839070349b3d503e66195ef642 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Mon, 28 Feb 2022 19:20:27 -0800 Subject: [PATCH 198/301] Explicitly use win-2019 to unblock windows PRs builds --- .github/workflows/pr-checker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-checker.yml b/.github/workflows/pr-checker.yml index 40221b21a..f0cea063d 100644 --- a/.github/workflows/pr-checker.yml +++ b/.github/workflows/pr-checker.yml @@ -62,7 +62,7 @@ jobs: timeout: '5m0s' ignore-unfixed: true WINDOWS-build: - runs-on: windows-latest + runs-on: windows-2019 steps: - name: Set-workflow-initiator run: echo ("Initiated by -" + $env:GITHUB_ACTOR) From ec3d2ef997369fe3a4bb7afbd411ad65f2564286 Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Tue, 1 Mar 2022 14:05:59 -0800 Subject: [PATCH 199/301] Fixing telegraf vulnerability (#716) --- kubernetes/linux/setup.sh | 2 +- kubernetes/omsagent.yaml | 122 +++++++++--------- .../linux/install-build-pre-requisites.sh | 2 +- source/plugins/go/src/go.mod | 4 +- 
source/plugins/go/src/go.sum | 7 +- 5 files changed, 70 insertions(+), 67 deletions(-) diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 872ac99cf..f18e372cd 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -35,7 +35,7 @@ tar -zxvf telegraf-1.20.3_linux_amd64.tar.gz mv /opt/telegraf-1.20.3/usr/bin/telegraf /opt/telegraf -chmod 777 /opt/telegraf +chmod 544 /opt/telegraf # Use wildcard version so that it doesnt require to touch this file /$TMPDIR/docker-cimprov-*.*.*-*.x86_64.sh --install diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 28c8803c6..2779a23fd 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -453,65 +453,65 @@ spec: periodSeconds: 60 timeoutSeconds: 15 #Only in sidecar scraping mode - # - name: omsagent-prometheus - # image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01312022" - # imagePullPolicy: IfNotPresent - # resources: - # limits: - # cpu: 500m - # memory: 1Gi - # requests: - # cpu: 75m - # memory: 225Mi - # env: - # # azure devops pipeline uses AKS_RESOURCE_ID and AKS_REGION hence ensure to uncomment these - # - name: AKS_CLUSTER_NAME - # value: "VALUE_AKS_CLUSTER_NAME" - # - name: AKS_RESOURCE_ID - # value: "VALUE_AKS_RESOURCE_ID_VALUE" - # - name: AKS_REGION - # value: "VALUE_AKS_RESOURCE_REGION_VALUE" - # - name: AKS_NODE_RESOURCE_GROUP - # value: "VALUE_AKS_NODE_RESOURCE_GROUP" - # #Uncomment below two lines for ACS clusters and set the cluster names manually. Also comment out the above two lines for ACS clusters - # #- name: ACS_RESOURCE_NAME - # # value: "my_acs_cluster_name" - # - name: CONTAINER_TYPE - # value: "PrometheusSidecar" - # - name: CONTROLLER_TYPE - # value: "DaemonSet" - # - name: NODE_IP - # valueFrom: - # fieldRef: - # fieldPath: status.hostIP - # # Update this with the user assigned msi client id for omsagent - # - name: USER_ASSIGNED_IDENTITY_CLIENT_ID - # value: "VALUE_USER_ASSIGNED_IDENTITY_CLIENT_ID_VALUE" - # - name: USING_AAD_MSI_AUTH - # value: "false" - # securityContext: - # privileged: true - # volumeMounts: - # - mountPath: /etc/kubernetes/host - # name: azure-json-path - # - mountPath: /etc/omsagent-secret - # name: omsagent-secret - # readOnly: true - # - mountPath: /etc/config/settings - # name: settings-vol-config - # readOnly: true - # - mountPath: /etc/config/osm-settings - # name: osm-settings-vol-config - # readOnly: true - # livenessProbe: - # exec: - # command: - # - /bin/bash - # - -c - # - /opt/livenessprobe.sh - # initialDelaySeconds: 60 - # periodSeconds: 60 - # timeoutSeconds: 15 + - name: omsagent-prometheus + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01312022" + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 75m + memory: 225Mi + env: + # azure devops pipeline uses AKS_RESOURCE_ID and AKS_REGION hence ensure to uncomment these + - name: AKS_CLUSTER_NAME + value: "VALUE_AKS_CLUSTER_NAME" + - name: AKS_RESOURCE_ID + value: "VALUE_AKS_RESOURCE_ID_VALUE" + - name: AKS_REGION + value: "VALUE_AKS_RESOURCE_REGION_VALUE" + - name: AKS_NODE_RESOURCE_GROUP + value: "VALUE_AKS_NODE_RESOURCE_GROUP" + #Uncomment below two lines for ACS clusters and set the cluster names manually. 
Also comment out the above two lines for ACS clusters + #- name: ACS_RESOURCE_NAME + # value: "my_acs_cluster_name" + - name: CONTAINER_TYPE + value: "PrometheusSidecar" + - name: CONTROLLER_TYPE + value: "DaemonSet" + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + # Update this with the user assigned msi client id for omsagent + - name: USER_ASSIGNED_IDENTITY_CLIENT_ID + value: "VALUE_USER_ASSIGNED_IDENTITY_CLIENT_ID_VALUE" + - name: USING_AAD_MSI_AUTH + value: "false" + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/kubernetes/host + name: azure-json-path + - mountPath: /etc/omsagent-secret + name: omsagent-secret + readOnly: true + - mountPath: /etc/config/settings + name: settings-vol-config + readOnly: true + - mountPath: /etc/config/osm-settings + name: osm-settings-vol-config + readOnly: true + livenessProbe: + exec: + command: + - /bin/bash + - -c + - /opt/livenessprobe.sh + initialDelaySeconds: 60 + periodSeconds: 60 + timeoutSeconds: 15 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -634,7 +634,7 @@ spec: value: "VALUE_USER_ASSIGNED_IDENTITY_CLIENT_ID_VALUE" # Add the below environment variable to true only in sidecar enabled regions, else set it to false - name: SIDECAR_SCRAPING_ENABLED - value: "false" + value: "true" - name: USING_AAD_MSI_AUTH value: "false" securityContext: @@ -811,7 +811,7 @@ spec: fieldRef: fieldPath: status.hostIP - name: SIDECAR_SCRAPING_ENABLED - value: "false" + value: "true" # Update this with the user assigned msi client id for omsagent - name: USER_ASSIGNED_IDENTITY_CLIENT_ID value: "VALUE_USER_ASSIGNED_IDENTITY_CLIENT_ID_VALUE" diff --git a/scripts/build/linux/install-build-pre-requisites.sh b/scripts/build/linux/install-build-pre-requisites.sh index 385d6fffc..7959b37e8 100644 --- a/scripts/build/linux/install-build-pre-requisites.sh +++ b/scripts/build/linux/install-build-pre-requisites.sh @@ -16,7 +16,7 @@ install_go_lang() sudo tar -xvf go1.15.14.linux-amd64.tar.gz sudo mv -f go /usr/local echo "set file permission for go bin" - sudo chmod 777 /usr/local/go/bin + sudo chmod 744 /usr/local/go/bin echo "installation of go 1.15.14 completed." echo "installation of go 1.15.14 completed." 
fi diff --git a/source/plugins/go/src/go.mod b/source/plugins/go/src/go.mod index 3957a2b74..9f30afab1 100644 --- a/source/plugins/go/src/go.mod +++ b/source/plugins/go/src/go.mod @@ -7,8 +7,8 @@ require ( github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 github.com/fluent/fluent-bit-go v0.0.0-20171103221316-c4a158a6e3a7 github.com/golang/mock v1.4.1 - github.com/google/uuid v1.2.0 - github.com/microsoft/ApplicationInsights-Go v0.4.3 + github.com/google/uuid v1.3.0 + github.com/microsoft/ApplicationInsights-Go v0.4.4 github.com/philhofer/fwd v1.1.1 // indirect github.com/tinylib/msgp v1.1.2 github.com/ugorji/go v1.1.2-0.20180813092308-00b869d2f4a5 diff --git a/source/plugins/go/src/go.sum b/source/plugins/go/src/go.sum index ef157f9e9..c5d7ea147 100644 --- a/source/plugins/go/src/go.sum +++ b/source/plugins/go/src/go.sum @@ -106,6 +106,7 @@ github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL9 github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -162,6 +163,8 @@ github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= @@ -198,6 +201,8 @@ github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqf github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/microsoft/ApplicationInsights-Go v0.4.3 h1:gBuy5rM3o6Zo69QTkq1Ens8wx6sVf+mpgMjjfayiRcw= github.com/microsoft/ApplicationInsights-Go v0.4.3/go.mod h1:ih0t3h84PdzV1qGeUs89o9wL8eCuwf24M7TZp/nyqXk= +github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= +github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -232,8 +237,6 @@ 
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= From 798bd9f078b133e4c95e8138aef7770011979c51 Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Mon, 14 Mar 2022 10:59:10 -0700 Subject: [PATCH 200/301] cherry picked changes from 03112022 release (#719) * cherry picked changes from 03112022 release --- ReleaseNotes.md | 11 +++++++++++ build/common/installer/scripts/tomlparser.rb | 12 +++++++++++- build/windows/installer/conf/fluent-bit.conf | 11 +++++++++++ charts/azuremonitor-containers/Chart.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 4 ++-- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 6 +++--- kubernetes/windows/Dockerfile | 2 +- 8 files changed, 41 insertions(+), 9 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 67f144608..e070c151c 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,17 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 3/11/2022 - +##### Version microsoft/oms:ciprod03112022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03112022 (linux) +##### Version microsoft/oms:win-ciprod03112022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03112022 (windows) +##### Code change log +- Linux Agent + - Vulnerability fixes +- Windows Agent + - Bug fix for FluentBit stdout and stderr log filtering +- Common + - Upgrade Go lang version from 1.14.1 to 1.15.14 + ### 1/31/2022 - ##### Version microsoft/oms:ciprod01312022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01312022 (linux) ##### Version microsoft/oms:win-ciprod01312022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod01312022 (windows) diff --git a/build/common/installer/scripts/tomlparser.rb b/build/common/installer/scripts/tomlparser.rb index 64d6d48fb..6a2f3c6d6 100644 --- a/build/common/installer/scripts/tomlparser.rb +++ b/build/common/installer/scripts/tomlparser.rb @@ -261,7 +261,17 @@ def get_command_windows(env_variable_name, env_variable_value) file = File.open("setenv.ps1", "w") if !file.nil? 
- commands = get_command_windows('AZMON_COLLECT_STDOUT_LOGS', @collectStdoutLogs) + # This will be used in fluent-bit.conf file to filter out logs + if (!@collectStdoutLogs && !@collectStderrLogs) + #Stop log tailing completely + @logTailPath = "C:\\opt\\nolog*.log" + @logExclusionRegexPattern = "stdout|stderr" + elsif !@collectStdoutLogs + @logExclusionRegexPattern = "stdout" + elsif !@collectStderrLogs + @logExclusionRegexPattern = "stderr" + end + commands = get_command_windows("AZMON_COLLECT_STDOUT_LOGS", @collectStdoutLogs) file.write(commands) commands = get_command_windows('AZMON_LOG_TAIL_PATH', @logTailPath) file.write(commands) diff --git a/build/windows/installer/conf/fluent-bit.conf b/build/windows/installer/conf/fluent-bit.conf index 243056505..1e2d8a93e 100644 --- a/build/windows/installer/conf/fluent-bit.conf +++ b/build/windows/installer/conf/fluent-bit.conf @@ -50,6 +50,17 @@ Buffer_Size 64 Mem_Buf_Limit 5m +[FILTER] + Name grep + Match oms.container.log.la.* + Exclude stream ${AZMON_LOG_EXCLUSION_REGEX_PATTERN} + +# Exclude prometheus plugin exceptions that might be caused due to invalid config.(Logs which contain - E! [inputs.prometheus]) +# Excluding these logs from being sent to AI since it can result in high volume of data in telemetry due to invalid config. +[FILTER] + Name grep + Match oms.container.log.flbplugin.* + [OUTPUT] Name oms EnableTelemetry true diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 38d2e30ec..0ff1e3387 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.9.1 +version: 2.9.2 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index aa4c6bcf2..3ed5d780f 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -21,8 +21,8 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod01312022" - tagWindows: "win-ciprod01312022" + tag: "ciprod03112022" + tagWindows: "win-ciprod03112022" pullPolicy: IfNotPresent dockerProviderVersion: "16.0.0-0" agentVersion: "azure-mdsd-1.14.2" diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index f3a9efd7a..2128d6d6a 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -17,7 +17,7 @@ ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python-ctypes init-system-helpers net-tools rsyslog cron vim dmidecode apt-transport-https gnupg && rm -rf /var/lib/apt/lists/* COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs defaultpromenvvariables-sidecar mdsd.xml envmdsd logrotate.conf $tmpdir/ -ARG IMAGE_TAG=ciprod01312022 +ARG IMAGE_TAG=ciprod03112022 ENV AGENT_VERSION ${IMAGE_TAG} WORKDIR ${tmpdir} diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 2779a23fd..22cbc7405 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01312022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03112022" imagePullPolicy: 
IfNotPresent resources: limits: @@ -603,7 +603,7 @@ spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01312022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03112022" imagePullPolicy: IfNotPresent resources: limits: @@ -776,7 +776,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod01312022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03112022" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 6a2785e69..ef3b1140d 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod01312022 +ARG IMAGE_TAG=win-ciprod03112022 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement From 11677971a64bb7bcefeeca5f689ba7795eef699a Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 14 Mar 2022 12:54:54 -0700 Subject: [PATCH 201/301] Gangams/http proxy support (#717) * add proxy cert support * add proxy cert support * add proxy cert support * add proxy cert support * remove arbitery username and pwd requirement * remove arbitery username and pwd requirement * add proxy support for mdm * mdsd dev build * proxy changes * fix typo * mdsd dev build * add libcurl specific things * working mdsd proxy build * mdsd official master build * handle proxy endpoint which endswith / * latest official mdsd build * add telemetry to track proxy ca cert --- .../templates/omsagent-daemonset.yaml | 12 +++ .../templates/omsagent-deployment.yaml | 6 ++ .../templates/omsagent-secret.yaml | 15 +-- charts/azuremonitor-containers/values.yaml | 2 +- kubernetes/linux/main.sh | 58 +++++++---- kubernetes/linux/setup.sh | 5 +- kubernetes/omsagent.yaml | 4 +- kubernetes/windows/main.ps1 | 16 ++-- .../ruby/ApplicationInsightsUtility.rb | 3 + source/plugins/ruby/out_mdm.rb | 15 ++- source/plugins/ruby/proxy_utils.rb | 96 +++++++++++-------- 11 files changed, 146 insertions(+), 86 deletions(-) diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index 153395727..3b48c26c4 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -113,6 +113,12 @@ spec: - mountPath: /etc/omsagent-secret name: omsagent-secret readOnly: true + {{- if and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.proxyCert) }} + - mountPath: /etc/ssl/certs/proxy-cert.crt + subPath: PROXYCERT.crt + name: omsagent-secret + readOnly: true + {{- end }} - mountPath: /etc/config/settings name: settings-vol-config readOnly: true @@ -179,6 +185,12 @@ spec: - mountPath: /etc/omsagent-secret name: omsagent-secret readOnly: true + {{- if and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.proxyCert) }} + - mountPath: /etc/ssl/certs/proxy-cert.crt + subPath: PROXYCERT.crt + name: omsagent-secret + readOnly: true + {{- end }} - mountPath: /etc/config/settings name: settings-vol-config readOnly: true diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml 
b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index a7ea8b097..55b1f4a8d 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -99,6 +99,12 @@ spec: - mountPath: /etc/omsagent-secret name: omsagent-secret readOnly: true + {{- if and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.proxyCert) }} + - mountPath: /etc/ssl/certs/proxy-cert.crt + subPath: PROXYCERT.crt + name: omsagent-secret + readOnly: true + {{- end }} - mountPath : /etc/config name: omsagent-rs-config - mountPath: /etc/config/settings diff --git a/charts/azuremonitor-containers/templates/omsagent-secret.yaml b/charts/azuremonitor-containers/templates/omsagent-secret.yaml index 8c245338c..03d9a48c5 100644 --- a/charts/azuremonitor-containers/templates/omsagent-secret.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-secret.yaml @@ -13,19 +13,14 @@ data: WSID: {{ required "A valid workspace id is required!" .Values.omsagent.secret.wsid | b64enc | quote }} KEY: {{ required "A valid workspace key is required!" .Values.omsagent.secret.key | b64enc | quote }} DOMAIN: {{ .Values.omsagent.domain | b64enc | quote }} - {{- $httpsProxyDict := urlParse .Values.Azure.proxySettings.httpsProxy -}} - {{- $httpProxyDict := urlParse .Values.Azure.proxySettings.httpProxy -}} - {{- if and (and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.httpsProxy)) ($httpsProxyDict.userinfo) }} + {{- if and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.httpsProxy) }} PROXY: {{ .Values.Azure.proxySettings.httpsProxy | b64enc | quote }} - {{- else if and (and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.httpsProxy)) (empty $httpsProxyDict.userinfo) }} - # adding arbitrary creds since omsagent expects arbitrary creds in case of no auth - PROXY: {{ urlJoin (dict "scheme" $httpsProxyDict.scheme "userinfo" "admin:secret" "host" $httpsProxyDict.host) | b64enc | quote }} - {{- else if and (and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.httpProxy)) ($httpProxyDict.userinfo) }} + {{- else if and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.httpProxy) }} PROXY: {{ .Values.Azure.proxySettings.httpProxy | b64enc | quote }} - {{- else if and (and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.httpProxy)) (empty $httpProxyDict.userinfo) }} - # adding arbitrary creds since omsagent expects arbitrary creds in case of no auth - PROXY: {{ urlJoin (dict "scheme" $httpProxyDict.scheme "userinfo" "admin:secret" "host" $httpProxyDict.host) | b64enc | quote }} {{- else if ne .Values.omsagent.proxy "" }} PROXY: {{ .Values.omsagent.proxy | b64enc | quote }} {{- end }} + {{- if and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.proxyCert) }} + PROXYCERT.crt: {{.Values.Azure.proxySettings.proxyCert | b64enc | quote}} + {{- end }} {{- end }} diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 3ed5d780f..b47416bc1 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -25,7 +25,7 @@ omsagent: tagWindows: "win-ciprod03112022" pullPolicy: IfNotPresent dockerProviderVersion: "16.0.0-0" - agentVersion: "azure-mdsd-1.14.2" + agentVersion: "azure-mdsd-1.17.0" winAgentVersion: "0.0.0-0" # there is no base agent version for windows agent # 
The priority used by the omsagent priority class for the daemonset pods diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 980c15586..4e1b0783c 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -175,7 +175,7 @@ if [ -e "/etc/omsagent-secret/WSID" ]; then # convert the protocol prefix in lowercase for validation proxyprotocol=$(echo $proto | tr "[:upper:]" "[:lower:]") if [ "$proxyprotocol" != "http://" -a "$proxyprotocol" != "https://" ]; then - echo "-e error proxy endpoint should be in this format http(s)://:@:" + echo "-e error proxy endpoint should be in this format http(s)://: or http(s)://:@:" fi # remove the protocol url="$(echo ${PROXY_ENDPOINT/$proto/})" @@ -190,8 +190,8 @@ if [ -e "/etc/omsagent-secret/WSID" ]; then # extract the port port="$(echo $hostport | sed -e 's,^.*:,:,g' -e 's,.*:\([0-9]*\).*,\1,g' -e 's,[^0-9],,g')" - if [ -z "$user" -o -z "$pwd" -o -z "$host" -o -z "$port" ]; then - echo "-e error proxy endpoint should be in this format http(s)://:@:" + if [ -z "$host" -o -z "$port" ]; then + echo "-e error proxy endpoint should be in this format http(s)://: or http(s)://:@:" else echo "successfully validated provided proxy endpoint is valid and expected format" fi @@ -202,19 +202,26 @@ if [ -e "/etc/omsagent-secret/WSID" ]; then echo "export MDSD_PROXY_MODE=$MDSD_PROXY_MODE" >> ~/.bashrc export MDSD_PROXY_ADDRESS=$proto$hostport echo "export MDSD_PROXY_ADDRESS=$MDSD_PROXY_ADDRESS" >> ~/.bashrc - export MDSD_PROXY_USERNAME=$user - echo "export MDSD_PROXY_USERNAME=$MDSD_PROXY_USERNAME" >> ~/.bashrc - export MDSD_PROXY_PASSWORD_FILE=/opt/microsoft/docker-cimprov/proxy_password - echo "export MDSD_PROXY_PASSWORD_FILE=$MDSD_PROXY_PASSWORD_FILE" >> ~/.bashrc - - #TODO: Compression + proxy creates a deserialization error in ODS. This needs a fix in MDSD - export MDSD_ODS_COMPRESSION_LEVEL=0 - echo "export MDSD_ODS_COMPRESSION_LEVEL=$MDSD_ODS_COMPRESSION_LEVEL" >> ~/.bashrc + if [ ! -z "$user" -a ! -z "$pwd" ]; then + export MDSD_PROXY_USERNAME=$user + echo "export MDSD_PROXY_USERNAME=$MDSD_PROXY_USERNAME" >> ~/.bashrc + export MDSD_PROXY_PASSWORD_FILE=/opt/microsoft/docker-cimprov/proxy_password + echo "export MDSD_PROXY_PASSWORD_FILE=$MDSD_PROXY_PASSWORD_FILE" >> ~/.bashrc + fi + if [ -e "/etc/omsagent-secret/PROXYCERT.crt" ]; then + export PROXY_CA_CERT=/etc/omsagent-secret/PROXYCERT.crt + echo "export PROXY_CA_CERT=$PROXY_CA_CERT" >> ~/.bashrc + fi fi if [ ! -z "$PROXY_ENDPOINT" ]; then - echo "Making curl request to oms endpint with domain: $domain and proxy: $PROXY_ENDPOINT" - curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest --proxy $PROXY_ENDPOINT + if [ -e "/etc/omsagent-secret/PROXYCERT.crt" ]; then + echo "Making curl request to oms endpint with domain: $domain and proxy endpoint, and proxy CA cert" + curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest --proxy $PROXY_ENDPOINT --proxy-cacert /etc/omsagent-secret/PROXYCERT.crt + else + echo "Making curl request to oms endpint with domain: $domain and proxy endpoint" + curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest --proxy $PROXY_ENDPOINT + fi else echo "Making curl request to oms endpint with domain: $domain" curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest @@ -222,8 +229,13 @@ if [ -e "/etc/omsagent-secret/WSID" ]; then if [ $? -ne 0 ]; then if [ ! 
-z "$PROXY_ENDPOINT" ]; then - echo "Making curl request to ifconfig.co with proxy: $PROXY_ENDPOINT" - RET=`curl --max-time 10 -s -o /dev/null -w "%{http_code}" ifconfig.co --proxy $PROXY_ENDPOINT` + if [ -e "/etc/omsagent-secret/PROXYCERT.crt" ]; then + echo "Making curl request to ifconfig.co with proxy and proxy CA cert" + RET=`curl --max-time 10 -s -o /dev/null -w "%{http_code}" ifconfig.co --proxy $PROXY_ENDPOINT --proxy-cacert /etc/omsagent-secret/PROXYCERT.crt` + else + echo "Making curl request to ifconfig.co with proxy" + RET=`curl --max-time 10 -s -o /dev/null -w "%{http_code}" ifconfig.co --proxy $PROXY_ENDPOINT` + fi else echo "Making curl request to ifconfig.co" RET=`curl --max-time 10 -s -o /dev/null -w "%{http_code}" ifconfig.co` @@ -233,8 +245,13 @@ if [ -e "/etc/omsagent-secret/WSID" ]; then else # Retrying here to work around network timing issue if [ ! -z "$PROXY_ENDPOINT" ]; then - echo "ifconfig check succeeded, retrying oms endpoint with proxy..." - curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest --proxy $PROXY_ENDPOINT + if [ -e "/etc/omsagent-secret/PROXYCERT.crt" ]; then + echo "ifconfig check succeeded, retrying oms endpoint with proxy and proxy CA cert..." + curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest --proxy $PROXY_ENDPOINT --proxy-cacert /etc/omsagent-secret/PROXYCERT.crt + else + echo "ifconfig check succeeded, retrying oms endpoint with proxy..." + curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest --proxy $PROXY_ENDPOINT + fi else echo "ifconfig check succeeded, retrying oms endpoint..." curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest @@ -563,6 +580,13 @@ else echo "export CIWORKSPACE_keyFile=$CIWORKSPACE_keyFile" >> ~/.bashrc export MDSD_FLUENT_SOCKET_PORT="29230" echo "export MDSD_FLUENT_SOCKET_PORT=$MDSD_FLUENT_SOCKET_PORT" >> ~/.bashrc + # set the libcurl specific env and configuration + export ENABLE_CURL_UPLOAD=true + echo "export ENABLE_CURL_UPLOAD=$ENABLE_CURL_UPLOAD" >> ~/.bashrc + export CURL_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt + echo "export CURL_CA_BUNDLE=$CURL_CA_BUNDLE" >> ~/.bashrc + mkdir -p /etc/pki/tls/certs + cp /etc/ssl/certs/ca-certificates.crt /etc/pki/tls/certs/ca-bundle.crt fi source ~/.bashrc diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index f18e372cd..9d36fec4a 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -9,8 +9,9 @@ sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ dpkg-reconfigure --frontend=noninteractive locales && \ update-locale LANG=en_US.UTF-8 -#install oneagent - Official bits (10/7/2021) -wget https://github.com/microsoft/Docker-Provider/releases/download/1.14/azure-mdsd_1.14.2-build.master.284_x86_64.deb +#install oneagent - Official bits (2/22/2022) +wget https://github.com/microsoft/Docker-Provider/releases/download/1.17.0/azure-mdsd_1.17.0-build.master.354_x86_64.deb + /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb cp -f $TMPDIR/mdsd.xml /etc/mdsd.d diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 22cbc7405..49df045b9 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -357,7 +357,7 @@ spec: component: oms-agent tier: node annotations: - agentVersion: "azure-mdsd-1.14.2" + agentVersion: "azure-mdsd-1.17.0" dockerProviderVersion: "16.0.0-0" schema-versions: "v1" spec: @@ -596,7 +596,7 @@ spec: 
labels: rsName: "omsagent-rs" annotations: - agentVersion: "azure-mdsd-1.14.2" + agentVersion: "azure-mdsd-1.17.0" dockerProviderVersion: "16.0.0-0" schema-versions: "v1" spec: diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index f5fab4edd..6482daed9 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -120,12 +120,12 @@ function Set-EnvironmentVariables { $proxy = [string]$proxy.Trim(); $parts = $proxy -split "@" if ($parts.Length -ne 2) { - Write-Host "Invalid ProxyConfiguration $($proxy). EXITING....." + Write-Host "Invalid ProxyConfiguration. EXITING....." exit 1 } $subparts1 = $parts[0] -split "//" if ($subparts1.Length -ne 2) { - Write-Host "Invalid ProxyConfiguration $($proxy). EXITING....." + Write-Host "Invalid ProxyConfiguration. EXITING....." exit 1 } $protocol = $subparts1[0].ToLower().TrimEnd(":") @@ -133,16 +133,18 @@ function Set-EnvironmentVariables { Write-Host "Unsupported protocol in ProxyConfiguration $($proxy). EXITING....." exit 1 } - $subparts2 = $parts[1] -split ":" - if ($subparts2.Length -ne 2) { - Write-Host "Invalid ProxyConfiguration $($proxy). EXITING....." - exit 1 - } + } } + Write-Host "Provided Proxy configuration is valid" } + if (Test-Path /etc/omsagent-secret/PROXYCERT.crt) { + Write-Host "Importing Proxy CA cert since Proxy CA cert configured" + Import-Certificate -FilePath /etc/omsagent-secret/PROXYCERT.crt -CertStoreLocation 'Cert:\LocalMachine\Root' -Verbose + } + # Set PROXY [System.Environment]::SetEnvironmentVariable("PROXY", $proxy, "Process") [System.Environment]::SetEnvironmentVariable("PROXY", $proxy, "Machine") diff --git a/source/plugins/ruby/ApplicationInsightsUtility.rb b/source/plugins/ruby/ApplicationInsightsUtility.rb index eb143c4ba..70d0a400e 100644 --- a/source/plugins/ruby/ApplicationInsightsUtility.rb +++ b/source/plugins/ruby/ApplicationInsightsUtility.rb @@ -79,6 +79,9 @@ def initializeUtility() $log.info("proxy configured") @@CustomProperties["IsProxyConfigured"] = "true" isProxyConfigured = true + if ProxyUtils.isProxyCACertConfigured() + @@CustomProperties["ProxyCACertConfigured"] = "true" + end else @@CustomProperties["IsProxyConfigured"] = "false" isProxyConfigured = false diff --git a/source/plugins/ruby/out_mdm.rb b/source/plugins/ruby/out_mdm.rb index 82d6e07db..e10a2049f 100644 --- a/source/plugins/ruby/out_mdm.rb +++ b/source/plugins/ruby/out_mdm.rb @@ -96,17 +96,14 @@ def start end @@post_request_url = @@post_request_url_template % { aks_region: aks_region, aks_resource_id: aks_resource_id } @post_request_uri = URI.parse(@@post_request_url) - if (!!@isArcK8sCluster) - proxy = (ProxyUtils.getProxyConfiguration) - if proxy.nil? || proxy.empty? - @http_client = Net::HTTP.new(@post_request_uri.host, @post_request_uri.port) - else - @log.info "Proxy configured on this cluster: #{aks_resource_id}" - @http_client = Net::HTTP.new(@post_request_uri.host, @post_request_uri.port, proxy[:addr], proxy[:port], proxy[:user], proxy[:pass]) - end - else + proxy = (ProxyUtils.getProxyConfiguration) + if proxy.nil? || proxy.empty? 
@http_client = Net::HTTP.new(@post_request_uri.host, @post_request_uri.port) + else + @log.info "Proxy configured on this cluster: #{aks_resource_id}" + @http_client = Net::HTTP.new(@post_request_uri.host, @post_request_uri.port, proxy[:addr], proxy[:port], proxy[:user], proxy[:pass]) end + @http_client.use_ssl = true @log.info "POST Request url: #{@@post_request_url}" ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMPluginStart", {}) diff --git a/source/plugins/ruby/proxy_utils.rb b/source/plugins/ruby/proxy_utils.rb index 1566fe4e9..14fc47692 100644 --- a/source/plugins/ruby/proxy_utils.rb +++ b/source/plugins/ruby/proxy_utils.rb @@ -3,43 +3,63 @@ # frozen_string_literal: true class ProxyUtils - class << self - def getProxyConfiguration() - omsproxy_secret_path = "/etc/omsagent-secret/PROXY" - if !File.exist?(omsproxy_secret_path) - return {} - end - - begin - proxy_config = parseProxyConfiguration(File.read(omsproxy_secret_path)) - rescue SystemCallError # Error::ENOENT - return {} - end - - if proxy_config.nil? - $log.warn("Failed to parse the proxy configuration in '#{omsproxy_secret_path}'") - return {} - end - - return proxy_config - end - - def parseProxyConfiguration(proxy_conf_str) - # Remove the http(s) protocol - proxy_conf_str = proxy_conf_str.gsub(/^(https?:\/\/)?/, "") - - # Check for unsupported protocol - if proxy_conf_str[/^[a-z]+:\/\//] - return nil - end - - re = /^(?:(?[^:]+):(?[^@]+)@)?(?[^:@]+)(?::(?\d+))?$/ - matches = re.match(proxy_conf_str) - if matches.nil? or matches[:addr].nil? - return nil - end - # Convert nammed matches to a hash - Hash[ matches.names.map{ |name| name.to_sym}.zip( matches.captures ) ] + class << self + def getProxyConfiguration() + omsproxy_secret_path = "/etc/omsagent-secret/PROXY" + if !File.exist?(omsproxy_secret_path) + return {} + end + + begin + proxy_config = parseProxyConfiguration(File.read(omsproxy_secret_path)) + rescue SystemCallError # Error::ENOENT + return {} + end + + if proxy_config.nil? + $log.warn("Failed to parse the proxy configuration in '#{omsproxy_secret_path}'") + return {} + end + + return proxy_config + end + + def parseProxyConfiguration(proxy_conf_str) + if proxy_conf_str.empty? + return nil + end + # Remove trailing / if the proxy endpoint has + if proxy_conf_str.end_with?("/") + proxy_conf_str = proxy_conf_str.chop + end + # Remove the http(s) protocol + proxy_conf_str = proxy_conf_str.gsub(/^(https?:\/\/)?/, "") + + # Check for unsupported protocol + if proxy_conf_str[/^[a-z]+:\/\//] + return nil + end + + re = /^(?:(?[^:]+):(?[^@]+)@)?(?[^:@]+)(?::(?\d+))?$/ + matches = re.match(proxy_conf_str) + if matches.nil? or matches[:addr].nil? 
+ return nil + end + # Convert nammed matches to a hash + Hash[matches.names.map { |name| name.to_sym }.zip(matches.captures)] + end + + def isProxyCACertConfigured() + isProxyCACertExist = false + begin + proxy_cert_path = "/etc/omsagent-secret/PROXYCERT.crt" + if File.exist?(proxy_cert_path) + isProxyCACertExist = true end + rescue => error + $log.warn("Failed to check the existence of Proxy CA cert '#{proxy_cert_path}'") + end + return isProxyCACertExist end -end \ No newline at end of file + end +end From f9209c4706a58b8f8abf908badf6aea571be484e Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Mon, 14 Mar 2022 13:46:57 -0700 Subject: [PATCH 202/301] build multi-arch images (#704) * build multi-arch linux images * new pipelines to build multi-arch images Co-authored-by: Amol Agrawal --- .pipelines/azure_pipeline_dev.yaml | 100 ++++++++++++++++++ .pipelines/azure_pipeline_prod.yaml | 100 ++++++++++++++++++ README.md | 19 ++++ build/linux/Makefile | 24 ++--- .../ContainerInsights.Linux.Parameters.json | 68 ++++++++++++ .../RolloutSpecs/RolloutSpecs.json | 29 +++++ .../ScopeBindings/Public.ScopeBindings.json | 51 +++++++++ .../Scripts/pushAgentToAcr.sh | 72 +++++++++++++ .../ServiceModels/Public.ServiceModel.json | 51 +++++++++ .../ServiceGroupRoot/buildver.txt | 1 + kubernetes/linux/Dockerfile | 2 +- kubernetes/linux/Dockerfile.multiarch | 39 +++++++ .../build-and-publish-docker-image.sh | 16 ++- kubernetes/linux/main.sh | 20 ++-- kubernetes/linux/setup.sh | 31 ++++-- .../linux/install-build-pre-requisites.sh | 19 ++++ source/plugins/go/src/Makefile | 12 ++- source/plugins/ruby/in_kube_nodes.rb | 3 + 18 files changed, 619 insertions(+), 38 deletions(-) create mode 100644 .pipelines/azure_pipeline_dev.yaml create mode 100644 .pipelines/azure_pipeline_prod.yaml create mode 100644 deployment/multiarch-agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json create mode 100644 deployment/multiarch-agent-deployment/ServiceGroupRoot/RolloutSpecs/RolloutSpecs.json create mode 100644 deployment/multiarch-agent-deployment/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json create mode 100644 deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh create mode 100644 deployment/multiarch-agent-deployment/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json create mode 100644 deployment/multiarch-agent-deployment/ServiceGroupRoot/buildver.txt create mode 100644 kubernetes/linux/Dockerfile.multiarch diff --git a/.pipelines/azure_pipeline_dev.yaml b/.pipelines/azure_pipeline_dev.yaml new file mode 100644 index 000000000..6d50e5788 --- /dev/null +++ b/.pipelines/azure_pipeline_dev.yaml @@ -0,0 +1,100 @@ +# Starter pipeline +# Start with a minimal pipeline that you can customize to build and deploy your code. 
+# Add steps that build, run tests, deploy, and more: +# https://aka.ms/yaml + +trigger: +- ci_dev + +pool: + name: Azure-Pipelines-CI-Test-EO + +variables: + armServiceConnectionName: 'ci-1es-acr-connection' + subscription: '9b96ebbd-c57a-42d1-bbe9-b69296e4c7fb' + containerRegistry: 'containerinsightsprod' + repoImageName: '${{ variables.containerRegistry }}.azurecr.io/public/azuremonitor/containerinsights/cidev' + +steps: +- bash: | + commit=$(git rev-parse --short HEAD) + echo "##vso[task.setvariable variable=commit;]$commit" + + datetime=$(date +'%Y%m%d%s') + echo "##vso[task.setvariable variable=datetime;]$datetime" + + cd deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts + tar -czvf ../artifacts.tar.gz pushAgentToAcr.sh + +- task: CopyFiles@2 + displayName: "Copy ev2 deployment artifacts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/deployment" + Contents: | + **/* + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + +- task: CopyFiles@2 + displayName: "Copy ev2 deployment scripts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/.pipelines" + Contents: | + *.sh + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + +- task: CopyFiles@2 + displayName: "Copy ev2 deployment scripts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/kubernetes" + Contents: | + *.yaml + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + +- task: CopyFiles@2 + displayName: "Copy ev2 deployment scripts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/charts" + Contents: | + **/* + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + +- task: CopyFiles@2 + displayName: "Copy ev2 deployment scripts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/test/e2e" + Contents: | + *.yaml + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + +- task: AzureCLI@2 + inputs: + azureSubscription: ${{ variables.armServiceConnectionName }} + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + + sudo apt-get update && sudo apt-get -y install qemu binfmt-support qemu-user-static + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + + docker buildx create --name testbuilder + docker buildx use testbuilder + + az --version + az account show + az account set -s ${{ variables.subscription }} + az acr login -n ${{ variables.containerRegistry }} + + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(datetime)-$(commit) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/metadata.json --build-arg IMAGE_TAG=$(datetime)-$(commit) --push . + + docker pull ${{ variables.repoImageName }}:$(datetime)-$(commit) + +- task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 + displayName: 'Generation Task' + inputs: + BuildDropPath: '$(Build.ArtifactStagingDirectory)' + DockerImagesToScan: 'golang:1.14, ubuntu:18.04, ${{ variables.repoImageName }}:$(datetime)-$(commit)' + +- task: PublishBuildArtifacts@1 + inputs: + pathToPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: drop diff --git a/.pipelines/azure_pipeline_prod.yaml b/.pipelines/azure_pipeline_prod.yaml new file mode 100644 index 000000000..046d41910 --- /dev/null +++ b/.pipelines/azure_pipeline_prod.yaml @@ -0,0 +1,100 @@ +# Starter pipeline +# Start with a minimal pipeline that you can customize to build and deploy your code. 
+# Add steps that build, run tests, deploy, and more: +# https://aka.ms/yaml + +trigger: +- ci_prod + +pool: + name: Azure-Pipelines-CI-Prod-EO + +variables: + armServiceConnectionName: 'ci-1es-acr-connection-prod' + subscription: '30c56c3a-54da-46ea-b004-06eb33432687' + containerRegistry: 'containerinsightsbuild' + repoImageName: '${{ variables.containerRegistry }}.azurecr.io/official/linux' + +steps: +- bash: | + commit=$(git rev-parse --short HEAD) + echo "##vso[task.setvariable variable=commit;]$commit" + + datetime=$(date +'%Y%m%d%s') + echo "##vso[task.setvariable variable=datetime;]$datetime" + + cd deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts + tar -czvf ../artifacts.tar.gz pushAgentToAcr.sh + +- task: CopyFiles@2 + displayName: "Copy ev2 deployment artifacts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/deployment" + Contents: | + **/* + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + +- task: CopyFiles@2 + displayName: "Copy ev2 deployment scripts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/.pipelines" + Contents: | + **/*.sh + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + +- task: CopyFiles@2 + displayName: "Copy ev2 deployment scripts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/kubernetes" + Contents: | + *.yaml + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + +- task: CopyFiles@2 + displayName: "Copy ev2 deployment scripts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/charts" + Contents: | + **/* + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + +- task: CopyFiles@2 + displayName: "Copy ev2 deployment scripts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/test/e2e" + Contents: | + *.yaml + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + +- task: AzureCLI@2 + inputs: + azureSubscription: ${{ variables.armServiceConnectionName }} + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + + sudo apt-get update && sudo apt-get -y install qemu binfmt-support qemu-user-static + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + + docker buildx create --name testbuilder + docker buildx use testbuilder + + az --version + az account show + az account set -s ${{ variables.subscription }} + az acr login -n ${{ variables.containerRegistry }} + + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:ciprod-$(datetime)-$(commit) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/metadata.json --build-arg IMAGE_TAG=ciprod-$(datetime)-$(commit) --push . + + docker pull ${{ variables.repoImageName }}:ciprod-$(datetime)-$(commit) + +- task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 + displayName: 'Generation Task' + inputs: + BuildDropPath: '$(Build.ArtifactStagingDirectory)' + DockerImagesToScan: 'golang:1.14, ubuntu:18.04, ${{ variables.repoImageName }}:ciprod-$(datetime)-$(commit)' + +- task: PublishBuildArtifacts@1 + inputs: + pathToPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: drop \ No newline at end of file diff --git a/README.md b/README.md index 7d42c75d6..f0fa40e53 100644 --- a/README.md +++ b/README.md @@ -151,6 +151,25 @@ bash build-and-publish-docker-image.sh --image /: ``` > Note: format of the imagetag will be `ci`. possible values for release are test, dev, preview, dogfood, prod etc. +You can also build and push images for multiple architectures. 
This is powered by docker buildx +``` +cd ~/Docker-Provider/kubernetes/linux/dockerbuild +sudo docker login # if you want to publish the image to acr then login to acr via `docker login ` +# build and publish using docker buildx +bash build-and-publish-docker-image.sh --image /: --multiarch +``` + +or directly use the docker buildx commands +``` +# multiple platforms +cd ~/Docker-Provider +docker buildx build --platform linux/arm64/v8,linux/amd64 -t /: --build-arg IMAGE_TAG= -f kubernetes/linux/Dockerfile.multiarch --push . + +# single platform +cd ~/Docker-Provider +docker buildx build --platform linux/amd64 -t /: --build-arg IMAGE_TAG= -f kubernetes/linux/Dockerfile.multiarch --push . +``` + If you prefer to build docker provider shell bundle and image separately, then you can follow below instructions ##### Build Docker Provider shell bundle diff --git a/build/linux/Makefile b/build/linux/Makefile index 3f35e1204..9203f1d73 100644 --- a/build/linux/Makefile +++ b/build/linux/Makefile @@ -11,10 +11,16 @@ BUILD_TYPE=Release PF=Linux PF_MAJOR=1 PF_MINOR=0 -PF_ARCH=x64 PF_WIDTH=64 PF_DISTRO=ULINUX -BUILD_CONFIGURATION=Linux_ULINUX_1.0_x64_64_Release + +ifeq ($(arch),) + PF_ARCH=amd64 +else + PF_ARCH=$(arch) +endif + +BUILD_CONFIGURATION=Linux_ULINUX_1.0_$(PF_ARCH)_64_Release # RM - Remove a file # RMDIR - Remove a directory @@ -90,16 +96,7 @@ endif STAGING_DIR := $(INTERMEDIATE_DIR)/staging -# For consistency, the architecture should be i686 (for x86) and x86_64 (for x64) -DOCKER_ARCH := $(shell echo $(PF_ARCH) | sed -e 's/x86$$/i686/' -e 's/x64$$/x86_64/') -OUTPUT_PACKAGE_PREFIX=docker-cimprov-$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH)-$(CONTAINER_BUILDVERSION_BUILDNR).universal.$(DOCKER_ARCH) - - -ifeq ("$(wildcard /usr/bin/dpkg-deb)","") - DPKG_LOCATION="--DPKG_LOCATION=$(BASE_DIR)/build/linux/installer/InstallBuilder/tools/bin/dpkg-deb-$(PF_ARCH)" -else - DPKG_LOCATION= -endif +OUTPUT_PACKAGE_PREFIX=docker-cimprov-$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH)-$(CONTAINER_BUILDVERSION_BUILDNR).universal.$(PF_ARCH) #-------------------------------------------------------------------------------- @@ -130,7 +127,7 @@ KIT_STATUS: fluentbitplugin : @echo "========================= Building fluentbit out_oms go plugin for logs" $(MKPATH) $(INTERMEDIATE_DIR) - make -C $(GO_SOURCE_DIR) fbplugin + make -C $(GO_SOURCE_DIR) fbplugin arch=$(PF_ARCH) $(COPY) $(GO_SOURCE_DIR)/out_oms.so $(INTERMEDIATE_DIR) rubypluginstests : @@ -192,7 +189,6 @@ kit : fluentbitplugin --VERSION=$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH) \ --RELEASE=$(CONTAINER_BUILDVERSION_BUILDNR) \ --CONTAINER_BUILD_LIBRARY=$(CONTAINERLIB_FILENAME) \ - $(DPKG_LOCATION) \ --OUTPUTFILE=$(OUTPUT_PACKAGE_PREFIX) \ --DATAFILE_PATH=$(BASE_DIR)/build/linux/installer/datafiles \ base_container.data linux.data linux_dpkg.data diff --git a/deployment/multiarch-agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json b/deployment/multiarch-agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json new file mode 100644 index 000000000..70d0950a2 --- /dev/null +++ b/deployment/multiarch-agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Linux.Parameters.json @@ -0,0 +1,68 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutParameters.json", + "contentVersion": "1.0.0.0", + "wait": [ + { + "name": 
"waitSdpBakeTime", + "properties": { + "duration": "PT24H" + } + } + ], + "shellExtensions": [ + { + "name": "PushAgentToACR", + "type": "ShellExtensionType", + "properties": { + "maxexecutiontime": "PT1H" + }, + "package": { + "reference": { + "path": "artifacts.tar.gz" + } + }, + "launch": { + "command": [ + "/bin/bash", + "pushAgentToAcr.sh" + ], + "environmentVariables": [ + { + "name": "ACR_NAME", + "value": "__ACR_NAME__" + }, + { + "name": "AGENT_RELEASE", + "value": "__AGENT_RELEASE__" + }, + { + "name": "AGENT_IMAGE_TAG_SUFFIX", + "value": "__AGENT_IMAGE_TAG_SUFFIX__" + }, + { + "name": "AGENT_IMAGE_FULL_PATH", + "value": "public/azuremonitor/containerinsights/__AGENT_RELEASE__:__AGENT_RELEASE____AGENT_IMAGE_TAG_SUFFIX__" + }, + { + "name": "CDPX_REGISTRY", + "value": "__CDPX_LINUX_REGISTRY__" + }, + { + "name": "CDPX_REPO_NAME", + "value": "__CDPX_LINUX_REPO_NAME__" + }, + { + "name": "CDPX_TAG", + "value": "__CDPX_LINUX_TAG__" + } + ], + "identity": { + "type": "userAssigned", + "userAssignedIdentities": [ + "__MANAGED_IDENTITY__" + ] + } + } + } + ] + } \ No newline at end of file diff --git a/deployment/multiarch-agent-deployment/ServiceGroupRoot/RolloutSpecs/RolloutSpecs.json b/deployment/multiarch-agent-deployment/ServiceGroupRoot/RolloutSpecs/RolloutSpecs.json new file mode 100644 index 000000000..250878590 --- /dev/null +++ b/deployment/multiarch-agent-deployment/ServiceGroupRoot/RolloutSpecs/RolloutSpecs.json @@ -0,0 +1,29 @@ +{ + "$schema": "https://ev2schema.azure.net/schemas/2020-01-01/rolloutSpecification.json", + "ContentVersion": "1.0.0.0", + "RolloutMetadata": { + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "Name": "ContainerInsightsAgent", + "RolloutType": "Major", + "BuildSource": { + "Parameters": { + "VersionFile": "buildver.txt" + } + }, + "Notification": { + "Email": { + "To": "omscontainers@microsoft.com" + } + } + }, + "OrchestratedSteps": [ + { + "name": "PushLinuxAgent", + "targetType": "ServiceResource", + "targetName": "PushLinuxAgent", + "actions": [ "Shell/PushAgentToACR" ], + "dependsOn": [ ] + } + ] + } \ No newline at end of file diff --git a/deployment/multiarch-agent-deployment/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json b/deployment/multiarch-agent-deployment/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json new file mode 100644 index 000000000..cbc6db8b3 --- /dev/null +++ b/deployment/multiarch-agent-deployment/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json @@ -0,0 +1,51 @@ +{ + "$schema": "https://ev2schema.azure.net/schemas/2020-01-01/scopeBindings.json", + "contentVersion": "0.0.0.1", + "scopeBindings": [ + { + "scopeTagName": "Global", + "bindings": [ + { + "find": "__ACR_NAME__", + "replaceWith": "$(ACRName)" + }, + { + "find": "__AGENT_RELEASE__", + "replaceWith": "$(AgentRelease)" + }, + { + "find": "__AGENT_IMAGE_TAG_SUFFIX__", + "replaceWith": "$(AgentImageTagSuffix)" + }, + { + "find": "__MANAGED_IDENTITY__", + "replaceWith": "$(ManagedIdentity)" + }, + { + "find": "__CDPX_LINUX_REGISTRY__", + "replaceWith": "$(CDPXLinuxRegistry)" + }, + { + "find": "__CDPX_WINDOWS_REGISTRY__", + "replaceWith": "$(CDPXWindowsRegistry)" + }, + { + "find": "__CDPX_LINUX_TAG__", + "replaceWith": "$(CDPXLinuxTag)" + }, + { + "find": "__CDPX_WINDOWS_TAG__", + "replaceWith": "$(CDPXWindowsTag)" + }, + { + "find": "__CDPX_LINUX_REPO_NAME__", + "replaceWith": "$(CDPXLinuxRepoName)" + }, + { + "find": "__CDPX_WINDOWS_REPO_NAME__", + 
"replaceWith": "$(CDPXWindowsRepoName)" + } + ] + } + ] +} \ No newline at end of file diff --git a/deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh b/deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh new file mode 100644 index 000000000..d39cedde0 --- /dev/null +++ b/deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh @@ -0,0 +1,72 @@ +#!/bin/bash +set -e + +# Note - This script used in the pipeline as inline script + +if [ -z $AGENT_IMAGE_TAG_SUFFIX ]; then + echo "-e error value of AGENT_IMAGE_TAG_SUFFIX variable shouldnt be empty. check release variables" + exit 1 +fi + +if [ -z $AGENT_RELEASE ]; then + echo "-e error AGENT_RELEASE shouldnt be empty. check release variables" + exit 1 +fi + +#Make sure that tag being pushed will not overwrite an existing tag in mcr +MCR_TAG_RESULT="`wget -qO- https://mcr.microsoft.com/v2/azuremonitor/containerinsights/ciprod/tags/list`" +if [ $? -ne 0 ]; then + echo "-e error unable to get list of mcr tags for azuremonitor/containerinsights/ciprod repository" + exit 1 +fi +TAG_EXISTS=$(echo $MCR_TAG_RESULT | jq '.tags | contains(["'"$AGENT_RELEASE$AGENT_IMAGE_TAG_SUFFIX"'"])') + +if $TAG_EXISTS; then + echo "-e error ${AGENT_IMAGE_TAG_SUFFIX} already exists in mcr. make sure the image tag is unique" + exit 1 +fi + +if [ -z $AGENT_IMAGE_FULL_PATH ]; then + echo "-e error AGENT_IMAGE_FULL_PATH shouldnt be empty. check release variables" + exit 1 +fi + +if [ -z $CDPX_TAG ]; then + echo "-e error value of CDPX_TAG shouldn't be empty. check release variables" + exit 1 +fi + +if [ -z $CDPX_REGISTRY ]; then + echo "-e error value of CDPX_REGISTRY shouldn't be empty. check release variables" + exit 1 +fi + +if [ -z $CDPX_REPO_NAME ]; then + echo "-e error value of CDPX_REPO_NAME shouldn't be empty. check release variables" + exit 1 +fi + +if [ -z $ACR_NAME ]; then + echo "-e error value of ACR_NAME shouldn't be empty. check release variables" + exit 1 +fi + + +#Login to az cli and authenticate to acr +echo "Login cli using managed identity" +az login --identity +if [ $? -eq 0 ]; then + echo "Logged in successfully" +else + echo "-e error failed to login to az with managed identity credentials" + exit 1 +fi + +echo "Pushing ${AGENT_IMAGE_FULL_PATH} to ${ACR_NAME}" +az acr import --name $ACR_NAME --registry $CDPX_REGISTRY --source official/${CDPX_REPO_NAME}:${CDPX_TAG} --image $AGENT_IMAGE_FULL_PATH +if [ $? 
-eq 0 ]; then + echo "Retagged and pushed image successfully" +else + echo "-e error failed to retag and push image to destination ACR" + exit 1 +fi \ No newline at end of file diff --git a/deployment/multiarch-agent-deployment/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json b/deployment/multiarch-agent-deployment/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json new file mode 100644 index 000000000..c3b00340a --- /dev/null +++ b/deployment/multiarch-agent-deployment/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json @@ -0,0 +1,51 @@ +{ + "$schema": "https://ev2schema.azure.net/schemas/2020-01-01/serviceModel.json", + "contentVersion": "1.0.0.2", + "ServiceMetadata": { + "ServiceGroup": "ContainerInsightsAgent", + "Environment": "Prod" + }, + "ServiceResourceGroupDefinitions": [ + { + "Name": "CI-Agent-ServiceResourceGroupDefinition", + "ServiceResourceDefinitions": [ + { + "Name": "ShellExtension", + "ComposedOf": { + "Extension": { + "Shell": [ + { + "type": "ShellExtensionType", + "properties": { + "imageName": "adm-ubuntu-1804-l", + "imageVersion": "v18" + } + } + ] + } + } + } + ] + } + ], + "ServiceResourceGroups": [ + { + "AzureResourceGroupName": "ContainerInsights-MultiArch-Agent-Release", + "Location": "eastus2", + "InstanceOf": "CI-Agent-ServiceResourceGroupDefinition", + "AzureSubscriptionId": "30c56c3a-54da-46ea-b004-06eb33432687", + "ScopeTags": [ + { + "Name": "Global" + } + ], + "ServiceResources": [ + { + "Name": "PushLinuxAgent", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsights.Linux.Parameters.json" + } + ] + } + ] + } \ No newline at end of file diff --git a/deployment/multiarch-agent-deployment/ServiceGroupRoot/buildver.txt b/deployment/multiarch-agent-deployment/ServiceGroupRoot/buildver.txt new file mode 100644 index 000000000..bd2666abb --- /dev/null +++ b/deployment/multiarch-agent-deployment/ServiceGroupRoot/buildver.txt @@ -0,0 +1 @@ +1.0.0.0 \ No newline at end of file diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 2128d6d6a..bace8d45e 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -23,7 +23,7 @@ ENV AGENT_VERSION ${IMAGE_TAG} WORKDIR ${tmpdir} # copy docker provider shell bundle to use the agent image -COPY ./Linux_ULINUX_1.0_x64_64_Release/docker-cimprov-*.*.*-*.x86_64.sh . +COPY ./Linux_ULINUX_1.0_*_64_Release/docker-cimprov-*.*.*-*.*.*.sh . 
# Note: If you prefer remote destination, uncomment below line and comment above line # wget https://github.com/microsoft/Docker-Provider/releases/download/10.0.0-1/docker-cimprov-10.0.0-1.universal.x86_64.sh diff --git a/kubernetes/linux/Dockerfile.multiarch b/kubernetes/linux/Dockerfile.multiarch new file mode 100644 index 000000000..6ac8c6507 --- /dev/null +++ b/kubernetes/linux/Dockerfile.multiarch @@ -0,0 +1,39 @@ +FROM --platform=$BUILDPLATFORM golang:1.15 AS builder +ARG TARGETOS TARGETARCH +RUN /usr/bin/apt-get update && /usr/bin/apt-get install git g++ make pkg-config libssl-dev libpam0g-dev rpm librpm-dev uuid-dev libkrb5-dev python sudo gcc-aarch64-linux-gnu -y + +COPY build /src/build +COPY source /src/source +RUN cd /src/build/linux && make arch=${TARGETARCH} + + +FROM ubuntu:18.04 AS base_image +ARG TARGETOS TARGETARCH +MAINTAINER OMSContainers@microsoft.com +LABEL vendor=Microsoft\ Corp \ + com.microsoft.product="Azure Monitor for containers" +ENV tmpdir /opt +ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi +ENV MALLOC_ARENA_MAX 2 +ENV HOST_MOUNT_PREFIX /hostfs +ENV HOST_PROC /hostfs/proc +ENV HOST_SYS /hostfs/sys +ENV HOST_ETC /hostfs/etc +ENV HOST_VAR /hostfs/var +ENV AZMON_COLLECT_ENV False +ENV KUBE_CLIENT_BACKOFF_BASE 1 +ENV KUBE_CLIENT_BACKOFF_DURATION 0 +ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 +RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python python-ctypes init-system-helpers net-tools rsyslog cron vim dmidecode apt-transport-https gnupg make && rm -rf /var/lib/apt/lists/* + +COPY --from=builder /src/kubernetes/linux/Linux_ULINUX_1.0_*_64_Release/docker-cimprov-*.*.*-*.*.sh $tmpdir/ +COPY kubernetes/linux/setup.sh kubernetes/linux/main.sh kubernetes/linux/defaultpromenvvariables kubernetes/linux/defaultpromenvvariables-rs kubernetes/linux/defaultpromenvvariables-sidecar kubernetes/linux/mdsd.xml kubernetes/linux/envmdsd kubernetes/linux/logrotate.conf $tmpdir/ + +ARG IMAGE_TAG=ciprod03112022 +ENV AGENT_VERSION ${IMAGE_TAG} + +WORKDIR ${tmpdir} + +RUN chmod 775 $tmpdir/*.sh; sync; $tmpdir/setup.sh ${TARGETARCH} +CMD [ "/opt/main.sh" ] + diff --git a/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh b/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh index 267f15f32..580b158c9 100755 --- a/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh +++ b/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh @@ -14,6 +14,7 @@ usage() echo echo "Build and publish docker image:" echo "$basename --image " + echo "$basename --image --multiarch" } parse_args() @@ -30,6 +31,7 @@ for arg in "$@"; do shift case "$arg" in "--image") set -- "$@" "-i" ;; + "--multiarch") set -- "$@" "-m" ;; "--"*) usage ;; *) set -- "$@" "$arg" esac @@ -37,7 +39,7 @@ done local OPTIND opt -while getopts 'hi:' opt; do +while getopts 'hi:m' opt; do case "$opt" in h) usage @@ -48,6 +50,11 @@ while getopts 'hi:' opt; do echo "image is $OPTARG" ;; + m) + multi=1 + echo "using multiarch dockerfile" + ;; + ?) usage exit 1 @@ -131,6 +138,13 @@ echo "source code base directory: $baseDir" echo "build directory for docker provider: $buildDir" echo "docker file directory: $dockerFileDir" +if [ "$multi" -eq "1" ]; then + echo "building multiarch" + cd $baseDir + docker buildx build --platform linux/arm64/v8,linux/amd64 -t $image --build-arg IMAGE_TAG=$imageTag -f $linuxDir/Dockerfile.multiarch --push . 
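# A minimal sketch for verifying the pushed multi-arch manifest; `imagetools`
# ships with docker buildx, and both linux/amd64 and linux/arm64/v8 entries
# should appear in its output for the image pushed above.
docker buildx imagetools inspect $image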
+ exit 0 +fi + # build docker provider shell bundle build_docker_provider diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 4e1b0783c..997f624e2 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -328,7 +328,7 @@ source ~/.bashrc if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then #Parse the configmap to set the right environment variables. - /usr/bin/ruby2.6 tomlparser.rb + /usr/bin/ruby2.7 tomlparser.rb cat config_env_var | while read line; do echo $line >> ~/.bashrc @@ -339,7 +339,7 @@ fi #Parse the configmap to set the right environment variables for agent config. #Note > tomlparser-agent-config.rb has to be parsed first before td-agent-bit-conf-customizer.rb for fbit agent settings if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then - /usr/bin/ruby2.6 tomlparser-agent-config.rb + /usr/bin/ruby2.7 tomlparser-agent-config.rb cat agent_config_env_var | while read line; do #echo $line @@ -348,7 +348,7 @@ if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then source agent_config_env_var #Parse the configmap to set the right environment variables for network policy manager (npm) integration. - /usr/bin/ruby2.6 tomlparser-npm-config.rb + /usr/bin/ruby2.7 tomlparser-npm-config.rb cat integration_npm_config_env_var | while read line; do #echo $line @@ -359,11 +359,11 @@ fi #Replace the placeholders in td-agent-bit.conf file for fluentbit with custom/default values in daemonset if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then - /usr/bin/ruby2.6 td-agent-bit-conf-customizer.rb + /usr/bin/ruby2.7 td-agent-bit-conf-customizer.rb fi #Parse the prometheus configmap to create a file with new custom settings. -/usr/bin/ruby2.6 tomlparser-prom-customconfig.rb +/usr/bin/ruby2.7 tomlparser-prom-customconfig.rb #Setting default environment variables to be used in any case of failure in the above steps if [ ! -e "/etc/config/kube.conf" ]; then @@ -397,7 +397,7 @@ fi if [ ! -e "/etc/config/kube.conf" ]; then if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then #Parse the agent configmap to create a file with new custom settings. - /usr/bin/ruby2.6 tomlparser-prom-agent-config.rb + /usr/bin/ruby2.7 tomlparser-prom-agent-config.rb #Sourcing config environment variable file if it exists if [ -e "side_car_fbit_config_env_var" ]; then cat side_car_fbit_config_env_var | while read line; do @@ -411,7 +411,7 @@ fi #Parse the configmap to set the right environment variables for MDM metrics configuration for Alerting. if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then - /usr/bin/ruby2.6 tomlparser-mdm-metrics-config.rb + /usr/bin/ruby2.7 tomlparser-mdm-metrics-config.rb cat config_mdm_metrics_env_var | while read line; do echo $line >> ~/.bashrc @@ -419,7 +419,7 @@ if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then source config_mdm_metrics_env_var #Parse the configmap to set the right environment variables for metric collection settings - /usr/bin/ruby2.6 tomlparser-metric-collection-config.rb + /usr/bin/ruby2.7 tomlparser-metric-collection-config.rb cat config_metric_collection_env_var | while read line; do echo $line >> ~/.bashrc @@ -430,7 +430,7 @@ fi # OSM scraping to be done in replicaset if sidecar car scraping is disabled and always do the scraping from the sidecar (It will always be either one of the two) if [[ ( ( ! 
-e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || ( ( -e "/etc/config/kube.conf" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ]]; then - /usr/bin/ruby2.6 tomlparser-osm-config.rb + /usr/bin/ruby2.7 tomlparser-osm-config.rb if [ -e "integration_osm_config_env_var" ]; then cat integration_osm_config_env_var | while read line; do @@ -517,7 +517,7 @@ if [ "$CONTAINER_RUNTIME" != "docker" ]; then fi echo "set caps for ruby process to read container env from proc" -sudo setcap cap_sys_ptrace,cap_dac_read_search+ep /usr/bin/ruby2.6 +sudo setcap cap_sys_ptrace,cap_dac_read_search+ep /usr/bin/ruby2.7 echo "export KUBELET_RUNTIME_OPERATIONS_METRIC="$KUBELET_RUNTIME_OPERATIONS_METRIC >> ~/.bashrc echo "export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="$KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC >> ~/.bashrc diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 9d36fec4a..00d5bc0fa 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -1,6 +1,12 @@ TMPDIR="/opt" cd $TMPDIR +if [ -z $1 ]; then + ARCH="amd64" +else + ARCH=$1 +fi + #Download utf-8 encoding capability on the omsagent container. #upgrade apt to latest version apt-get update && apt-get install -y apt && DEBIAN_FRONTEND=noninteractive apt-get install -y locales @@ -9,15 +15,18 @@ sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ dpkg-reconfigure --frontend=noninteractive locales && \ update-locale LANG=en_US.UTF-8 -#install oneagent - Official bits (2/22/2022) -wget https://github.com/microsoft/Docker-Provider/releases/download/1.17.0/azure-mdsd_1.17.0-build.master.354_x86_64.deb - +#install oneagent - Official bits (3/14/2022) +if [ "${ARCH}" != "arm64" ]; then + wget "https://github.com/microsoft/Docker-Provider/releases/download/1.17.0/azure-mdsd_1.17.0-build.master.354_x86_64.deb" -O azure-mdsd.deb +else + wget "https://github.com/microsoft/Docker-Provider/releases/download/1.17.1-arm64-master/azure-mdsd_1.17.1-build.master.366_aarch64.deb" -O azure-mdsd.deb +fi /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb cp -f $TMPDIR/mdsd.xml /etc/mdsd.d cp -f $TMPDIR/envmdsd /etc/mdsd.d -#log rotate conf for mdsd and can be extended for other log files as well +# log rotate conf for mdsd and can be extended for other log files as well cp -f $TMPDIR/logrotate.conf /etc/logrotate.d/ci-agent #download inotify tools for watching configmap changes @@ -31,27 +40,27 @@ sudo apt-get install jq=1.5+dfsg-2 -y #used to setcaps for ruby process to read /proc/env sudo apt-get install libcap2-bin -y -wget https://dl.influxdata.com/telegraf/releases/telegraf-1.20.3_linux_amd64.tar.gz -tar -zxvf telegraf-1.20.3_linux_amd64.tar.gz +wget https://dl.influxdata.com/telegraf/releases/telegraf-1.20.3_linux_$ARCH.tar.gz +tar -zxvf telegraf-1.20.3_linux_$ARCH.tar.gz mv /opt/telegraf-1.20.3/usr/bin/telegraf /opt/telegraf chmod 544 /opt/telegraf # Use wildcard version so that it doesnt require to touch this file -/$TMPDIR/docker-cimprov-*.*.*-*.x86_64.sh --install +/$TMPDIR/docker-cimprov-*.*.*-*.*.sh --install #download and install fluent-bit(td-agent-bit) wget -qO - https://packages.fluentbit.io/fluentbit.key | sudo apt-key add - -sudo echo "deb https://packages.fluentbit.io/ubuntu/xenial xenial main" >> /etc/apt/sources.list +sudo echo "deb https://packages.fluentbit.io/ubuntu/bionic bionic main" >> /etc/apt/sources.list sudo apt-get update sudo apt-get install td-agent-bit=1.7.8 -y -# install ruby2.6 +# install ruby2.7 sudo apt-key adv --keyserver 
keyserver.ubuntu.com --recv-keys F5DA5F09C3173AA6 sudo echo "deb http://ppa.launchpad.net/brightbox/ruby-ng/ubuntu bionic main" >> /etc/apt/sources.list sudo apt-get update -sudo apt-get install ruby2.6 ruby2.6-dev gcc make -y +sudo apt-get install ruby2.7 ruby2.7-dev gcc make -y # fluentd v1 gem gem install fluentd -v "1.14.2" --no-document fluentd --setup ./fluent @@ -65,7 +74,7 @@ rm -f $TMPDIR/envmdsd rm -f $TMPDIR/telegraf-*.tar.gz # remove build dependencies -sudo apt-get remove ruby2.6-dev gcc make -y +sudo apt-get remove ruby2.7-dev gcc make -y # Remove settings for cron.daily that conflict with the node's cron.daily. Since both are trying to rotate the same files # in /var/log at the same time, the rotation doesn't happen correctly and then the *.1 file is forever logged to. diff --git a/scripts/build/linux/install-build-pre-requisites.sh b/scripts/build/linux/install-build-pre-requisites.sh index 7959b37e8..b85e54fc4 100644 --- a/scripts/build/linux/install-build-pre-requisites.sh +++ b/scripts/build/linux/install-build-pre-requisites.sh @@ -58,10 +58,26 @@ install_docker() sudo apt-get install docker-ce docker-ce-cli containerd.io -y # Allow your user to access the Docker CLI without needing root access. sudo usermod -aG docker $USER + newgrp docker echo "installing docker completed" fi } +install_docker_buildx() +{ + # install the buildx plugin + sudo curl -O https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.linux-amd64 + sudo mkdir -p $HOME/.docker/cli-plugins + sudo mv buildx-v* $HOME/.docker/cli-plugins + + # install the emulator support + sudo apt-get -y install qemu binfmt-support qemu-user-static + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + + docker buildx create --name testbuilder + docker buildx use testbuilder +} + install_python() { echo "installing python ..." @@ -125,6 +141,9 @@ install_build_dependencies # install docker install_docker +# install buildx +install_docker_buildx + # install go install_go_lang diff --git a/source/plugins/go/src/Makefile b/source/plugins/go/src/Makefile index b3b730d79..f9488bc8e 100644 --- a/source/plugins/go/src/Makefile +++ b/source/plugins/go/src/Makefile @@ -1,6 +1,16 @@ BASE_DIR := $(subst /build/linux,,$(PWD)) include $(BASE_DIR)/build/version +ifeq ($(arch),) + PF_ARCH=amd64 +else + PF_ARCH=$(arch) +endif + +ifneq ($(PF_ARCH),amd64) + OPTIONS=CGO_ENABLED=1 CC=aarch64-linux-gnu-gcc GOOS=linux GOARCH=arm64 +endif + fbplugin: @echo "========================= Building out_oms plugin go code =========================" export BUILDVERSION=$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH)-$(CONTAINER_BUILDVERSION_BUILDNR) @@ -12,7 +22,7 @@ fbplugin: @echo "========================= go get =========================" go get @echo "========================= go build =========================" - go build -ldflags "-X 'main.revision=$(BUILDVERSION)' -X 'main.builddate=$(BUILDDATE)'" -buildmode=c-shared -o out_oms.so . + $(OPTIONS) go build -ldflags "-X 'main.revision=$(BUILDVERSION)' -X 'main.builddate=$(BUILDDATE)'" -buildmode=c-shared -o out_oms.so . 
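For reference, when `arch` is set to `arm64` the `$(OPTIONS)` prefix above turns this into a CGO cross-build; a minimal standalone sketch of the equivalent invocation, assuming the aarch64 cross toolchain (`gcc-aarch64-linux-gnu`, as installed in Dockerfile.multiarch) and the Go toolchain are available:
```
# Cross-compile the out_oms fluent-bit output plugin for linux/arm64 on an amd64 host.
# The environment flags mirror the OPTIONS variable defined in this Makefile.
cd source/plugins/go/src
CGO_ENABLED=1 CC=aarch64-linux-gnu-gcc GOOS=linux GOARCH=arm64 \
  go build -buildmode=c-shared -o out_oms.so .
```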
test: go test -cover -race -coverprofile=coverage.txt -covermode=atomic diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index abbfe94a1..5a52a089b 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -560,6 +560,9 @@ def getNodeTelemetryProps(item) properties["OperatingSystem"] = nodeInfo["operatingSystem"] properties["KernelVersion"] = nodeInfo["kernelVersion"] properties["OSImage"] = nodeInfo["osImage"] + if nodeInfo["architecture"] == "arm64" + properties["Architecture"] = nodeInfo["architecture"] + end containerRuntimeVersion = nodeInfo["containerRuntimeVersion"] if containerRuntimeVersion.downcase.start_with?("docker://") properties["DockerVersion"] = containerRuntimeVersion.split("//")[1] From 80197317b728fb6219e986a8de10e6d293f21219 Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Tue, 15 Mar 2022 09:25:47 -0700 Subject: [PATCH 203/301] add missing artifacts (#720) * add missing artifacts Co-authored-by: Amol Agrawal --- .pipelines/azure_pipeline_dev.yaml | 5 ++++- .pipelines/azure_pipeline_prod.yaml | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/.pipelines/azure_pipeline_dev.yaml b/.pipelines/azure_pipeline_dev.yaml index 6d50e5788..311d87400 100644 --- a/.pipelines/azure_pipeline_dev.yaml +++ b/.pipelines/azure_pipeline_dev.yaml @@ -23,9 +23,12 @@ steps: datetime=$(date +'%Y%m%d%s') echo "##vso[task.setvariable variable=datetime;]$datetime" - cd deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts + cd $(Build.SourcesDirectory)/deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts tar -czvf ../artifacts.tar.gz pushAgentToAcr.sh + cd $(Build.SourcesDirectory)/deployment/arc-k8s-extension/ServiceGroupRoot/Scripts + tar -czvf ../artifacts.tar.gz ../../../../charts/azuremonitor-containers/ pushChartToAcr.sh + - task: CopyFiles@2 displayName: "Copy ev2 deployment artifacts" inputs: diff --git a/.pipelines/azure_pipeline_prod.yaml b/.pipelines/azure_pipeline_prod.yaml index 046d41910..7608af28f 100644 --- a/.pipelines/azure_pipeline_prod.yaml +++ b/.pipelines/azure_pipeline_prod.yaml @@ -23,9 +23,12 @@ steps: datetime=$(date +'%Y%m%d%s') echo "##vso[task.setvariable variable=datetime;]$datetime" - cd deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts + cd $(Build.SourcesDirectory)/deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts tar -czvf ../artifacts.tar.gz pushAgentToAcr.sh + cd $(Build.SourcesDirectory)/deployment/arc-k8s-extension/ServiceGroupRoot/Scripts + tar -czvf ../artifacts.tar.gz ../../../../charts/azuremonitor-containers/ pushChartToAcr.sh + - task: CopyFiles@2 displayName: "Copy ev2 deployment artifacts" inputs: From b58ff42a5153dd1901e3139d960729f3a977a634 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 16 Mar 2022 12:32:35 -0700 Subject: [PATCH 204/301] Gangams/msi onboarding arm template updates for AKS (#721) * msi arm template updates * handle space in location --- .../onboarding-using-msi-auth/existingClusterOnboarding.json | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index c77e3203d..d5b613537 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -35,7 +35,7 @@ "dcrResourceTagValues": { 
"type": "object", "metadata": { - "description": "Existing or new tags on DCR Cluster Resource" + "description": "Existing or new tags on DCR Resource" } } }, @@ -43,9 +43,10 @@ "clusterSubscriptionId": "[split(parameters('aksResourceId'),'/')[2]]", "clusterResourceGroup": "[split(parameters('aksResourceId'),'/')[4]]", "clusterName": "[split(parameters('aksResourceId'),'/')[8]]", + "clusterLocation": "[replace(parameters('aksResourceLocation'),' ', '')]", "workspaceSubscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]", "workspaceResourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]", - "dcrName": "[Concat('MSCI', '-', split(parameters('workspaceResourceId'),'/')[8])]", + "dcrName": "[Concat('MSCI', '-', variables('clusterName'), '-', variables('clusterLocation'))]", "associationName": "ContainerInsightsExtension", "dataCollectionRuleId": "[resourceId(variables('workspaceSubscriptionId'), variables('workspaceResourceGroup'), 'Microsoft.Insights/dataCollectionRules', variables('dcrName'))]" }, From 15e8446271962d723851e3e6dd316b8e951f286a Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Thu, 17 Mar 2022 11:00:28 -0700 Subject: [PATCH 205/301] minor fixes (#722) Co-authored-by: Amol Agrawal --- .pipelines/azure_pipeline_dev.yaml | 3 ++- .pipelines/azure_pipeline_prod.yaml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.pipelines/azure_pipeline_dev.yaml b/.pipelines/azure_pipeline_dev.yaml index 311d87400..4de0f2f31 100644 --- a/.pipelines/azure_pipeline_dev.yaml +++ b/.pipelines/azure_pipeline_dev.yaml @@ -70,6 +70,7 @@ steps: TargetFolder: '$(Build.ArtifactStagingDirectory)/build' - task: AzureCLI@2 + displayName: "Docker multi-arch linux build" inputs: azureSubscription: ${{ variables.armServiceConnectionName }} scriptType: bash @@ -95,7 +96,7 @@ steps: displayName: 'Generation Task' inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)' - DockerImagesToScan: 'golang:1.14, ubuntu:18.04, ${{ variables.repoImageName }}:$(datetime)-$(commit)' + DockerImagesToScan: 'golang:1.15, ubuntu:18.04, ${{ variables.repoImageName }}:$(datetime)-$(commit)' - task: PublishBuildArtifacts@1 inputs: diff --git a/.pipelines/azure_pipeline_prod.yaml b/.pipelines/azure_pipeline_prod.yaml index 7608af28f..0531f644d 100644 --- a/.pipelines/azure_pipeline_prod.yaml +++ b/.pipelines/azure_pipeline_prod.yaml @@ -70,6 +70,7 @@ steps: TargetFolder: '$(Build.ArtifactStagingDirectory)/build' - task: AzureCLI@2 + displayName: "Docker multi-arch linux build" inputs: azureSubscription: ${{ variables.armServiceConnectionName }} scriptType: bash @@ -95,7 +96,7 @@ steps: displayName: 'Generation Task' inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)' - DockerImagesToScan: 'golang:1.14, ubuntu:18.04, ${{ variables.repoImageName }}:ciprod-$(datetime)-$(commit)' + DockerImagesToScan: 'golang:1.15, ubuntu:18.04, ${{ variables.repoImageName }}:ciprod-$(datetime)-$(commit)' - task: PublishBuildArtifacts@1 inputs: From 6d31928c4f920c2d1b4efd1e2209d29d71b6edfb Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Thu, 17 Mar 2022 14:29:59 -0700 Subject: [PATCH 206/301] specify go patch version (#723) * specify go minor version Co-authored-by: Amol Agrawal --- .pipelines/azure_pipeline_dev.yaml | 2 +- .pipelines/azure_pipeline_prod.yaml | 2 +- kubernetes/linux/Dockerfile.multiarch | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.pipelines/azure_pipeline_dev.yaml b/.pipelines/azure_pipeline_dev.yaml index 4de0f2f31..ba8a530fc 100644 --- 
a/.pipelines/azure_pipeline_dev.yaml +++ b/.pipelines/azure_pipeline_dev.yaml @@ -96,7 +96,7 @@ steps: displayName: 'Generation Task' inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)' - DockerImagesToScan: 'golang:1.15, ubuntu:18.04, ${{ variables.repoImageName }}:$(datetime)-$(commit)' + DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04, ${{ variables.repoImageName }}:$(datetime)-$(commit)' - task: PublishBuildArtifacts@1 inputs: diff --git a/.pipelines/azure_pipeline_prod.yaml b/.pipelines/azure_pipeline_prod.yaml index 0531f644d..6f5d8bd45 100644 --- a/.pipelines/azure_pipeline_prod.yaml +++ b/.pipelines/azure_pipeline_prod.yaml @@ -96,7 +96,7 @@ steps: displayName: 'Generation Task' inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)' - DockerImagesToScan: 'golang:1.15, ubuntu:18.04, ${{ variables.repoImageName }}:ciprod-$(datetime)-$(commit)' + DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04, ${{ variables.repoImageName }}:ciprod-$(datetime)-$(commit)' - task: PublishBuildArtifacts@1 inputs: diff --git a/kubernetes/linux/Dockerfile.multiarch b/kubernetes/linux/Dockerfile.multiarch index 6ac8c6507..180a9e11c 100644 --- a/kubernetes/linux/Dockerfile.multiarch +++ b/kubernetes/linux/Dockerfile.multiarch @@ -1,4 +1,4 @@ -FROM --platform=$BUILDPLATFORM golang:1.15 AS builder +FROM --platform=$BUILDPLATFORM golang:1.15.14 AS builder ARG TARGETOS TARGETARCH RUN /usr/bin/apt-get update && /usr/bin/apt-get install git g++ make pkg-config libssl-dev libpam0g-dev rpm librpm-dev uuid-dev libkrb5-dev python sudo gcc-aarch64-linux-gnu -y From e3c0ad1bbc62f4aa5f57557d13530718c2c357d5 Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Thu, 17 Mar 2022 15:13:04 -0700 Subject: [PATCH 207/301] User/amagraw/ciprod release 20220317 (#724) * ciprod release march changes Co-authored-by: Amol Agrawal --- ReleaseNotes.md | 20 ++++++++++++++++---- charts/azuremonitor-containers/values.yaml | 4 ++-- kubernetes/linux/Dockerfile | 2 +- kubernetes/linux/Dockerfile.multiarch | 2 +- kubernetes/omsagent.yaml | 8 ++++---- kubernetes/windows/Dockerfile | 2 +- 6 files changed, 25 insertions(+), 13 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index e070c151c..d0e5bad22 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,16 +11,28 @@ additional questions or comments. 
Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) -### 3/11/2022 - -##### Version microsoft/oms:ciprod03112022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03112022 (linux) -##### Version microsoft/oms:win-ciprod03112022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03112022 (windows) +### 3/17/2022 - +##### Version microsoft/oms:ciprod03172022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03172022 (linux) +##### Version microsoft/oms:win-ciprod03172022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03172022 (windows) ##### Code change log - Linux Agent + - Multi-Arch Image to support both AMD64 and ARM64 + - Ruby upgraded to version 2.7 from 2.6 + - Fix Telegraf Permissions + - Fix ADX bug with database name - Vulnerability fixes + - MDSD updated to 1.17.0 + - HTTP Proxy support + - Retries for Log Analytics Ingestion + - ARM64 support + - Memory leak fixes for network failure scenario - Windows Agent - Bug fix for FluentBit stdout and stderr log filtering - Common - - Upgrade Go lang version from 1.14.1 to 1.15.14 + - Upgrade Go lang version from 1.14.1 to 1.15.14 + - MSI onboarding ARM template update + - AKS HTTP Proxy support + - Go packages upgrade to address vulnerabilities ### 1/31/2022 - ##### Version microsoft/oms:ciprod01312022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01312022 (linux) diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index b47416bc1..4460f7756 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -21,8 +21,8 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod03112022" - tagWindows: "win-ciprod03112022" + tag: "ciprod03172022" + tagWindows: "win-ciprod03172022" pullPolicy: IfNotPresent dockerProviderVersion: "16.0.0-0" agentVersion: "azure-mdsd-1.17.0" diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index bace8d45e..becbe1157 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -17,7 +17,7 @@ ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python-ctypes init-system-helpers net-tools rsyslog cron vim dmidecode apt-transport-https gnupg && rm -rf /var/lib/apt/lists/* COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs defaultpromenvvariables-sidecar mdsd.xml envmdsd logrotate.conf $tmpdir/ -ARG IMAGE_TAG=ciprod03112022 +ARG IMAGE_TAG=ciprod03172022 ENV AGENT_VERSION ${IMAGE_TAG} WORKDIR ${tmpdir} diff --git a/kubernetes/linux/Dockerfile.multiarch b/kubernetes/linux/Dockerfile.multiarch index 180a9e11c..df8b04d19 100644 --- a/kubernetes/linux/Dockerfile.multiarch +++ b/kubernetes/linux/Dockerfile.multiarch @@ -29,7 +29,7 @@ RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl COPY --from=builder /src/kubernetes/linux/Linux_ULINUX_1.0_*_64_Release/docker-cimprov-*.*.*-*.*.sh $tmpdir/ COPY kubernetes/linux/setup.sh kubernetes/linux/main.sh kubernetes/linux/defaultpromenvvariables kubernetes/linux/defaultpromenvvariables-rs kubernetes/linux/defaultpromenvvariables-sidecar kubernetes/linux/mdsd.xml kubernetes/linux/envmdsd kubernetes/linux/logrotate.conf $tmpdir/ -ARG IMAGE_TAG=ciprod03112022 +ARG IMAGE_TAG=ciprod03172022 ENV AGENT_VERSION 
${IMAGE_TAG} WORKDIR ${tmpdir} diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 49df045b9..c8324370b 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03112022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03172022" imagePullPolicy: IfNotPresent resources: limits: @@ -454,7 +454,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01312022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03172022" imagePullPolicy: IfNotPresent resources: limits: @@ -603,7 +603,7 @@ spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03112022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03172022" imagePullPolicy: IfNotPresent resources: limits: @@ -776,7 +776,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03112022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03172022" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index ef3b1140d..3aa4054e1 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod03112022 +ARG IMAGE_TAG=win-ciprod03172022 # Do not split this into multiple RUN! 
# Docker creates a layer for every RUN-Statement From e05e75e587991fda778a5c29a3d3e654492bff1b Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 18 Mar 2022 18:45:41 -0700 Subject: [PATCH 208/301] Remove health type from DCR onboarding & add private link support for windows agent in msi mode (#727) * add private link support for windows agent in msi auth * remove Microsoft-KubeHealth * add private link support for windows msi * fix bug * fix bug * fix bug * fix bug --- scripts/dcr-onboarding/ci-extension-dcr.json | 16 +++--- .../existingClusterOnboarding.json | 2 - .../plugins/go/src/ingestion_token_utils.go | 57 ++++++++++++++++++- 3 files changed, 62 insertions(+), 13 deletions(-) diff --git a/scripts/dcr-onboarding/ci-extension-dcr.json b/scripts/dcr-onboarding/ci-extension-dcr.json index f3fbec79b..6b6339de9 100644 --- a/scripts/dcr-onboarding/ci-extension-dcr.json +++ b/scripts/dcr-onboarding/ci-extension-dcr.json @@ -1,7 +1,7 @@ { "location": "", "properties": { - "dataSources": { + "dataSources": { "extensions": [ { "name": "ContainerInsightsExtension", @@ -9,22 +9,21 @@ "Microsoft-Perf", "Microsoft-ContainerInventory", "Microsoft-ContainerLog", - "Microsoft-ContainerLogV2", + "Microsoft-ContainerLogV2", "Microsoft-ContainerNodeInventory", "Microsoft-KubeEvents", - "Microsoft-KubeHealth", "Microsoft-KubeMonAgentEvents", "Microsoft-KubeNodeInventory", "Microsoft-KubePodInventory", "Microsoft-KubePVInventory", "Microsoft-KubeServices", - "Microsoft-InsightsMetrics" + "Microsoft-InsightsMetrics" ], "extensionName": "ContainerInsights" } ] - }, + }, "destinations": { "logAnalytics": [ { @@ -38,17 +37,16 @@ "streams": [ "Microsoft-Perf", "Microsoft-ContainerInventory", - "Microsoft-ContainerLog", - "Microsoft-ContainerLogV2", + "Microsoft-ContainerLog", + "Microsoft-ContainerLogV2", "Microsoft-ContainerNodeInventory", "Microsoft-KubeEvents", - "Microsoft-KubeHealth", "Microsoft-KubeMonAgentEvents", "Microsoft-KubeNodeInventory", "Microsoft-KubePodInventory", "Microsoft-KubePVInventory", "Microsoft-KubeServices", - "Microsoft-InsightsMetrics" + "Microsoft-InsightsMetrics" ], "destinations": [ "ciworkspace" diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index d5b613537..28996f4a1 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -84,7 +84,6 @@ "Microsoft-ContainerLogV2", "Microsoft-ContainerNodeInventory", "Microsoft-KubeEvents", - "Microsoft-KubeHealth", "Microsoft-KubeMonAgentEvents", "Microsoft-KubeNodeInventory", "Microsoft-KubePodInventory", @@ -113,7 +112,6 @@ "Microsoft-ContainerLogV2", "Microsoft-ContainerNodeInventory", "Microsoft-KubeEvents", - "Microsoft-KubeHealth", "Microsoft-KubeMonAgentEvents", "Microsoft-KubeNodeInventory", "Microsoft-KubePodInventory", diff --git a/source/plugins/go/src/ingestion_token_utils.go b/source/plugins/go/src/ingestion_token_utils.go index c96685042..896930005 100644 --- a/source/plugins/go/src/ingestion_token_utils.go +++ b/source/plugins/go/src/ingestion_token_utils.go @@ -27,6 +27,7 @@ var ChannelId string var IngestionAuthToken string var IngestionAuthTokenExpiration int64 +var AMCSRedirectedEndpoint string = "" type IMDSResponse struct { AccessToken string `json:"access_token"` @@ -218,11 +219,18 @@ func getAgentConfiguration(imdsAccessToken string) (configurationId string, 
chan configurationId = "" channelId = "" var amcs_endpoint *url.URL + var AmcsEndpoint string osType := os.Getenv("OS_TYPE") resourceId := os.Getenv("AKS_RESOURCE_ID") resourceRegion := os.Getenv("AKS_REGION") mcsEndpoint := os.Getenv("MCS_ENDPOINT") - amcs_endpoint_string := fmt.Sprintf("https://%s.handler.control.%s%s/agentConfigurations?platform=%s&api-version=%s", resourceRegion, mcsEndpoint, resourceId, osType, AMCSAgentConfigAPIVersion) + + AmcsEndpoint = fmt.Sprintf("https://global.handler.control.%s", mcsEndpoint) + if AMCSRedirectedEndpoint != "" { + AmcsEndpoint = AMCSRedirectedEndpoint + } + amcs_endpoint_string := fmt.Sprintf("%s%s/agentConfigurations?operatingLocation=%s&platform=%s&api-version=%s", AmcsEndpoint, resourceId, resourceRegion, osType, AMCSAgentConfigAPIVersion) + amcs_endpoint, err = url.Parse(amcs_endpoint_string) if err != nil { Log("getAgentConfiguration: Error creating AMCS endpoint URL: %s", err.Error()) @@ -253,6 +261,25 @@ func getAgentConfiguration(imdsAccessToken string) (configurationId string, chan defer resp.Body.Close() } Log("getAgentConfiguration Response Status: %d", resp.StatusCode) + if resp.StatusCode == 421 { // AMCS returns redirected endpoint incase of private link + agentConfigEndpoint := resp.Header.Get("x-ms-agent-config-endpoint") + Log("getAgentConfiguration x-ms-agent-config-endpoint: %s", agentConfigEndpoint) + if agentConfigEndpoint != "" { + AMCSRedirectedEndpoint = agentConfigEndpoint + // reconstruct request with redirected endpoint + var err error + redirected_amcs_endpoint_string := fmt.Sprintf("%s%s/agentConfigurations?operatingLocation=%s&platform=%s&api-version=%s", AMCSRedirectedEndpoint, resourceId, resourceRegion, osType, AMCSAgentConfigAPIVersion) + var bearer = "Bearer " + imdsAccessToken + req, err = http.NewRequest("GET", redirected_amcs_endpoint_string, nil) + if err != nil { + message := fmt.Sprintf("getAgentConfiguration: Error creating HTTP request for AMCS endpoint: %s", err.Error()) + Log(message) + return configurationId, channelId, err + } + req.Header.Set("Authorization", bearer) + continue + } + } if IsRetriableError(resp.StatusCode) { message := fmt.Sprintf("getAgentConfiguration: Request failed with an error code: %d, retryCount: %d", resp.StatusCode, retryCount) Log(message) @@ -326,11 +353,18 @@ func getIngestionAuthToken(imdsAccessToken string, configurationId string, chann ingestionAuthToken = "" refreshInterval = 0 var amcs_endpoint *url.URL + var AmcsEndpoint string osType := os.Getenv("OS_TYPE") resourceId := os.Getenv("AKS_RESOURCE_ID") resourceRegion := os.Getenv("AKS_REGION") mcsEndpoint := os.Getenv("MCS_ENDPOINT") - amcs_endpoint_string := fmt.Sprintf("https://%s.handler.control.%s%s/agentConfigurations/%s/channels/%s/issueIngestionToken?platform=%s&api-version=%s", resourceRegion, mcsEndpoint, resourceId, configurationId, channelId, osType, AMCSIngestionTokenAPIVersion) + + AmcsEndpoint = fmt.Sprintf("https://global.handler.control.%s", mcsEndpoint) + if AMCSRedirectedEndpoint != "" { + AmcsEndpoint = AMCSRedirectedEndpoint + } + + amcs_endpoint_string := fmt.Sprintf("%s%s/agentConfigurations/%s/channels/%s/issueIngestionToken?operatingLocation=%s&platform=%s&api-version=%s", AmcsEndpoint, resourceId, configurationId, channelId, resourceRegion, osType, AMCSIngestionTokenAPIVersion) amcs_endpoint, err = url.Parse(amcs_endpoint_string) if err != nil { Log("getIngestionAuthToken: Error creating AMCS endpoint URL: %s", err.Error()) @@ -366,6 +400,25 @@ func getIngestionAuthToken(imdsAccessToken 
string, configurationId string, chann } Log("getIngestionAuthToken Response Status: %d", resp.StatusCode) + if resp.StatusCode == 421 { // AMCS returns redirected endpoint incase of private link + agentConfigEndpoint := resp.Header.Get("x-ms-agent-config-endpoint") + Log("getIngestionAuthToken x-ms-agent-config-endpoint: %s", agentConfigEndpoint) + if agentConfigEndpoint != "" { + AMCSRedirectedEndpoint = agentConfigEndpoint + // reconstruct request with redirected endpoint + var err error + redirected_amcs_endpoint_string := fmt.Sprintf("%s%s/agentConfigurations/%s/channels/%s/issueIngestionToken?operatingLocation=%s&platform=%s&api-version=%s", AMCSRedirectedEndpoint, resourceId, configurationId, channelId, resourceRegion, osType, AMCSIngestionTokenAPIVersion) + var bearer = "Bearer " + imdsAccessToken + req, err = http.NewRequest("GET", redirected_amcs_endpoint_string, nil) + if err != nil { + message := fmt.Sprintf("getIngestionAuthToken: Error creating HTTP request for AMCS endpoint: %s", err.Error()) + Log(message) + return ingestionAuthToken, refreshInterval, err + } + req.Header.Set("Authorization", bearer) + continue + } + } if IsRetriableError(resp.StatusCode) { message := fmt.Sprintf("getIngestionAuthToken: Request failed with an error code: %d, retryCount: %d", resp.StatusCode, retryCount) Log(message) From 78cd76de4a8cae8378a984e210cdb696ef00c30f Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Mon, 21 Mar 2022 11:24:38 -0700 Subject: [PATCH 209/301] check platform specific tags (#730) (#731) --- .../ServiceGroupRoot/Scripts/pushAgentToAcr.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh b/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh index d39cedde0..25eb43f47 100644 --- a/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh +++ b/deployment/agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh @@ -19,7 +19,14 @@ if [ $? -ne 0 ]; then echo "-e error unable to get list of mcr tags for azuremonitor/containerinsights/ciprod repository" exit 1 fi -TAG_EXISTS=$(echo $MCR_TAG_RESULT | jq '.tags | contains(["'"$AGENT_RELEASE$AGENT_IMAGE_TAG_SUFFIX"'"])') + +if [[ "$AGENT_IMAGE_FULL_PATH" == *"win-"* ]]; then + echo "checking windows tags" + TAG_EXISTS=$(echo $MCR_TAG_RESULT | jq '.tags | contains(["'"win-$AGENT_RELEASE$AGENT_IMAGE_TAG_SUFFIX"'"])') +else + echo "checking linux tags" + TAG_EXISTS=$(echo $MCR_TAG_RESULT | jq '.tags | contains(["'"$AGENT_RELEASE$AGENT_IMAGE_TAG_SUFFIX"'"])') +fi if $TAG_EXISTS; then echo "-e error ${AGENT_IMAGE_TAG_SUFFIX} already exists in mcr. make sure the image tag is unique" From 90707c74c390176d21beea947a4438e0ddbf0d32 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Thu, 24 Mar 2022 15:07:59 -0700 Subject: [PATCH 210/301] PodReadyPercentage metric bug fix (#734) --- source/plugins/ruby/MdmMetricsGenerator.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/plugins/ruby/MdmMetricsGenerator.rb b/source/plugins/ruby/MdmMetricsGenerator.rb index 0858990da..f4904697c 100644 --- a/source/plugins/ruby/MdmMetricsGenerator.rb +++ b/source/plugins/ruby/MdmMetricsGenerator.rb @@ -79,7 +79,7 @@ def populatePodReadyPercentageHash @pod_ready_hash.each { |dim_key, value| podsNotReady = @pod_not_ready_hash.key?(dim_key) ? 
@pod_not_ready_hash[dim_key] : 0 totalPods = value + podsNotReady - podsReadyPercentage = (value / totalPods) * 100 + podsReadyPercentage = value * 100.0 / totalPods @pod_ready_percentage_hash[dim_key] = podsReadyPercentage # Deleting this key value pair from not ready hash, # so that we can get those dimensions for which there are 100% of the pods in not ready state From f253e4fa6ed5a6132c30c3d3ed4b4c3abb5d8398 Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Mon, 28 Mar 2022 12:35:54 -0700 Subject: [PATCH 211/301] update windows to ruby 2.7 (#732) Co-authored-by: Amol Agrawal --- kubernetes/linux/setup.sh | 8 -------- kubernetes/windows/Dockerfile | 6 +++--- kubernetes/windows/Dockerfile-dev-base-image | 6 +++--- kubernetes/windows/setup.ps1 | 7 ------- 4 files changed, 6 insertions(+), 21 deletions(-) diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 00d5bc0fa..709c8f1c4 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -79,11 +79,3 @@ sudo apt-get remove ruby2.7-dev gcc make -y # Remove settings for cron.daily that conflict with the node's cron.daily. Since both are trying to rotate the same files # in /var/log at the same time, the rotation doesn't happen correctly and then the *.1 file is forever logged to. rm /etc/logrotate.d/alternatives /etc/logrotate.d/apt /etc/logrotate.d/azure-mdsd /etc/logrotate.d/rsyslog - -#Remove gemfile.lock for http_parser gem 0.6.0 -#see - https://github.com/fluent/fluentd/issues/3374 https://github.com/tmm1/http_parser.rb/issues/70 -if [ -e "/var/lib/gems/2.6.0/gems/http_parser.rb-0.6.0/Gemfile.lock" ]; then - #rename - echo "Renaming unused gemfile.lock for http_parser 0.6.0" - mv /var/lib/gems/2.6.0/gems/http_parser.rb-0.6.0/Gemfile.lock /var/lib/gems/2.6.0/gems/http_parser.rb-0.6.0/renamed_Gemfile_lock.renamed -fi diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 3aa4054e1..7c514a777 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -9,8 +9,8 @@ ARG IMAGE_TAG=win-ciprod03172022 # Docker creates a layer for every RUN-Statement RUN powershell -Command "Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" # Fluentd depends on cool.io whose fat gem is only available for Ruby < 2.5, so need to specify --platform ruby when install Ruby > 2.5 and install msys2 to get dev tools -RUN choco install -y ruby --version 2.6.5.1 --params "'/InstallDir:C:\ruby26'" \ -&& choco install -y msys2 --version 20211130.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ +RUN choco install -y ruby --version 2.7.5.1 --params "'/InstallDir:C:\ruby27'" \ +&& choco install -y msys2 --version 20211130.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby27\msys64'" \ && choco install -y vim # gangams - optional MSYS2 update via ridk failing in merged docker file so skipping that since we dont need optional update @@ -30,7 +30,7 @@ RUN refreshenv \ && gem sources --clear-all # Remove gem cache and chocolatey -RUN powershell -Command "Remove-Item -Force C:\ruby26\lib\ruby\gems\2.6.0\cache\*.gem; Remove-Item -Recurse -Force 'C:\ProgramData\chocolatey'" +RUN powershell -Command "Remove-Item -Force C:\ruby27\lib\ruby\gems\2.7.0\cache\*.gem; Remove-Item -Recurse -Force 'C:\ProgramData\chocolatey'" SHELL ["powershell"] diff --git a/kubernetes/windows/Dockerfile-dev-base-image b/kubernetes/windows/Dockerfile-dev-base-image index 501fead89..6a758060a 100644 --- 
a/kubernetes/windows/Dockerfile-dev-base-image +++ b/kubernetes/windows/Dockerfile-dev-base-image @@ -7,8 +7,8 @@ LABEL vendor=Microsoft\ Corp \ # Docker creates a layer for every RUN-Statement RUN powershell -Command "Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" # Fluentd depends on cool.io whose fat gem is only available for Ruby < 2.5, so need to specify --platform ruby when install Ruby > 2.5 and install msys2 to get dev tools -RUN choco install -y ruby --version 2.6.5.1 --params "'/InstallDir:C:\ruby26'" \ -&& choco install -y msys2 --version 20210604.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ +RUN choco install -y ruby --version 2.7.5.1 --params "'/InstallDir:C:\ruby27'" \ +&& choco install -y msys2 --version 20210604.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby27\msys64'" \ && choco install -y vim # gangams - optional MSYS2 update via ridk failing in merged docker file so skipping that since we dont need optional update @@ -28,7 +28,7 @@ RUN refreshenv \ && gem sources --clear-all # Remove gem cache and chocolatey -RUN powershell -Command "Remove-Item -Force C:\ruby26\lib\ruby\gems\2.6.0\cache\*.gem; Remove-Item -Recurse -Force 'C:\ProgramData\chocolatey'" +RUN powershell -Command "Remove-Item -Force C:\ruby27\lib\ruby\gems\2.7.0\cache\*.gem; Remove-Item -Recurse -Force 'C:\ProgramData\chocolatey'" SHELL ["powershell"] diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index 857f9f690..33bac61d1 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -66,10 +66,3 @@ Remove-Item /installation -Recurse #Remove gemfile.lock for http_parser gem 0.6.0 #see - https://github.com/fluent/fluentd/issues/3374 https://github.com/tmm1/http_parser.rb/issues/70 - -$gemfile = "\ruby26\lib\ruby\gems\2.6.0\gems\http_parser.rb-0.6.0\Gemfile.lock" -$gemfileFullPath = $Env:SYSTEMDRIVE + "\" + $gemfile -If (Test-Path -Path $gemfile ) { - Write-Host ("Renaming unused gemfile.lock for http_parser 0.6.0") - Rename-Item -Path $gemfileFullPath -NewName "renamed_Gemfile_lock.renamed" -} \ No newline at end of file From cb1b659461cbc58b4de37246fafd6b8f9b32da92 Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Tue, 29 Mar 2022 11:31:56 -0700 Subject: [PATCH 212/301] Improve CI/CD for multi-arch (#733) * selective push + trivy test * keep size down * improve CI and PR builds * improve checks * remove IMAGE_TAG build_arg from prod pipeline Co-authored-by: Amol Agrawal --- .gitignore | 2 +- .pipelines/azure_pipeline_dev.yaml | 31 ++++++++++++++++--- .pipelines/azure_pipeline_prod.yaml | 30 +++++++++++++++--- kubernetes/linux/Dockerfile.multiarch | 9 +++++- .../build-and-publish-docker-image.sh | 2 +- 5 files changed, 62 insertions(+), 12 deletions(-) diff --git a/.gitignore b/.gitignore index b0467519c..b6e2f8979 100644 --- a/.gitignore +++ b/.gitignore @@ -23,7 +23,7 @@ intermediate *.dll *.obj # ignore docker provider shell bundle -kubernetes/linux/Linux_ULINUX_1.0_x64_64_Release +kubernetes/linux/Linux_ULINUX_1.0_*_64_Release # ignore generated .h files for go source/plugins/go/src/*.h *_mock.go diff --git a/.pipelines/azure_pipeline_dev.yaml b/.pipelines/azure_pipeline_dev.yaml index ba8a530fc..395fafebf 100644 --- a/.pipelines/azure_pipeline_dev.yaml +++ b/.pipelines/azure_pipeline_dev.yaml @@ -4,7 +4,15 @@ # https://aka.ms/yaml trigger: -- ci_dev + batch: true + branches: + include: + - ci_dev + +pr: + branches: + 
include: + - ci_dev pool: name: Azure-Pipelines-CI-Test-EO @@ -14,13 +22,14 @@ variables: subscription: '9b96ebbd-c57a-42d1-bbe9-b69296e4c7fb' containerRegistry: 'containerinsightsprod' repoImageName: '${{ variables.containerRegistry }}.azurecr.io/public/azuremonitor/containerinsights/cidev' + IS_PR: $[eq(variables['Build.Reason'], 'PullRequest')] steps: - bash: | commit=$(git rev-parse --short HEAD) echo "##vso[task.setvariable variable=commit;]$commit" - datetime=$(date +'%Y%m%d%s') + datetime=$(date +'%m%d%Y') echo "##vso[task.setvariable variable=datetime;]$datetime" cd $(Build.SourcesDirectory)/deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts @@ -42,7 +51,7 @@ steps: inputs: SourceFolder: "$(Build.SourcesDirectory)/.pipelines" Contents: | - *.sh + **/*.sh TargetFolder: '$(Build.ArtifactStagingDirectory)/build' - task: CopyFiles@2 @@ -88,12 +97,24 @@ steps: az account set -s ${{ variables.subscription }} az acr login -n ${{ variables.containerRegistry }} - docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(datetime)-$(commit) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/metadata.json --build-arg IMAGE_TAG=$(datetime)-$(commit) --push . + if [ "$(Build.Reason)" != "PullRequest" ]; then + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(datetime)-$(commit) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/metadata.json --build-arg IMAGE_TAG=$(datetime)-$(commit) --push . + + docker pull ${{ variables.repoImageName }}:$(datetime)-$(commit) + else + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(datetime)-$(commit) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/metadata.json --build-arg IMAGE_TAG=$(datetime)-$(commit) . 
+ fi - docker pull ${{ variables.repoImageName }}:$(datetime)-$(commit) +- task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 + displayName: 'Generation Task' + condition: eq(variables.IS_PR, true) + inputs: + BuildDropPath: '$(Build.ArtifactStagingDirectory)' + DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04' - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 displayName: 'Generation Task' + condition: eq(variables.IS_PR, false) inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)' DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04, ${{ variables.repoImageName }}:$(datetime)-$(commit)' diff --git a/.pipelines/azure_pipeline_prod.yaml b/.pipelines/azure_pipeline_prod.yaml index 6f5d8bd45..e1c3a9db2 100644 --- a/.pipelines/azure_pipeline_prod.yaml +++ b/.pipelines/azure_pipeline_prod.yaml @@ -4,7 +4,15 @@ # https://aka.ms/yaml trigger: -- ci_prod + batch: true + branches: + include: + - ci_prod + +pr: + branches: + include: + - ci_prod pool: name: Azure-Pipelines-CI-Prod-EO @@ -14,13 +22,14 @@ variables: subscription: '30c56c3a-54da-46ea-b004-06eb33432687' containerRegistry: 'containerinsightsbuild' repoImageName: '${{ variables.containerRegistry }}.azurecr.io/official/linux' + IS_PR: $[eq(variables['Build.Reason'], 'PullRequest')] steps: - bash: | commit=$(git rev-parse --short HEAD) echo "##vso[task.setvariable variable=commit;]$commit" - datetime=$(date +'%Y%m%d%s') + datetime=$(date +'%m%d%Y') echo "##vso[task.setvariable variable=datetime;]$datetime" cd $(Build.SourcesDirectory)/deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts @@ -88,12 +97,25 @@ steps: az account set -s ${{ variables.subscription }} az acr login -n ${{ variables.containerRegistry }} - docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:ciprod-$(datetime)-$(commit) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/metadata.json --build-arg IMAGE_TAG=ciprod-$(datetime)-$(commit) --push . + if [ "$(Build.Reason)" != "PullRequest" ]; then + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:ciprod-$(datetime)-$(commit) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/metadata.json --push . + + docker pull ${{ variables.repoImageName }}:ciprod-$(datetime)-$(commit) + else + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:ciprod-$(datetime)-$(commit) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/metadata.json . 
+ fi - docker pull ${{ variables.repoImageName }}:ciprod-$(datetime)-$(commit) - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 displayName: 'Generation Task' + condition: eq(variables.IS_PR, true) + inputs: + BuildDropPath: '$(Build.ArtifactStagingDirectory)' + DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04' + +- task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 + displayName: 'Generation Task' + condition: eq(variables.IS_PR, false) inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)' DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04, ${{ variables.repoImageName }}:ciprod-$(datetime)-$(commit)' diff --git a/kubernetes/linux/Dockerfile.multiarch b/kubernetes/linux/Dockerfile.multiarch index df8b04d19..e94bf71bb 100644 --- a/kubernetes/linux/Dockerfile.multiarch +++ b/kubernetes/linux/Dockerfile.multiarch @@ -35,5 +35,12 @@ ENV AGENT_VERSION ${IMAGE_TAG} WORKDIR ${tmpdir} RUN chmod 775 $tmpdir/*.sh; sync; $tmpdir/setup.sh ${TARGETARCH} -CMD [ "/opt/main.sh" ] +# Do vulnerability scan in a seperate stage to avoid adding layer +FROM base_image AS vulnscan +COPY --from=aquasec/trivy:latest /usr/local/bin/trivy /usr/local/bin/trivy +RUN trivy rootfs --exit-code 1 --ignore-unfixed --no-progress --severity HIGH,CRITICAL --skip-files "/usr/sbin/telegraf" --skip-files "/opt/telegraf" --skip-files "/usr/local/bin/trivy" / + +# Revert to base layer before vulnscan +FROM base_image AS ContainerInsights +CMD [ "/opt/main.sh" ] \ No newline at end of file diff --git a/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh b/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh index 580b158c9..638236507 100755 --- a/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh +++ b/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh @@ -138,7 +138,7 @@ echo "source code base directory: $baseDir" echo "build directory for docker provider: $buildDir" echo "docker file directory: $dockerFileDir" -if [ "$multi" -eq "1" ]; then +if [ -n "$multi" ] && [ "$multi" -eq "1" ]; then echo "building multiarch" cd $baseDir docker buildx build --platform linux/arm64/v8,linux/amd64 -t $image --build-arg IMAGE_TAG=$imageTag -f $linuxDir/Dockerfile.multiarch --push . From f0266dcee7511917ba04219bd7daf42e73c2686c Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 29 Mar 2022 15:53:34 -0700 Subject: [PATCH 213/301] Gangams/ts updates for msi (#736) * ts updates for msi based onboarding * ts updates for msi based onboarding * fix typo * fix typo * improve log message --- scripts/troubleshoot/README.md | 4 +- scripts/troubleshoot/TroubleshootError.ps1 | 276 ++++++++++++++------- 2 files changed, 189 insertions(+), 91 deletions(-) diff --git a/scripts/troubleshoot/README.md b/scripts/troubleshoot/README.md index 650a5df6f..c68e12913 100644 --- a/scripts/troubleshoot/README.md +++ b/scripts/troubleshoot/README.md @@ -50,8 +50,8 @@ You can use the troubleshooting script provided [here](https://raw.githubusercon Steps: - Open powershell using the [cloudshell](https://docs.microsoft.com/en-us/azure/cloud-shell/overview) in the azure portal. > Note: This script supported on any Powershell supported environment: Windows and Non-Windows. 
- For Linux, refer [Install-Powershell-On-Linux](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-core-on-linux?view=powershell-6) and - For Mac OS, refer [install-powershell-core-on-mac](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-core-on-macos?view=powershell-6) how to install powershell + For Linux, refer [Install-Powershell-On-Linux](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-core-on-linux?view=powershell-7) and + For Mac OS, refer [install-powershell-core-on-mac](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-core-on-macos?view=powershell-7) how to install powershell - Make sure that you're using powershell (selected by default) - Run the following command to change home directory - `cd ~` - Run the following command to download the script - `curl -LO https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/troubleshoot/TroubleshootError.ps1` diff --git a/scripts/troubleshoot/TroubleshootError.ps1 b/scripts/troubleshoot/TroubleshootError.ps1 index 6d97c53d5..8dcded546 100644 --- a/scripts/troubleshoot/TroubleshootError.ps1 +++ b/scripts/troubleshoot/TroubleshootError.ps1 @@ -38,6 +38,8 @@ if (($null -eq $ClusterResourceId) -or ($ClusterResourceId.Split("/").Length -ne exit 1 } +$UseAADAuth = $false +$ClusterRegion = "" $isClusterAndWorkspaceInDifferentSubs = $false $ClusterType = "AKS" if ($ClusterResourceId.ToLower().Contains("microsoft.containerservice/openshiftmanagedclusters") -eq $true) { @@ -50,8 +52,9 @@ $azResourcesModule = Get-Module -ListAvailable -Name Az.Resources $azOperationalInsights = Get-Module -ListAvailable -Name Az.OperationalInsights $azAksModule = Get-Module -ListAvailable -Name Az.Aks $azARGModule = Get-Module -ListAvailable -Name Az.ResourceGraph +$azMonitorModule = Get-Module -ListAvailable -Name Az.Monitor -if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null -eq $azOperationalInsights)) { +if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null -eq $azOperationalInsights) -or ($null -eq $azMonitorModule)) { $isWindowsMachine = $true if ($PSVersionTable -and $PSVersionTable.PSEdition -contains "core") { @@ -75,7 +78,7 @@ if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAcco } $message = "This script will try to install the latest versions of the following Modules : ` - Az.Ak,Az.ResourceGraph, Az.Resources, Az.Accounts and Az.OperationalInsights using the command` + Az.Ak,Az.ResourceGraph, Az.Resources, Az.Accounts, Az.OperationalInsights and Az.Monitor using the command` `'Install-Module {Insert Module Name} -Repository PSGallery -Force -AllowClobber -ErrorAction Stop -WarningAction Stop' `If you do not have the latest version of these Modules, this troubleshooting script may not run." $question = "Do you want to Install the modules and run the script or just run the script?" 
@@ -149,6 +152,18 @@ if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAcco } } + if ($null -eq $azMonitorModule) { + try { + + Write-Host("Installing Az.Monitor...") + Install-Module Az.Monitor -Repository PSGallery -Force -AllowClobber -ErrorAction Stop + } + catch { + Write-Host("Close other powershell logins and try installing the latest modules for Az.OperationalInsights in a new powershell window: eg. 'Install-Module Az.Monitor -Repository PSGallery -Force'") -ForegroundColor Red + Stop-Transcript + exit 1 + } + } } 1 { if ($null -eq $azARGModule) { @@ -209,6 +224,16 @@ if (($null -eq $azAksModule) -or ($null -eq $azARGModule) -or ($null -eq $azAcco } } + if ($null -eq $azMonitorModule) { + try { + Import-Module Az.Monitor -ErrorAction Stop + } + catch { + Write-Host("Could not import Az.Monitor... Please reinstall this Module") -ForegroundColor Red + Stop-Transcript + exit 1 + } + } } 2 { Write-Host("") @@ -265,12 +290,16 @@ $ResourceGroupName = $ClusterResourceId.split("/")[4] $ClusterName = $ClusterResourceId.split("/")[8] # -# Subscription existance and access check +# Subscription existence and access check # if ($null -eq $account.Account) { try { Write-Host("Please login...") - Login-AzAccount -subscriptionid $ClusterSubscriptionId + if ($isWindowsMachine) { + Login-AzAccount -subscriptionid $ClusterSubscriptionId + } else { + Login-AzAccount -subscriptionid $ClusterSubscriptionId -UseDeviceAuthentication + } } catch { Write-Host("") @@ -331,26 +360,29 @@ try { } else { Write-Host("Successfully checked '" + $ClusterType + "' Cluster details...") -ForegroundColor Green + $ClusterRegion = $ResourceDetailsArray.Location Write-Host("") foreach ($ResourceDetail in $ResourceDetailsArray) { if ($ResourceDetail.ResourceType -eq "Microsoft.ContainerService/managedClusters") { - #gangams: profile can be different casing so convert properties to lowecase and extract it - $props = ($ResourceDetail.Properties | ConvertTo-Json).toLower() | ConvertFrom-Json; + $addonProfiles = ($ResourceDetail.Properties.addonProfiles | ConvertTo-Json).toLower() | ConvertFrom-Json - if ($null -eq $props.addonprofiles.omsagent.config) { + if (($nul -eq $addonProfiles) -or ($null -eq $addonProfiles.omsagent) -or ($null -eq $addonProfiles.omsagent.config)) { Write-Host("Your cluster isn't onboarded to Azure monitor for containers. 
Please refer to the following documentation to onboard:") -ForegroundColor Red; + $clusterProperies = ($ResourceDetail.Properties | ConvertTo-Json) + Write-Host("Cluster Properties found: " + $clusterProperies) -ForegroundColor Red; Write-Host($AksOptInLink) -ForegroundColor Red; Write-Host(""); Stop-Transcript exit 1 } - $omsagentconfig = $props.addonprofiles.omsagent.config; + $LogAnalyticsWorkspaceResourceID = $addonProfiles.omsagent.config.loganalyticsworkspaceresourceid + $isAADAuth = $addonProfiles.omsagent.config.useaadauth + if ($true -eq $isAADAuth) { + $UseAADAuth = $true + } - #gangams - figure out betterway to do this - $omsagentconfig = $omsagentconfig.Trim("{", "}"); - $LogAnalyticsWorkspaceResourceID = $omsagentconfig.split("=")[1]; - Write-Host("AKS Cluster ResourceId: '" + $ResourceDetail.ResourceId + "' "); + Write-Host("AKS Cluster ResourceId: '" + $ResourceDetail.ResourceId + "', LogAnalyticsWorkspaceResourceId: '" + $LogAnalyticsWorkspaceResourceID + "', UseAADAuth: '" + $UseAADAuth + "'"); break } } @@ -389,15 +421,15 @@ catch { } -if ("AKS" -eq $ClusterType ) { +if (("AKS" -eq $ClusterType ) -and ($false -eq $UseAADAuth)) { Write-Host("Currently checking if the cluster is onboarded to custom metrics for Azure monitor for containers..."); #Pre requisite - need cluster spn object Id try { - $clusterDetails = Get-AzAks -Id $ClusterResourceId -ErrorVariable clusterFetchError -ErrorAction SilentlyContinue + $clusterDetails = Get-AzAksCluster -Id $ClusterResourceId -ErrorVariable clusterFetchError -ErrorAction SilentlyContinue Write-Host($clusterDetails | Format-List | Out-String) # Check to see if SP exists, if it does use it. Else use MSI - if ($clusterDetails.ServicePrincipalProfile -ne $null -and $clusterDetails.ServicePrincipalProfile.ClientId -ne $null -and $clusterDetails.ServicePrincipalProfile.ClientId -ne "") { + if ($clusterDetails.ServicePrincipalProfile -ne $null -and $clusterDetails.ServicePrincipalProfile.ClientId -ne $null -and $clusterDetails.ServicePrincipalProfile.ClientId -ne "msi") { Write-Host('Attempting to provide permissions to service principal...') -ForegroundColor Green $clusterSpnClientID = $clusterDetails.ServicePrincipalProfile.ClientId $isServicePrincipal = $true @@ -625,77 +657,144 @@ else { $WorkspacePricingTier = $WorkspaceInformation.sku Write-Host("Pricing tier of the configured LogAnalytics workspace: '" + $WorkspacePricingTier + "' ") -ForegroundColor Green - try { - $WorkspaceIPDetails = Get-AzOperationalInsightsIntelligencePacks -ResourceGroupName $workspaceResourceGroupName -WorkspaceName $workspaceName -ErrorAction Stop -WarningAction silentlyContinue - Write-Host("Successfully fetched workspace IP details...") -ForegroundColor Green - Write-Host("") - } - catch { - Write-Host("") - Write-Host("Failed to get the list of solutions onboarded to the workspace. 
Please make sure that it hasn't been deleted and you have access to it.") -ForegroundColor Red - Write-Host("") - Stop-Transcript - exit 1 - } - - try { - $ContainerInsightsIndex = $WorkspaceIPDetails.Name.IndexOf("ContainerInsights") - Write-Host("Successfully located ContainerInsights solution") -ForegroundColor Green - Write-Host("") - } - catch { - Write-Host("Failed to get ContainerInsights solution details from the workspace") -ForegroundColor Red - Write-Host("") - Stop-Transcript - exit 1 - } - - $isSolutionOnboarded = $WorkspaceIPDetails.Enabled[$ContainerInsightsIndex] - if ($isSolutionOnboarded) { - if ($WorkspacePricingTier -eq "Free") { - Write-Host("Pricing tier of the configured LogAnalytics workspace is Free so you may need to upgrade to pricing tier to non-Free") -ForegroundColor Yellow - } - } - else { + if ($true -eq $UseAADAuth) { # - # Check contributor access to WS + # Check existence of the ContainerInsightsExtension DCR # - $message = "Detected that there is a workspace associated with this cluster, but workspace - '" + $workspaceName + "' in subscription '" + $workspaceSubscriptionId + "' IS NOT ONBOARDED with container health solution." - Write-Host($message) - $question = " Do you want to onboard container health to the workspace?" - - $choices = New-Object Collections.ObjectModel.Collection[Management.Automation.Host.ChoiceDescription] - $choices.Add((New-Object Management.Automation.Host.ChoiceDescription -ArgumentList '&Yes')) - $choices.Add((New-Object Management.Automation.Host.ChoiceDescription -ArgumentList '&No')) - - $decision = $Host.UI.PromptForChoice($message, $question, $choices, 0) - - if ($decision -eq 0) { - Write-Host("Deploying template to onboard container health : Please wait...") - - $DeploymentName = "ContainerInsightsSolutionOnboarding-" + ((Get-Date).ToUniversalTime()).ToString('MMdd-HHmm') - $Parameters = @{ } - $Parameters.Add("workspaceResourceId", $LogAnalyticsWorkspaceResourceID) - $Parameters.Add("workspaceRegion", $WorkspaceLocation) - $Parameters - try { - New-AzResourceGroupDeployment -Name $DeploymentName ` - -ResourceGroupName $workspaceResourceGroupName ` - -TemplateUri https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/templates/azuremonitor-containerSolution.json ` - -TemplateParameterObject $Parameters -ErrorAction Stop` + try { + $dcrRuleName = "MSCI-" + $ClusterName + "-" + $ClusterRegion + $dcrRule = Get-AzDataCollectionRule -ResourceGroupName $workspaceResourceGroupName -RuleName $dcrRuleName -ErrorAction Stop -WarningAction silentlyContinue + Write-Host("Successfully fetched Data Collection Rule...") -ForegroundColor Green + $extensionNameInDCR = $dcrRule.DataSources.Extensions.ExtensionName + if ($extensionNameInDCR -eq "ContainerInsights") { + $laDestinations = $dcrRule.Destinations.LogAnalytics + if (($null -ne $laDestinations) -and ($laDestinations.Length -gt 0) -and ($LogAnalyticsWorkspaceResourceID -eq $laDestinations[0].WorkspaceResourceId)) { + Write-Host("Successfully validated Data Collection Rule is valid...") -ForegroundColor Green + } else { + Write-Host("") + Write-Host("Data Collection Rule: '" + $dcrRuleName + "' found has Log Analytics(LA) workspace which different from the Log Analytics workspace in Monitoring addon.") -ForegroundColor Red + $laWorkspaceResIdInDCR = $laDestinations[0].WorkspaceResourceId + Write-Host("LA workspace found in Data Collection Rule: '" + $laWorkspaceResIdInDCR + "' but where as LA workspace in Monitoring Addon: '" + 
$LogAnalyticsWorkspaceResourceID + "'.") -ForegroundColor Red + Write-Host("") + Stop-Transcript + exit 1 + } + } else { + Write-Host("") + Write-Host("Data Collection Rule: '" + $dcrRuleName + "' found is not valid ContainerInsights extension DCR.") -ForegroundColor Red Write-Host("") - Write-Host("Successfully added Container Insights Solution") -ForegroundColor Green + Stop-Transcript + exit 1 + } + } + catch { + Write-Host("") + Write-Host("Failed to get the data collection Rule: '" + $dcrRuleName + "'. Please make sure that it hasn't been deleted and you have access to it.") -ForegroundColor Red + Write-Host("If DataCollectionRule :'" + $dcrRuleName + "' has been deleted accidentally, disable and enable Monitoring addon back to get this fixed.") -ForegroundColor Red + Write-Host("") + Stop-Transcript + exit 1 + } + # + # Check existence of the ContainerInsightsExtension DCR-A on the cluster resource + # + try { + $dcrAssociation = Get-AzDataCollectionRuleAssociation -TargetResourceId $ClusterResourceId -AssociationName "ContainerInsightsExtension" -ErrorAction Stop -WarningAction silentlyContinue + Write-Host("Successfully fetched ContainerInsightsExtension Data Collection Rule Association ...") -ForegroundColor Green + if ($null -eq $dcrAssociation) { + Write-Host("") + Write-Host("ContainerInsightsExtension Data Collection Rule Association doenst exist.") -ForegroundColor Red Write-Host("") + Stop-Transcript + exit 1 } - catch { - Write-Host ("Template deployment failed with an error: '" + $Error[0] + "' ") -ForegroundColor Red - Write-Host("Please contact us by emailing askcoin@microsoft.com for help") -ForegroundColor Red + } + catch { + Write-Host("") + Write-Host("Failed to get the data collection Rule Association. Please make sure that it hasn't been deleted and you have access to it.") -ForegroundColor Red + Write-Host("If ContainerInsightsExtension DataCollectionRule Association has been deleted accidentally, disable and enable Monitoring addon back to get this fixed.") -ForegroundColor Red + Write-Host("") + Stop-Transcript + exit 1 + } + } + else { + + try { + $WorkspaceIPDetails = Get-AzOperationalInsightsIntelligencePacks -ResourceGroupName $workspaceResourceGroupName -WorkspaceName $workspaceName -ErrorAction Stop -WarningAction silentlyContinue + Write-Host("Successfully fetched workspace IP details...") -ForegroundColor Green + Write-Host("") + } + catch { + Write-Host("") + Write-Host("Failed to get the list of solutions onboarded to the workspace. Please make sure that it hasn't been deleted and you have access to it.") -ForegroundColor Red + Write-Host("") + Stop-Transcript + exit 1 + } + + try { + $ContainerInsightsIndex = $WorkspaceIPDetails.Name.IndexOf("ContainerInsights") + Write-Host("Successfully located ContainerInsights solution") -ForegroundColor Green + Write-Host("") + } + catch { + Write-Host("Failed to get ContainerInsights solution details from the workspace") -ForegroundColor Red + Write-Host("") + Stop-Transcript + exit 1 + } + + + $isSolutionOnboarded = $WorkspaceIPDetails.Enabled[$ContainerInsightsIndex] + if ($isSolutionOnboarded) { + if ($WorkspacePricingTier -eq "Free") { + Write-Host("Pricing tier of the configured LogAnalytics workspace is Free so you may need to upgrade to pricing tier to non-Free") -ForegroundColor Yellow } } else { - Write-Host("The container health solution isn't onboarded to your cluster. This required for the monitoring to work. 
Please contact us by emailing askcoin@microsoft.com if you need any help on this") -ForegroundColor Red + # + # Check contributor access to WS + # + $message = "Detected that there is a workspace associated with this cluster, but workspace - '" + $workspaceName + "' in subscription '" + $workspaceSubscriptionId + "' IS NOT ONBOARDED with container health solution." + Write-Host($message) + $question = " Do you want to onboard container health to the workspace?" + + $choices = New-Object Collections.ObjectModel.Collection[Management.Automation.Host.ChoiceDescription] + $choices.Add((New-Object Management.Automation.Host.ChoiceDescription -ArgumentList '&Yes')) + $choices.Add((New-Object Management.Automation.Host.ChoiceDescription -ArgumentList '&No')) + + $decision = $Host.UI.PromptForChoice($message, $question, $choices, 0) + + if ($decision -eq 0) { + Write-Host("Deploying template to onboard container health : Please wait...") + + $DeploymentName = "ContainerInsightsSolutionOnboarding-" + ((Get-Date).ToUniversalTime()).ToString('MMdd-HHmm') + $Parameters = @{ } + $Parameters.Add("workspaceResourceId", $LogAnalyticsWorkspaceResourceID) + $Parameters.Add("workspaceRegion", $WorkspaceLocation) + $Parameters + try { + New-AzResourceGroupDeployment -Name $DeploymentName ` + -ResourceGroupName $workspaceResourceGroupName ` + -TemplateUri https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/templates/azuremonitor-containerSolution.json ` + -TemplateParameterObject $Parameters -ErrorAction Stop` + + Write-Host("") + Write-Host("Successfully added Container Insights Solution") -ForegroundColor Green + + Write-Host("") + } + catch { + Write-Host ("Template deployment failed with an error: '" + $Error[0] + "' ") -ForegroundColor Red + Write-Host("Please contact us by emailing askcoin@microsoft.com for help") -ForegroundColor Red + } + } + else { + Write-Host("The container health solution isn't onboarded to your cluster. This required for the monitoring to work. 
Please contact us by emailing askcoin@microsoft.com if you need any help on this") -ForegroundColor Red + } } } } @@ -737,17 +836,11 @@ if ("AKS" -eq $ClusterType ) { Write-Host("Getting Kubeconfig of the cluster...") Import-AzAksCredential -Id $ClusterResourceId -Force -ErrorAction Stop - Write-Host("Successful got the Kubeconfig of the cluster.") - - Write-Host("Get current context of the k8s cluster") - $clusterContext = kubectl config current-context - Write-Host $clusterContext + Write-Host("Successfully got the Kubeconfig of the cluster.") - if ($clusterContext -ne $ClusterName) { - Write-Host("Switch to cluster context to:", $ClusterName ) - kubectl config use-context $ClusterName - Write-Host("Successfully switche current context of the k8s cluster to:", $ClusterName) - } + Write-Host("Switch to cluster context to:", $ClusterName ) + kubectl config use-context $ClusterName + Write-Host("Successfully switched current context of the k8s cluster to:", $ClusterName) Write-Host("Check whether the omsagent replicaset pod running correctly ...") $rsPod = kubectl get deployments omsagent-rs -n kube-system -o json | ConvertFrom-Json @@ -875,11 +968,16 @@ if ("AKS" -eq $ClusterType ) { Write-Host("Checking agent version...") try { - Write-Host("KubeConfig: " + $KubeConfig) $omsagentInfo = kubectl get pods -n kube-system -o json -l rsName=omsagent-rs | ConvertFrom-Json - $omsagentImage = $omsagentInfo.items.spec.containers.image.split(":")[1] - + for ($index = 0; $index -le $omsagentInfo.items.spec.containers.Length; $index++) + { + $containerName = $omsagentInfo.items.spec.containers[$index].Name + if ($containerName -eq "omsagent") { + $omsagentImage = $omsagentInfo.items.spec.containers[$index].image.split(":")[1] + break + } + } Write-Host('The version of the omsagent running on your cluster is ' + $omsagentImage) Write-Host('You can encounter problems with your cluster if your omsagent version isnt on the latest version. 
Please go to https://docs.microsoft.com/en-us/azure/azure-monitor/insights/container-insights-manage-agent and validate that you have the latest omsagent version running.') -ForegroundColor Yellow } catch { From d76f2b282709ccbbeded6bdf2062980bf13f9157 Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Wed, 30 Mar 2022 10:43:32 -0700 Subject: [PATCH 214/301] Sarah/health deprecation (#735) Removes all health feature related code --- Health/Readme.md | 8 - Health/documentation.md | 93 - .../container-insights-health-page.png | Bin 119179 -> 0 bytes .../criticalicon.png | Bin 707 -> 0 bytes .../container-insights-health/grayicon.png | Bin 609 -> 0 bytes .../health-overview-aggregate-monitor.png | Bin 119754 -> 0 bytes .../health-overview-unit-monitor.png | Bin 151432 -> 0 bytes .../health-property-pane-overview.png | Bin 95717 -> 0 bytes .../health-view-01.png | Bin 214560 -> 0 bytes .../health-view-kube-infra-01.png | Bin 132299 -> 0 bytes .../health-view-nodes-01.png | Bin 87290 -> 0 bytes .../health-view-property-pane.png | Bin 66205 -> 0 bytes .../container-insights-health/healthyicon.png | Bin 642 -> 0 bytes .../media/container-insights-health/readme.md | 1 - .../container-insights-health/warningicon.png | Bin 1795 -> 0 bytes Health/onboarding_instructions.md | 43 - Rakefile | 9 - .../scripts/tomlparser-agent-config.rb | 8 - .../scripts/tomlparser-prom-agent-config.rb | 1 - build/linux/installer/conf/container.conf | 38 - build/linux/installer/conf/kube.conf | 47 - .../installer/datafiles/base_container.data | 45 - .../templates/omsagent-crd.yaml | 36 - .../templates/omsagent-deployment.yaml | 3 - .../templates/omsagent-rbac.yaml | 3 - .../templates/omsagent-rs-configmap.yaml | 35 - .../templates/omsagent-service.yaml | 12 - kubernetes/linux/mdsd.xml | 26 - kubernetes/omsagent.yaml | 78 - .../ci-extension-dcr-streams.md | 16 +- .../preview/health/HealthAgentOnboarding.ps1 | 432 -- scripts/preview/health/HealthOnboarding.md | 40 - scripts/preview/health/customOnboarding.json | 44 - .../health/omsagent-template-aks-engine.yaml | 586 -- scripts/preview/health/omsagent-template.yaml | 586 -- scripts/preview/health/optouttemplate.json | 36 - .../plugins/go/src/ingestion_token_utils.go | 1 - .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 6 - source/plugins/ruby/constants.rb | 1 - .../ruby/filter_cadvisor_health_container.rb | 106 - .../ruby/filter_cadvisor_health_node.rb | 197 - .../ruby/filter_health_model_builder.rb | 286 - .../ruby/health/agg_monitor_id_labels.rb | 26 - .../plugins/ruby/health/aggregate_monitor.rb | 227 - .../aggregate_monitor_state_finalizer.rb | 35 - .../ruby/health/cluster_health_state.rb | 122 - .../health_container_cpu_memory_aggregator.rb | 386 -- ...h_container_cpu_memory_record_formatter.rb | 38 - .../ruby/health/health_hierarchy_builder.rb | 78 - .../health/health_kube_api_down_handler.rb | 30 - .../health/health_kubernetes_resources.rb | 288 - .../health/health_missing_signal_generator.rb | 147 - .../ruby/health/health_model_buffer.rb | 31 - .../ruby/health/health_model_builder.rb | 37 - .../ruby/health/health_model_constants.rb | 82 - .../health/health_model_definition_parser.rb | 52 - .../ruby/health/health_monitor_helpers.rb | 74 - .../ruby/health/health_monitor_optimizer.rb | 54 - .../ruby/health/health_monitor_provider.rb | 139 - .../ruby/health/health_monitor_record.rb | 11 - .../ruby/health/health_monitor_state.rb | 266 - .../ruby/health/health_monitor_telemetry.rb | 59 - 
.../ruby/health/health_monitor_utils.rb | 323 - .../ruby/health/health_signal_reducer.rb | 53 - source/plugins/ruby/health/monitor_factory.rb | 32 - source/plugins/ruby/health/monitor_set.rb | 44 - .../health/node_monitor_hierarchy_reducer.rb | 34 - .../ruby/health/parent_monitor_provider.rb | 89 - source/plugins/ruby/health/unit_monitor.rb | 27 - source/plugins/ruby/in_cadvisor_perf.rb | 4 - source/plugins/ruby/in_kube_health.rb | 370 - source/plugins/ruby/out_health_forward.rb | 838 --- .../filter_health_model_builder_test.rb | 54 - .../plugins/health/aggregate_monitor_spec.rb | 256 - .../aggregate_monitor_state_finalizer_spec.rb | 59 - test/unit-tests/plugins/health/ca.crt | 1 - .../plugins/health/cadvisor_perf.json | 2540 ------- .../health/cluster_health_state_spec.rb | 37 - .../plugins/health/deployments.json | 1385 ---- ...th_container_cpu_memory_aggregator_spec.rb | 190 - ...tainer_cpu_memory_record_formatter_spec.rb | 58 - .../health/health_hierarchy_builder_spec.rb | 11 - .../health/health_kubernetes_resource_spec.rb | 222 - .../health_missing_signal_generator_spec.rb | 79 - .../health/health_model_buffer_spec.rb | 25 - .../health/health_model_builder_spec.rb | 37 - .../health/health_model_builder_test.rb | 516 -- .../health_model_definition_parser_spec.rb | 24 - .../health/health_monitor_state_spec.rb | 176 - .../health/health_signal_reducer_spec.rb | 96 - .../health/kube_api_down_handler_spec.rb | 26 - .../plugins/health/monitor_factory_spec.rb | 28 - .../plugins/health/monitor_set_spec.rb | 58 - test/unit-tests/plugins/health/nodes.json | 1966 ------ .../health/parent_monitor_provider_spec.rb | 146 - test/unit-tests/plugins/health/pods.json | 5987 ----------------- .../health/test_health_model_definition.json | 42 - .../plugins/health/unit_monitor_spec.rb | 20 - .../plugins/health/unit_monitor_test.rb | 16 - test/unit-tests/plugins/test_helpers.rb | 3 - 100 files changed, 1 insertion(+), 20876 deletions(-) delete mode 100644 Health/Readme.md delete mode 100644 Health/documentation.md delete mode 100644 Health/media/container-insights-health/container-insights-health-page.png delete mode 100644 Health/media/container-insights-health/criticalicon.png delete mode 100644 Health/media/container-insights-health/grayicon.png delete mode 100644 Health/media/container-insights-health/health-overview-aggregate-monitor.png delete mode 100644 Health/media/container-insights-health/health-overview-unit-monitor.png delete mode 100644 Health/media/container-insights-health/health-property-pane-overview.png delete mode 100644 Health/media/container-insights-health/health-view-01.png delete mode 100644 Health/media/container-insights-health/health-view-kube-infra-01.png delete mode 100644 Health/media/container-insights-health/health-view-nodes-01.png delete mode 100644 Health/media/container-insights-health/health-view-property-pane.png delete mode 100644 Health/media/container-insights-health/healthyicon.png delete mode 100644 Health/media/container-insights-health/readme.md delete mode 100644 Health/media/container-insights-health/warningicon.png delete mode 100644 Health/onboarding_instructions.md delete mode 100644 Rakefile delete mode 100644 charts/azuremonitor-containers/templates/omsagent-crd.yaml delete mode 100644 charts/azuremonitor-containers/templates/omsagent-service.yaml delete mode 100644 scripts/preview/health/HealthAgentOnboarding.ps1 delete mode 100644 scripts/preview/health/HealthOnboarding.md delete mode 100644 scripts/preview/health/customOnboarding.json delete mode 
100644 scripts/preview/health/omsagent-template-aks-engine.yaml delete mode 100644 scripts/preview/health/omsagent-template.yaml delete mode 100644 scripts/preview/health/optouttemplate.json delete mode 100644 source/plugins/ruby/filter_cadvisor_health_container.rb delete mode 100644 source/plugins/ruby/filter_cadvisor_health_node.rb delete mode 100644 source/plugins/ruby/filter_health_model_builder.rb delete mode 100644 source/plugins/ruby/health/agg_monitor_id_labels.rb delete mode 100644 source/plugins/ruby/health/aggregate_monitor.rb delete mode 100644 source/plugins/ruby/health/aggregate_monitor_state_finalizer.rb delete mode 100644 source/plugins/ruby/health/cluster_health_state.rb delete mode 100644 source/plugins/ruby/health/health_container_cpu_memory_aggregator.rb delete mode 100644 source/plugins/ruby/health/health_container_cpu_memory_record_formatter.rb delete mode 100644 source/plugins/ruby/health/health_hierarchy_builder.rb delete mode 100644 source/plugins/ruby/health/health_kube_api_down_handler.rb delete mode 100644 source/plugins/ruby/health/health_kubernetes_resources.rb delete mode 100644 source/plugins/ruby/health/health_missing_signal_generator.rb delete mode 100644 source/plugins/ruby/health/health_model_buffer.rb delete mode 100644 source/plugins/ruby/health/health_model_builder.rb delete mode 100644 source/plugins/ruby/health/health_model_constants.rb delete mode 100644 source/plugins/ruby/health/health_model_definition_parser.rb delete mode 100644 source/plugins/ruby/health/health_monitor_helpers.rb delete mode 100644 source/plugins/ruby/health/health_monitor_optimizer.rb delete mode 100644 source/plugins/ruby/health/health_monitor_provider.rb delete mode 100644 source/plugins/ruby/health/health_monitor_record.rb delete mode 100644 source/plugins/ruby/health/health_monitor_state.rb delete mode 100644 source/plugins/ruby/health/health_monitor_telemetry.rb delete mode 100644 source/plugins/ruby/health/health_monitor_utils.rb delete mode 100644 source/plugins/ruby/health/health_signal_reducer.rb delete mode 100644 source/plugins/ruby/health/monitor_factory.rb delete mode 100644 source/plugins/ruby/health/monitor_set.rb delete mode 100644 source/plugins/ruby/health/node_monitor_hierarchy_reducer.rb delete mode 100644 source/plugins/ruby/health/parent_monitor_provider.rb delete mode 100644 source/plugins/ruby/health/unit_monitor.rb delete mode 100644 source/plugins/ruby/in_kube_health.rb delete mode 100644 source/plugins/ruby/out_health_forward.rb delete mode 100644 test/unit-tests/plugins/filter_health_model_builder_test.rb delete mode 100644 test/unit-tests/plugins/health/aggregate_monitor_spec.rb delete mode 100644 test/unit-tests/plugins/health/aggregate_monitor_state_finalizer_spec.rb delete mode 100644 test/unit-tests/plugins/health/ca.crt delete mode 100644 test/unit-tests/plugins/health/cadvisor_perf.json delete mode 100644 test/unit-tests/plugins/health/cluster_health_state_spec.rb delete mode 100644 test/unit-tests/plugins/health/deployments.json delete mode 100644 test/unit-tests/plugins/health/health_container_cpu_memory_aggregator_spec.rb delete mode 100644 test/unit-tests/plugins/health/health_container_cpu_memory_record_formatter_spec.rb delete mode 100644 test/unit-tests/plugins/health/health_hierarchy_builder_spec.rb delete mode 100644 test/unit-tests/plugins/health/health_kubernetes_resource_spec.rb delete mode 100644 test/unit-tests/plugins/health/health_missing_signal_generator_spec.rb delete mode 100644 
test/unit-tests/plugins/health/health_model_buffer_spec.rb delete mode 100644 test/unit-tests/plugins/health/health_model_builder_spec.rb delete mode 100644 test/unit-tests/plugins/health/health_model_builder_test.rb delete mode 100644 test/unit-tests/plugins/health/health_model_definition_parser_spec.rb delete mode 100644 test/unit-tests/plugins/health/health_monitor_state_spec.rb delete mode 100644 test/unit-tests/plugins/health/health_signal_reducer_spec.rb delete mode 100644 test/unit-tests/plugins/health/kube_api_down_handler_spec.rb delete mode 100644 test/unit-tests/plugins/health/monitor_factory_spec.rb delete mode 100644 test/unit-tests/plugins/health/monitor_set_spec.rb delete mode 100644 test/unit-tests/plugins/health/nodes.json delete mode 100644 test/unit-tests/plugins/health/parent_monitor_provider_spec.rb delete mode 100644 test/unit-tests/plugins/health/pods.json delete mode 100644 test/unit-tests/plugins/health/test_health_model_definition.json delete mode 100644 test/unit-tests/plugins/health/unit_monitor_spec.rb delete mode 100644 test/unit-tests/plugins/health/unit_monitor_test.rb delete mode 100644 test/unit-tests/plugins/test_helpers.rb diff --git a/Health/Readme.md b/Health/Readme.md deleted file mode 100644 index aed79bd26..000000000 --- a/Health/Readme.md +++ /dev/null @@ -1,8 +0,0 @@ -# Azure Monitor for containers Health(Tab) limited preview -Azure Monitor for containers,Health(Tab) feature provides proactive health monitoring of your Kubernetes cluster to help you identify and diagnose issues. This feature is currently in limited preview, you would need to on-board to this feature manually. If you would like to be part of the limited preview, please reach out to us at [askcoin@microsoft.com](mailto:askcoin@microsoft.com). - -Learn more about the Health(Tab) feature [here](https://aka.ms/cihealthdoc) - -Learn more about how to onboard [here](https://aka.ms/cihealthob) - - diff --git a/Health/documentation.md b/Health/documentation.md deleted file mode 100644 index 6ca547567..000000000 --- a/Health/documentation.md +++ /dev/null @@ -1,93 +0,0 @@ -# Understand Kubernetes cluster health with Azure Monitor for containers - -With Azure Monitor for containers, it monitors and reports health status of the managed infrastructure components and all nodes running on any Kubernetes cluster supported by Azure Monitor for containers. This experience extends beyond the cluster health status calculated and reported on the [multi-cluster view](container-insights-analyze.md#multi-cluster-view-from-azure-monitor), where now you can understand if one or more nodes in the cluster are resource constrained, or a node or pod is unavailable that could impact a running application in the cluster based on curated metrics. - ->[!NOTE] ->The Health feature is only available to customers who are part of limited preview. If you would like to be part of Health limited preview please reach out to [askcoin@microsoft.com]. -> - -For information about how to enable Azure Monitor for containers, see [Onboard Azure Monitor for containers](container-insights-onboard.md). - ->[!NOTE] ->To support AKS Engine clusters, verify it meets the following: ->- It is using the latest version of the [HELM client](https://helm.sh/docs/using_helm/). ->- The containerized agent version is *microsoft/oms:ciprod11012019*. To upgrade the agent, see [upgrading agent on Kubernetes cluster](container-insights-manage-agent.md#how-to-upgrade-the-azure-monitor-for-containers-agent). 
-> - -## Overview - -In Azure Monitor for containers, the Health (preview) feature provides proactive health monitoring of your Kubernetes cluster to help you identify and diagnose issues. It gives you the ability to view significant issues detected. Monitors evaluating the health of your cluster run on the containerized agent in your cluster, and the health data is written to the **KubeHealth** table in your Log Analytics workspace. - -Kubernetes cluster health is based on a number of monitoring scenarios organized by the following Kubernetes objects and abstractions: - -- Kubernetes infrastructure - provides a rollup of the Kubernetes API server, ReplicaSets, and DaemonSets running on nodes deployed in your cluster by evaluating CPU and memory utilization, and a Pods availability - - ![Kubernetes infrastructure health rollup view](./media/container-insights-health/health-view-kube-infra-01.png) - -- Nodes - provides a rollup of the Node pools and state of individual Nodes in each pool, by evaluating CPU and memory utilization, and a Node's status as reported by Kubernetes. - - ![Nodes health rollup view](./media/container-insights-health/health-view-nodes-01.png) - -Currently, only the status of a virtual kubelet is supported. The health state for CPU and memory utilization of virtual kublet nodes is reported as **Unknown**, since a signal is not received from them. - -All monitors are shown in a hierarchical layout in the Health Hierarchy pane, where an aggregate monitor representing the Kubernetes object or abstraction (that is, Kubernetes infrastructure or Nodes) are the top-most monitor reflecting the combined health of all dependent child monitors. The key monitoring scenarios used to derive health are: - -* Evaluate CPU utilization from the node and container. -* Evaluate memory utilization from the node and container. -* Status of Pods and Nodes based on calculation of their ready state reported by Kubernetes. - -The icons used to indicate state are as follows: - -|Icon|Meaning| -|--------|-----------| -|![Green check icon indicates healthy](./media/container-insights-health/healthyicon.png)|Success, health is OK (green)| -|![Yellow triangle and exclamation mark is warning](./media/container-insights-health/warningicon.png)|Warning (yellow)| -|![Red button with white X indicates critical state](./media/container-insights-health/criticalicon.png)|Critical (red)| -|![Grayed-out icon](./media/container-insights-health/grayicon.png)|Unknown (gray)| - -## Monitor configuration - -To understand the behavior and configuration of each monitor supporting Azure Monitor for containers Health feature, see [Health monitor configuration guide](container-insights-health-monitors-config.md). - -## Sign in to the Azure portal - -Sign in to the [Azure portal](https://portal.azure.com). - -## View health of an AKS or non-AKS cluster - -Access to the Azure Monitor for containers Health (preview) feature is available directly from an AKS cluster by selecting **Insights** from the left pane in the Azure portal. Under the **Insights** section, select **Containers**. - -To view health from a non-AKS cluster, that is an AKS Engine cluster hosted on-premises or on Azure Stack, select **Azure Monitor** from the left pane in the Azure portal. Under the **Insights** section, select **Containers**. On the multi-cluster page, select the non-AKS cluster from the list. - -In Azure Monitor for containers, from the **Cluster** page, select **Health**. 
- -![Cluster health dashboard example](./media/container-insights-health/container-insights-health-page.png) - -## Review cluster health - -When the Health page opens, by default **Kubernetes Infrastructure** is selected in the **Health Aspect** grid. The grid summarizes current health rollup state of Kubernetes infrastructure and cluster nodes. Selecting either health aspect updates the results in the Health Hierarchy pane (that is, the middle-pane) and shows all child monitors in a hierarchical layout, displaying their current health state. To view more information about any dependent monitor, you can select one and a property pane automatically displays on the right side of the page. - -![Cluster health property pane](./media/container-insights-health/health-view-property-pane.png) - -On the property pane, you learn the following: - -- On the **Overview** tab, it shows the current state of the monitor selected, when the monitor was last calculated, and when the last state change occurred. Additional information is shown depending on the type of monitor selected in the hierarchy. - - If you select an aggregate monitor in the Health Hierarchy pane, under the **Overview** tab on the property pane it shows a rollup of the total number of child monitors in the hierarchy, and how many aggregate monitors are in a critical, warning, and healthy state. - - ![Health property pane Overview tab for aggregate monitor](./media/container-insights-health/health-overview-aggregate-monitor.png) - - If you select a unit monitor in the Health Hierarchy pane, it also shows under **Last state change** the previous samples calculated and reported by the containerized agent within the last four hours. This is based on the unit monitors calculation for comparing several consecutive values to determine its state. For example, if you selected the *Pod ready state* unit monitor, it shows the last two samples controlled by the parameter *ConsecutiveSamplesForStateTransition*. For more information, see the detailed description of [unit monitors](container-insights-health-monitors-config.md#unit-monitors). - - ![Health property pane Overview tab](./media/container-insights-health/health-overview-unit-monitor.png) - - If the time reported by **Last state change** is a day or older, it is the result of no changes in state for the monitor. However, if the last sample received for a unit monitor is more than four hours old, this likely indicates the containerized agent has not been sending data. If the agent knows that a particular resource exists, for example a Node, but it hasn't received data from the Node's CPU or memory utilization monitors (as an example), then the health state of the monitor is set to **Unknown**. - -- On the**Config** tab, it shows the default configuration parameter settings (only for unit monitors, not aggregate monitors) and their values. -- On the **Knowledge** tab, it contains information explaining the behavior of the monitor and how it evaluates for the unhealthy condition. - -Monitoring data on this page does not refresh automatically and you need to select **Refresh** at the top of the page to see the most recent health state received from the cluster. - -## Next steps - -View [log query examples](container-insights-log-search.md#search-logs-to-analyze-data) to see predefined queries and examples to evaluate or customize to alert, visualize, or analyze your clusters. 
diff --git a/Health/media/container-insights-health/container-insights-health-page.png b/Health/media/container-insights-health/container-insights-health-page.png deleted file mode 100644 index fa27395cb729649f2b8603d986ab06296275cbdd..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 119179 zcmd3uXH=72*PsDKsuT&m2?!+gA`p596{UzGy;ms`dPg9k1PD^3cLem24$?b>P!kZP zNGDY3AiX&8dB6Ah-kJF^e`c*2*2)bxE4feEd!KXm-q%iyuD0e~QYKP7JiNP)AE`aZ z!y}}{!y~XHzJvP@Km?kA`-SiMTvG+F0?o3CySeS4{7e}SuR4M3%8C$oPvZW_$P*8b zj{DCG|MBw&`*?W2iyx~gzwk5P&LMnDr_SD&w{-`xOYxYRgc_u$r_ifHsEpjDYiXe& z>%IQ~peFO7rKM#FyxhEZy_Rum<|nZ zFI}#maf`srw*{Kf@H|yY6Ec%)`S-*AJhnIV{AA|MSd*v2*>gLdDJTr)ucAW^hlgGr5V>_+`vCY}^waS5W<(f3U0zjo+ytZu znoRte0dafstE(&b5s67ZmEMfEh1MM**JTnvKhe<|^{q5-Rn&$e&oMQ2LsdcN9+xad z{lLFFQLq>dKbI`Aev(Pnp_712?oeNP-!QeWP zITIlMM%+g4elO+e@qDbpny-C{43b_)jBq)o?;`sY}c{cCyTfqPas!=bH{ z%S;-)CFDm$TlIxlC`Yozgh3siwAaSFoKbG!p{@v`#LUc}G3wEtloQt+s*#IXZ`!3_ zpKn!m$OT4wW=u^P&eX5kuJ-xF*@*v3n<4edm4%$2UI`H+Ni)XnZ8LNA(aXJ&ZBC6B zJvnEKc@eo(i579D+0msH@x5Q@qn1WEE+OYFGwvcBw<_lh0Zt{9b#4u6>X3`2srZpB zDNURXL#FN0ri&jZ^Po(HI<7n(x=c|OX(pWXa`rZ%^mXK6INScwF+Wt^Fc0@#2%fA> z15Zx12e;&KPdSZOv>rZrnk6|>OKVu?m^g>7k4tWIk7Lp%_*)G*dZ(YX$HJw8ztCXq z`|y?9fecpKd20kUj$4T?F}d*_zi*fa1_pB9C3DHh zFvZdfE9}^?&3l!j2hf> zo)IlQ0J8*Ls|g%}bLlE!2EgT&eB=_GmcJ_C__c{b_5=Iti({0jLAf#PS(fCqSgWp; z!uE76s@4bE7I^wYPVW2Y&)1t@imj)o44z#R$M7J;Fir6~6L9<$bi zKIXGu<2pMhSH1ovpR#r0P8UKKw8_P-dx>YX%gtKo#!GcHhf)O(u8uI+@2@Prh(~zJ zHdN0sKXjyek^&5)RJ(7ejYWKXSJ+v0ITm=ob-$ae5!bby zZpOs*k*xLUfS|~`%#rjKLDaZ4waQJilz>z#FZ8A<$LY|Ni~BpFqQ3Z3(R`1L`w!b| z?!)!*BtNv6PTk8vNVMoz3rpUrD1*Dx1swmhJCc$OIN6RlKU$yJ8UB1lWqEnJm%tu! z)>Z3s+h_jf>PY7@%jsgbWe<_TroEwb+S{_%K4Y9Z8~XeD1kBb$p{Onb@qk7~{^&?E z^&sJSmI-APx3`=qGfEeC(5@#t`n?=0T#s6@r_(kjzP`EK%l*Qo+gb1#q;5AsOKM-5 z)a1GTqj_(n!mMR~yB70tm<9 zl@*n@Di;k4AWz7g2I`3CU{x7` z7ShD*zI>5u-y=kq&%Xj+DNsHBq#3IZ>kc}=-s0BHe}T6w`M_qX@HQs(w~8L*aAlxz zDU!KNLKC;Cx?k=z6zSwC5M9cE%qbQ3uZ6Twls_xJ_l0e&-v?n*fuLNH@sy zs&s`D9;~KWu&qw?*Vw1~wtaZ?B-TjNbVRVMW%|3~o=gbeQt;5j*WKaCQO9pnE$bY& zeth}N`-}L>uj@ip${1B6)36^w1oMY~pVy7Pz?p7hdin;*09$=52L}h=I*sM|;p)lW z@ubBY*xbp^%*?{?tD^+haTef(=cV_gnS1p%H=|dAj(tbe^t?I@}>>O9(qMB zAMi`*`(H2RO?22&JH{ikf1W@gdBdLie;-b=84sVmn59pP*>rwZ$~{H@>ib>wnDE^+ z)DOFe(bL4F2{9r5?@w+o{?O_knJG!2ZMm1^>C;lE)JcErT-i23N(f*HIbWs6Ok`VW z!i7lz%~KWbh+QQEBM>scVS2q z{VE?FCYg|0GK+Y>Zx&2%FFXTBvl7+P(>Z~< zZw{m;YlCdG*zdJ(5PPDl42Rt6LJ2|1seukGLe?dxX!H?( zF3qM_pauWdY!bKDp-X`Vki`$>{nwF5NB(}b>x|@OR^fcQs7=}BF3uXu7W*%H#8xa2 zZD5a;NBn&&+hAx@OjuQgl2*PmZix}k!-OcRaf0p5ST z{A_QA$LhE&JVViHAiN%H7A<>ULvHE z-xONAc<|KkNebI{wKsnFI!5q(C{_hBUQC_)rO3%tf|&RKnfmeJ1CJ}D&Ej>j_J%g4Gk#j-UeMCPs%%2b(5>- zXUpW&eO+2S72t2qXTyl`$~vm6`1VGz1@zM-$!inEY#wNfqqwn9GIY8xKxR|LtkPV5 zs~dB$>9;eS=Qh{86#DyeVM(+M=ra}on^Yo`M|C5XY1E&Qx&V4MIG;9L;-&yIamruO z*o}DRC)Bm&BeMut3b{UhHFI8scaNIdYBHnJ>$v>Ir=*cRnrg3Ns^Ng8PPsLILrr2u zZ4pJ1J@5HT!0pZYe#y7ubzh%<1#ua(&wC6B)>F_l8#;J5tV`@?PW4fdeix{eS_m3P z6lpLB3nIXYw+gr=XM|JbVMO(2P`Zv*;dDwsA^3((_C}7Vz)q;#$!X`YGJI0W`6Hd6 zalZG~#Dw5@*t%`o+Ogj72io^ibprP7B6nGYx?9@pat~TtPucue5_@cJ4nhjFWcKVF z{KCcw%b(2DIe9+y)iEYGWb(_)iLWy4i)W5XP`KFmxSn=vD9t zUAfZ+v|xq*FQeg7*p-r`q@B zqxa-nE6-;=%1F-BI2C4K_9L0omxC=7SGc|NB!XPsqg~%$G3^}P>sn@mTSlvSfAv?< zD*nEz73>+8m)yl^ab(MG^Hw^${28L_@NXxJYWk10Xp{_jsf@&a*-W{8!G5itsRp&p zOIcK+aXV3@N{(5n+qSrx8e5`&)4u=p+F>vS#319fVRiZ#aoK~Ahtd1ENY8U32|@-O z*3)v_nzO#zMYG!Z^qzYRqfe7{6r5yN00C27rQQH_ayVH~!P=BFVAzkM-BgxUGNh}N zN@*z~qU`8uJ6cwyd%Cwh-Rm2O`RzTzD-jQf+)srD+8IjQ{FfuT-zuq~_TN=TtDaL0 z(ouBQl!GRA{C6rI2FdSIwX+0L(Z-(JjJI#2^lhd`GZVVw$72dQkqCa-D5#yrq;0o0 
zm)X?xQvRby)$yy3sPo;Ix;2d$zRJW;grgUCubz>`XUn|sV*EfRDA+v}V~GL_S0Ny# zqCSP5WSGr4kOK=soprny%_8{}S{b)CekC~Prdko9riYin6=>L)b|@>oOHL$K{~QoN~)7RIV$-Ea}g zbOZRE*2&FgEqZVG>}=kttYSrWG;u1u<9l^IXkko);m=MlT>(3DTL(fvNhRfIry=xe z&cuA(J05^3&S9_*U8jSG0U9>whU#JLtcHp}%bFWmyM70@J+g^gAsdUDl=WJO#h$}8 zdCjG$@M`_EGkoq!p=p1Z-LU7O!f?g4J)^$iTCi@ti;>Po2FfW9>}F{WQE-vzUa7WZ z)O)(xLOv9+C(+t)jvs$2HFGxv`jdWOL^_%|fu>@0t;8 zI~sMnUf#tM2&@tC(>6XhlE%Q6g51XLW$)Rus3z&wt=zaU`zZvZQ>{l(H+XI^3Hg^} z{b9%1lOvSM)A6Ar3BIj5jd_1ZY5H@6XlQ$G1?x4sO=ank zBJ}pkeYs2u33ZS$CFD7g&&l*sF_p3?T)6#e5KX}=+X`@S-0V!Vf$Mr*TZ3gi0YYar z!xZh~)Rsh9tc-9)uiCZPrg5kBA@R`h%(twpsG4ZjGZh%=w6-J+KLpwF^RLg_!*veK zH#{aeyMq=-_p0nY$SM4JFpn|r8FKiXJy%s84x~R{605|{r-H_(6WkarBQE825zUqFTW;Hu6mxPatA4g>Q0!51rRwZ zD{rx`NtHJk_Dw58pmv>NOHGblX|MDWk6|b=`6ih6z-FARNE~a>1gp>N6#kmTxL{T{ zNETfdd$qMfjdVwpBW2V?uW;d%22Rc`8Y~uq7!s0AtlM9#o=wAgO4Fog^H;UzRQLH+ zKJQ&`8$hrh#c|3LQo|Wr4cu&4`}tetOS+*JyL5E5#lydhsb4dpAiKki2&Ho=>-t zlwqh*0x%ccLrgV6R1!gOhmeGH6S`^DR+_J5$P2!Wj|wbu?38&_4X;u-m@n#7!jG|z z7k^A#u#a&rwc|O8z8o!#I%OA>BH5!Y>7mZd_R1DY_stfGJSCa2T302sVq(zHvVhaB zGu645ZQTm;kv7ps0$@4; zn^bni?CK!tT+T^Q_Q72A>Y&ftj`LYtlP6;d=L7Xn47eU2aLY+z#3z#7jAV|KR!#8dfr?ysE zs8i|?z_M(V6haq$s;PPQuV~_L?gcgESJ?GvfieSCURdH0_MI2YEBh!tvU_i*p4L?` zhI1}W^QLi<%C6RxSw>|GHz7W=%I?;Y7rm#J)eV~Vxzp(ol_xx9%Ay<|dhR_N*iA=j z6g03pB%Dyn&o1jLhoF<*4yxAIp=bB0AdxlOR+Cl1D>#0CxPF?~fW zHT#mKa_+?xD;_(`qkg6M=V2dpd)cq;`wBH4hYHN(hA-k-xWuD7x4j?xeSE2mx%mw% zzWLI&{w2Asl%(77GREhtx?HtJ5i(>Z*Ofd%BcHAP$qM zCo(mp17Lw-9-%%PwKTLKay`d7@e`P7FLe3tvVr_^CKyG zqIs_E;iDfl+FzBbN0}X-bCgVeQlzM3Ds)#mNLl8sM20~;mwft^$KD#`4$~9CYs(_C zV^YI=HtVfD7q6eig63%erJyJJ`-M|#4d+pg zOQzfPbO@+UNTiAr(!rm)$WBR0V^r%OQ^cVzv7}gg7C_12t$J)-tBrz)1#tPOy|~!b zt?xPdO*-moN(uN8B2DL_d4^sl-|I?v7U8j4=+hJ+X`0<&1%C|1rlOBH%EpLL%%^%Syh}ehY9xwCEBe=TXanOuczCv6vbH+(i{J- z8Xu;&dgDjYw|W=5NLJ!0hh*==AlPJ|UtgX!q*4cot~NUMLL`n5vBKlhW<41n%+K!& zOl606tRb=1M}bq~#qN)h_g&((d-9tg=`9?L^1Q6wMXzYPRcbAk+!eiC4e-jHqm~<- zME$U9oIDALV6*Kik>8F3DogLXU3^3qri{D}GgT;&jPQE`L_Sc*Yc=JO>K7jt`>q`y zCz{=OFb{Dc03l&DT6ziXy1zk~>}O zig0DM4KQLWD+L?_+wF+dsZu#)f7B6kmJyNL%zdEh3+R@?;5?4Q5^k=eXeUoPq6ubF z=cgtUkJ++CnMo1~@;fQy==U;8NifRPv2?0{zuHhAu_$U%xE#?QKPf{UdZwj5c;6#wz~5=7tz*(Q@bXa{b`LMsfmHB@QOA{x&61nlKb-?Ug4>Ks--ms z%5-yvjxUSxZn!Qr%Im+AooJu0axtWH(QjSSjGJ+58KD4m*T>ooU52p(8spg=SYqWB z*!>l2pV?#t^d$IdoAfmIN_K}~B;a4Ks!$w^j~OX zYN86?p$@q-LTgQU>mspaJc4Zgau&VQi6v*M{9MMOV{pWTeO(`9+2<>^(=Uhqur{Xe zW%vQ6wpiqizL?u@dwTn_XWh7D#&U7k*v3RFo_(7c+Q&d9w1HXqrt`!5n5i1!yZW!V z3k{}~QiPLgw`y+*R;T;cZQJrL%-l=y-&vtGE1pWUbAVmy-!I6j(r2sVE8-?S%A>Oc zU>ui}q(-)Hb95s}1s_huSUZFfh{(DC;GlLYV>3VBR1ZF03S*2%=_BO^J)acAJfO?h z!l4+{V_|h}N*5-XpvXB8H8p}HkX`l;2tf@W6-f9x@l?8sC8^6^IYp$LDa znv%42D~ET>G=geQW1E6_*<_=}$m%sK#__xRUdsFqYrKjS;2cXLpA=BDu3^!lCM1s4 z(1r!hy2Ab}h0zMk9`@0UWUxl9|AG-qG+`mS9 zQrEi*h^uVruN}m6`-C6EyeF6^XeT%n*qs>B>%6Gn#r@rf_BmWGMgDSFD_PVd^LCN?Xec!532r56YZb5;8d@^QHy#*?K4g>nv?&7Ue!q?}}1P;P(y4 zbe;;r7lPK8qnH%Zq*FN;@}`6inl$NOhumHUcg~`Zs1=UR4&vy}%RJ?T69$CO@L`iE zXUhP)g1>WZiL!?;Up!ctdqt3DanByv-Eg5+G(seMfc>DFUdKmWJHcDD7iU=a+lzj8 zvzneHP+zaa+Z)r@vLN!NLlmOhEjFfTBITzdB^Lq!=ZqF6=lB}$fOT(DsP6>704is(8Ps8g2L7|uhn!5$}m#(ekS z3xJ~<&m+%#ptfF@N&klp4bgY5bQm4S3=QRM)?oO%I?I(NVs-g?&AJC(mch2AxgraW zuB!a6;k9zPr{plalK5lje(UyzzU7C-P8m&S#dQU2+aoO_(r0zw?5=GY3V?&ob~*Of zAMl0);mZIRq)&6}0UUUKw{p*gJ_|uOBTO1AKm@Phy)`4G(335?%>p#9%~|z~#f8N? 
z%M<~LKm}R1nk%L5lj!55%L7&7q3@rxSbD~r*bixp4|23pr=EWH6dwJWgrOm7uN1K$ zK_!lSMzZZmv+2Yty)hI5oEasYp}RTov#r?9IH0Z+bNwK^=cXl{eLmrKPxxPwBIHsH zzM-S;DI_qUFFL`~LV8pWT$rbLbhELwG3-`nR+#C&`V?0Z^~a?nE8;1bm-Tio^hWGP zzmI6@2ZGEGi{k&~tgFEizPaVL7I;NPDBTYKyBQ|62;h{HJmSa2J70)iu?|(5 zEJL^!0Nwb3N;@iJeE?UAOSY&1kyoqgbVbwQ0otaTe9B_|*o>1PbJggj>L7eynV-D3 z=6t8EN?wi*H1^~WCmZlu^|$)H0PRL;R`8`@Z_5~2<%-M=%W8^n)T!BO_63$`!kGk! z)^%|YD}RXK<)-2L>{&BaA{igE2Yn;c*W3K;v2$a(w*_#lun8?#mHJa>@q z_Y{)G>P>2)vO+ljrcCxKy7zMHh014v`6IL~xGBu0d8{oBLq*ZDL>%P6WKI@^$j9@q z=&H}zp&)cO2IW0T7Y;(K^bN+d3!Fic_HR)ul1keot$kO~%a-=D_$ru91F}0s{7qm$ z+Es*E0t!}Iia9}gB6#ETz^ITpt)k$q@MwYMgm2KN4|cWG4I9~+B#C=2Pv3VYB@D8^ z(tQ9hos#aqP|-|;$ zHojZw4CB_FaSg4SGTg`Ps02|g%84Qr6UxRf!0R?wMld^gW}VuS$6vRRSJ25_!xz;q zj#lrPYbFFa&~{To&&w9i-d*jx*DGncgj7-UQkWs6vk3~k+i(91S%C<7Xap{rWzu$5 zIcV8jFt3VUw|S)hvQi%os%7bGGrUtdtIJ>&c8?gX@o^>vv>g>~`91+GKYG3WC+*y^ z$)+ga=}_xWNqAu@*Y4P2YM&%J$Wo%k zF`AJp>DJA-O!7o_o%yg^!55Ko6X_OkBJF!H`{J2foyVGA`n>o~u9G7~$Jq&T=pLtz z@E?MBfW&@AME`B*;Iwz2rlm>eFktfss5kdGJKSg4YO{LRUi4&iZgTd+Yy{Tqm)f&g zvyH{?{cYg3UzX1hjC4TfnbZ0Pb(*2a5&!sA5 zMo2w|U|%52(v$5dPzx(%Q>Nfg%5HSKL(}i<)VZ~xwIfgBgfJ}niQ=2Nu{K5K(b+QF8?lMEloZ~RUt)xNIKM6p3u|KO=#Q_#oq?IO=dml#%UHA6(T6Vt0H@LsPJ?p_m zbGyQ*EpfvaBK+kXAxyjAw@Ur$l`nwE$_wnz;(towzPjiptA#%-Lo696Jp0>8O_c=K5aaw>w$0CMS z4_&C2zb#?05pUn(L7~w5a&jyoAtAYfrFkhS025Sla&lsB?k))}D#8xH!V*_iC35-d zpV2r(vdIVJ<5K<(Q6C>Aa`W)q!O?9hmX`TuIPheLv-cB^xcIoxr%#^@T1#|aw)!26 z^B5eMR##Vp{r2-|X^lFrudjDF5tNET?p8*6ad$~cRcvh^aB^~{ME!fZ)f7ApqTvA2 z!lI(Tk%5>OWrm{s4DJOI8C0cY6Y_9)9lUr-v+FO*u5h4H%?F1B{acu72Mk*v8$1or zW7y#Y0v+BFlm!hQVsEJ%8)w$E+BW?o-4%AkN58(O_#i5mg?3gPs?uO>y*l{5{G)1^ zQg|VFt_mSd_ogiH54UG(%2Mw-V^7$pj}NOXH)$|#xWNGk1uoMy6LZGl%Q4EI)kte` ziFRceDXj~`Q5t8_KPq#WCeF$CmDLNU;S)cjt+kfAVXHp{&_MNAx-Z^|-X1u7#DG|i zF)1mjD)7{e-!xU&VjgaFZ%Bu=prAm~0`&Fky@v`o?nM2ci9hdR)H}rS1`bGOV-`J} z2wmP9)ZlU{bF(eJmAnu4MprY!Ik7gZ7&lG^_|L7$%1K)_UVKHh6uy^(i1J5PiK{&S zHlFA!jsk0P*ekLwrlSU*md&!!CM9bVD|OoG^YV_7zNl=#3u?C0{YKO~#% zwU^$?nK!C-wD8O1;CudR9HLcb(ty5MRq2X^jPh-oQhhvR)_$wn@WZUd$6w`;h}1?P z4~K2pZAHpBPY49zIGBl@Q`3CEgXM{M=}JyoUbez7T)eD zqZtLvTc>Umaz+U=$&M`kkyB}25G^fe)P0#$2-QtZcAmC8RHzHWbo{~g4B|3!BK0rWryqVKpOT6(J_*}wlvG1(yR-?rBaQ)vgu z0a$^fqnSY7#O^Qxd_m;TZ=WKJzq~F`i@Dc$wj8@7GxuC8RUk&v^@rh@Cd*wy7HujO zy=~h95ecsiU6;A$?=*)NxS>USah@#1Z5n<{FnV{4{ONr$u|q`_x}lVzCQd%Fa<2$` zHWr-}&;0#oRy&3j*8{T#d!FS6qufMyh)!egM(%h!pV_>H$HSp-k~LSI1*~&x*kV6=juuUh&JJakgE-*Ei=08 zhtNq)UsJf)F!LI(gDB7NtKB_8gRj0T7W*~0Op9&h4%HMaro3`i zT9Y1{18rfuyda?yY1o+3r|igAB{6mbUSB0Lk0ZSKT3|CqcThCr8E(e64Ju5hQ@I5- zL0@I7*hGJ4yc$+DKB+d%aO=K|vBP*+`yV#@{k9vqHF1{>CAJ@lqBbwGsqVjwBY23l zXRDsCe(F~_cF!NjU`k?G8Xyxo6L#~=S3_G0Y;w6m({WkeO8;#*x&B9MMm-%t63j)q zUVH-ImzS5z_SGOzg24}ulgrX|Ywe%$TaU7)S!?1T&7BZLF)NamG$>mzH8tUCC)$2q zcd=+xxeLb0uvC3DS7oK@?27awAxsgq=`#$2LY~ihsL`^i79hgO%}hb_&kd~!0kw8*8fGWtk zx;1@)+@al16%rjz2`kkr(kSrVn@^Vy^r=q`27CaTKg$q9u8|!1c0);`91Keu2c2!$ zL|6rSa3K2+SGfdVBfXVE@DuzL-G|uDap8u2hs`$zK8UpYZ zR9_pAZp7=i2ew{6aR0iI67kPQrp=CnO_OGcg-{U?@NN-+YA{XsjXFuQdK!-Z-qo+R zR_C{LXD;6-MpBRhaHrC$(9QZyV*Pm$b7NUi3OG9ec%HSsas!xT8H3I^u0FPct`~Wy zbA>=Gn%%_j#+5>_eG}Gk2l_w{s195a;0HwLi42iqThG_BAmh)ZrePg_VDrE>{{p+A zRKK&OsIfXHeg28ji+F1u_=f?KAMIllAf?6D6?p8%48 z;O7spUpxdfjj8R42xW{P&Ig~)IZl*i$V<_~E#(kS0V|0(-YJ6ehg|VuZ=IgIjk$E{ zU&D$OwQ|J2J(!QGOd8zkgPg3M@OM(M$}ENvu~k9VX>-@rz=_y&R9QkR1>B~UXT6yi zwm&4)BA^MuFvEW!ECss1g01=+-6VC7`z)j=6VTh}x(IK0azh3{aHgK&C zC8I6+E;YVv9O~rr^_ialX5h@R`cXl$-Jnf|LMr_pdv0u@)ROn1xs?ns=%1p66hW`6+{cIWxf(LpOf5?D6MH&lIAbZp|YX~m!}rpd4*Q$7#$8#%@3o2y3bSsl5ZKP3xzbgkM( zJ&m407?U&ig(bFicTWitzsxz51#!1x(|xx#RXDFQSDo+8BdBMDGMcE<^)7QT+N&wS 
zKuYcXk7f~CauDdqc16q*09lEn&P_c>BnnipkBoFWhD=UTos`tF?hyO+lRmF01WAjdXgjeWQ)z3uxiC<@ zF1Tnv`Mb67XORJ{pB%kr(1WXIqVGm8Vkh3?z_QzZ^9xW+v1)@gC) z&kqp>sR-G+WZ!x4Iv3g^^5DvJV{N;0Aufs_jf+#pf53HEbI*C@Jlj+3Zrx9;cTMXi zTC3|}<$h3E?X;PekNZNIUG6L3aJ4zuU)@>Wcwf4E;UNFZeL*T5F6Co_GHQ_V-F2CZ z$q<^q6*t37T7lbIzG7Sf%$|Ih?TTZ=^VJmYf<^5uGv1u1wVsM%ZhOI=xAydsW?KL8 z>3U=SOdsKW<77SZLB7{4WV>62Zqj~H(jBahB=5cFsu6_@8*%iHmSSwW#yt4>jWCZ3 z62ch?>DW*QSJ?-Ipew&vx2q8wP+5wQ*$+}Dk4XKxi&<(*0YWOPv$cYwp(4=``QS7&zDc zu)T0p9+0zTGW2EWb7g5r#7zP}*4`8?vo*C4`rEbq*G382c|f~No=w1W&BzTYxJG|E zf`-Wp?`MK)?lsG=pIiyZB2@dESL2gIy&$FExdg$RHR!8l`OZU!rmF31Q99Po>V2qI zv^H?%Ksz_Er+Ez;xYy%hX_ba{ z$^~z`@zo!^;jgt(t`6h84WnCUAk{P7*c}9e-nzF+!i52J3~P;FW$0W1#(w5-36_oo zq4u~e8xi|wWIc=>G`wa7j2g~Tq|Pi62m%sy>XLYfOznhBAqW-jX&UUW0Qb+{fT`St zapJ<;>j~m(x=)J&`6hMLt_~v)#>-PSZOq-5B-~roj3f12Y|ZzX(%*Omu6-F}&9r8K ziIS$zMs@g;!=y>&>b}}Gw^?QSw`E&G&y}i^Cv#~@jpQg{Dx|N5rE(L(K}Tt!pgYpo zC(qMezW?=JkU3IZSXz>2(h1ftR`PM6)poYkP5bt8T!-dCdbsFNBF)l@kDLloA4Zhd ze||QV;s-Qwa{lJUjd+t-c%8TUm<}g;Vdn`!PLW1R>M?ZR1|@Z1DyH>VK;qFA=nNqeExK$+bGeAmPh$uJMxdSW#HjY4ImNx zS=C#9)s<L6tFxn#$ICYh&Yo`)R!c zr)*%sN?^15tQ@J^x;V?i-r3Vh))d#zs7$wmRp~kpa z>&ckTKxpz}v`tTqwQM&BZMNvsfBZ_wg*2DFd|mgl8ejG=C=Jw|fRY3KAbSOBa|j8i z(!QURMxs&^VSoTMIVKKOQyM;fH}y_a;3Ydhv1xL{~TWqaT?6uKMj%2 zAVgIbRrP1sg8;;dc~xk=UpA~Ow(MoeyyBjZq~+uVBY3wM>6_6WOW+{DJ!#MV&2$Es zcb0*-*ZF!A*oeB^+Xy9bXYs(wsCxDu4UMZmBuCu3wlzKNAC2l)#Ay_!@#Dt3%*@w< z?|>|$*2B~x5A|T+?^W7*Rcqu!ET>5X79ovH`lc%?kg-o#PG8-(#qat(3URN0774W* z&xsf~1xp};OfVUJI$eOw2rJ~IHzvs?(Nc!TXtUBc^?-!ovH!FS_RRkBKLzfVw|l8+ zCGxZWnLsENr_TS+!LI+Ok+%P*jYBllkx$de7CF%YrX|@f{xRa(8XA%7lhp&m!*xTj zxSu=Is`hJRtBXSP@8cV*+!u)cI{jPl%U6p~b!2+)iw7mP!fp?W?e9xV1d4J=ir)RS z$@oZgh)bS+5hFb#Z=x;8xQ3J$0*%!2cxahEm!aq@<>Zi_qggJQ>|j0^u8fs;4)2~X zP>p(*l(bADow>4Ni;Gwr>TSI^#Ffnyg(!9h35Q6SRZP&`s)nn+#5Nk8TM%IUEG*TA-lPi|3~l9 zX4-}?T90O=KQ085e-*#EOnUjdY0UBb;x^_7^~83xa+mDy?u8?tE)0tPNNO8ZfBiVO zoLAfBIUn5{1xG@XsL-^%u3}+CgK**Uly3f+&4q#KMK>5AvZ{{*)QR`xh^2zXM!at3 z3a2CG4-6YOzrppl?^n%!{fcQz+XU&C-~uK0gxh{4@vbsJS94L2&eH94vFLKCL)EKz`La08?g?dCM!6} zH2gLW3&0?he}e9fNNyugSMfyeHV+S=*@3!Oem09@F9JOPq;m)KoFi?F^T)!i@_tQD zN7|@1Yg>2kIauf?EKHanx%hnV?ROWhVFQJyuK5{(t#gu!IThg z_B8N^zexA)8BI<{rvDSdBd*3OH{Wbpx1(-&d6e$e4KAmN&Fu)a_5vvgNJy zI;Tl22_l_6rZ?`8Cy8))BfH8T!2(=C8`8voOy5@2$k}q4PP5!998l8fkEynI5HRkL zruLL-T#%Ltm0*UsF}>}t2>o#h>aGhj>&Ssgv{-MSjFW7spfC>0KYe+!PPTp#@4q28 zMj$MP>)3n#-f7She?`MiB4THu z>`x^A{(ZbZk@$D|5uWCKJNr1ts}SeTGKgA3d2jC@94I?Fetedmefo&<)AqINm8-u_ zq3pcuQkbI!9#rM?|!X%l!+|E6OoO*{PFE& zpq>aPl}OGxA#;M)lk+dWvo-RL#%;ud01D+uqjl7_{@8TsFA1MHP&v{+ zosW(I56SN+(o8n}Aox(+YcmUZIy!BxW469TGD=?#8m(8ud*9(NB`L;pqR z&f4$xDF7fI{53b10^N|BVW6RNVs2~JN+05J*Sx7ovZCCA^woF%mJ zwRj!IW+S<{9z&l?xIWNHdAqKYoKiu0DV}a*r~$cL?ff}VWp?*xF&Pndu$g=NTCM8_ z}HZhD~glng|obt zS}LEtd{|-7YmEOZrh-U@_PZn?r;jG>@8l!*S3|q{Zk>62GbYdc zGIuqfFo^8d1AF$Pi=TdpEh4XdmanG8U2-B!vawSdSAS}w{CVxcK_ zJX{TZY*%f)_3p*By@{14>vRX+c(5RxRbu?$BB+C77Lb(hJ(Hgr<=e`_&$S+f;f>ejXGed+$VxSNfd7)+H0 z_l>GhOWemxi6gJJ(u+zO8}dR2WZC^%mIu1jzlW0}rEDg!zR9J5!zbTctgCE?I39ik z#N^}o`VX72$5%eh_$?hMd(!uyK)WeC>L_@6LbZMtN65rj3@_kJQbjcxWz}`gtNpz9 z^7?dPtmGN9Gj}?zF{O}k9r2&KJvg1@tk@cTS=RRDGr*2z-x0;%oUp)a4 zC7asso9nSs9d>7b23lIXQxbBo$ygW}{f`5{o7MvjQ;z}eKi^5=YU&k=Kp^UcFaUb~ zmw!Pk+b;%<`4oPQs!Gb^N@zO|SBLa)RWkrSuNAf-9m9l^^uS{j5%aRDC%ZcXC?wxK zy_t`czX7$p8%Dh3_FRlM-LIF@E82M6RCLC}vYN$66W0`0q9};!pGAzcUF~dmGqk&x(spcR zUidy~{~x5iRalhW`~Gbq4IZBOoA3qjZmSGjvKxgOoH#2qN9xF?7SwUD7bX z4E^3d&-eLn{C3}++2G)SnK{OCOIF}y zt1IVz(+Ir^Sd@-ixy3!z+K{ssw=p|#9#mzoHu{#djb4JDSsx0^?o+fF=J##$Pv6>G zIlbH}3YVGf$Wm{*=~;-j$P+UawZqYj3arBMG>H9_=hY%;qX(Ygkk>7TEx{7PXbB44 
zT6`vIvJ3wRk#at>UPmqN4{S@FtIF=)XAhyD6bRpELTi%G^0Q_BU_5b~8%~(VUloM5 z!V8~S%*EmE2>Z}$PjQ%zRGT9n&MQ_f%zXdIZR6`F8fMKrutBVXYO7ig$h7)8w^`BJ z+BCo>E<6-};&Q_^Jjok(1RAQ$GEK%n6bYTmUrkrVQB4P!-1Z%R+Oa$RwoVLxO=;9A z#L&&=`fknp2;6Us4VOI`Z9TE7*A=DzTNMI%3S8zsPCtG!ug=nh;a`Wr5 z+5!+)epwY zKH|X$uXMRRP=H3tEjAS+6*O%aIpl7>uao=cn|rW!z-hNs`OIW!w7gDz?~REV4)2jA zAy7fW?u&X_2QIR!So34G#AWP5(6F?{V0Il;i2Y4*q6UoM>=i`HkXh{IZM}um(27#qQ5kAbsaIf7KI*%aL$yR%@qBZr zaa50;%|r5wUmp7)-}!FTmTZT=d$k@0yZJVrWvT>2M7jSga=v^|1zu&B z5Ekry@lPWvBl zQoJG7+eH5n?X^i3ae2*=Qdjfxa1FJP-Ic;(gusz}yXpk_)&LUf{e2~a26`ZncfdPg zp;Jje4M@>VXwUUS%0D38Bn=JJFDp`)zk_(yrRjNI_qOnDC-`n1aZ#_?$l`JgpRl#k z$Vt2$E`dJ(azU%m8J))~fn6Oski*sGq-yaq#i*aH)2R#v;vb4nrKTKCfBaaX3Em_) z<)SR~;yTWQ(Evm zYE~!uyol^67f<7}h@}>c8BF4S+oZ-jE zO4vm|$v}wcGi^gq<uA`Clj8yvLinGL={O-dVBe@fH1n8PFJWsE-JWZy2l7**F_ zW!5C9G@|JSGo8#{G}o{E^(TpjKon}6UeC;0&Os(yIMHB})){0bqtaNjA)_W4;& zcdNpZb$9T#k3oc#DOfvY8TAJzLUYBXab5Wd&|~HWxi(nHx*LeBd#P+t@j=)Qce!C< ziQ)38@=3!3`nRNy^Xw$@q6ey`&>hxcya42f^y%=7>40&&DboAiYngH9YRGy-9g7ol zhu?u$oQL}2FfX0jT;#SOZz!|QnzQ8IK82W&RsGJg#9@`yX>picO@Vq1ns2n(k?}2U z_54IzxP}Fmko)Pj@Nr_Y*(44kO?2($5Wb;7lIKQwCTWtHxj%8Hh_~=Sm`vah=N>B> z>33>Gr^7roj~z(Fo1gN7LQB~#YIs3b(1&Lc%P?>arnD|-TH5+SDC}R7$;S z-B$ih><;qc8uEGIw8BL%=qoZ&H4uJ~YaP*AY*8x9v0zf0ItS^_$k=Le?_G!%yI%}+ zr@WX)9bNly=-)wDRTZ=ot%XCDne9Zjx)kygAUjDmyYW?$EJ5xS^dbZ{OX-27u%gN% zay9jR>B3rhy~ULhGa?^!`DIhEkISWL32UJyNNxn-{XIL!!Y)ZkxZPu7zZ%nprwT{y z$IUZG;9)*?y#MI}3bsRe&*k0M^>FYY$-%K-pIpt0?u)t8>vJKyk$LjSRFU&`gTmu4 z_4lkE%~vo-h9r?5{LBOKw6I9eg%R_P=oEO1AbI9ZWsi1Lv;Yzs4rey*7C?>)SZv+V zih8n%-_ekVy&4NXOi#_ep-VoG9zi%&g3)B}7;g-{qFIr8=!E5ruL#7L zt@)1ZX7L}lH4Gw)@4YreuTIR2#Fj}@1J}?N)>a&$RN!At!uDwT+{kfdwRdiDRVU!rbyqBML!Zd`6|hmT8(TZM|aiZ3;422bTdn>!fy3)K;?GCjqRH1nvu># zei#UJZ(j(A)Ys z@#*xgRS#R>uy%fls%r4lW;p?ehH0LjlH8EJ-ne%};b1_g?ulH1Kt$8HZ$aTHq*D*5 zKliV;ehIWHrASJ$?C>Xv9LIx4pulNA#Shk0|vtY#-xF3U@{uU?-P)yr{%WA`*a$(!D)@o;pyzpHVZw{yB%hLf3l-32(ZJ~-}-$%$)aJI z(x^YA-#S7(WAUx<=p*uW!%(EZ#E`JXYhI%9?y`5PoRUxbMfo4L zbv@eK=6=Fh-m$!)0-JH)dn6Pf_W<{ab&^M98&~!_?~xvoVTE&q^twaxcg!Bu-s!Dr zo0d1HZW^)7uje*&Vr-@Kumx$$_H20+gcTf#1#PYzCk{cc7ad259kw$|^zUtt1MA4^ z4fENKFCztyCv0c4E^CfXrIB`m)QLbXwI^S!=G6%w)xNWYAGe?e#Hs!y{L1inK zP4c`H5ebXqbps8T0&_~8{C%0IZy4PL~RC~xq=-J(Fy@e}eDps~qCvmdtyMhVV^(=c z5M^#~D1CP@iR*z6A{I(gJu{9(fZECuPMvYo*xS-rrtnViy!zzBy51)39gG-~(T&H6 zG%((sz<}#RGevZ5=f2|yQ(06BB)P<&yEbi8kNG)sgQk;iT6(MQ5rTD@F?fPrd{fTr zFxgYzRttpfrS;dsl!B3hj4J?Lv2C1+_<&@=xAHD^BUP=66s=VCJxx$R zkZ;hrePsWM=TiT+FD7_z!AY&hI$H2Bv~n-kGqby-WO-2F=<%hm(mzV4jRar-KV1#z zwBeXKubwvyCa^vKTEDkopl((gDN`LwFzsKa9csTPv#A*|XyE%m!Tjfr1NG|bkSe0O z{7;O5Xtc>r*;xC!WOfuXrl_F8PxI$JpekV_^*3Rq^g->&dzQ}^M!FU71E2h2%*H5p>zX~&P zz6(#0E@peS9xiZg&1K~WOEVAWn`I3ZDWg7YpT)Bfq ztis=O=e*L6jyxqTSC)JJH^8mJI>3gj^sQIK?#j)j=S>|+=0&Fzuxo*o#wR9<+0FoH z;vwe4zk-MR@$6IiHOhcz0xJtQ_w$!^i@aU_oi+*s+Q>KB1u-$$ zE;%0%VR>RAYqlihG=cYe+C3XmPrVepg`Z{m3+GX1V}px}FR0`z2-7DE$wC*mmVSQt zoZR~YG(87wh#o~|i$a~G8>H4uk8iwUQbXm1van#2o~u168P5crhMFp+86$LdOF(&x zf+v@&O9BbSRvA)_Pk-9K;6N)6JrqDHd;P9vh~oBb&gxdJ-0fC|S-D{~Bu|K4-3fdW zrL&!z<_uz*!h*zG&_g?0KU}2Lh~p$CL$U|OMrz_`mEbcLY>P%b2PVNE5uHdfWEt z(=!UUjr!64wCIt7zzx1GhFs&TT^8VBx6Y^~&+ufK5@$xjat$lZo?42Br|Ny2oGj*X zNsoWkC71I>>0ZkwGY-$GRDAy3kUd>*JbJL{EJebX>c+LPViS`+ra`MM1MS@LwP<+% zd_2hyJ#BT-e&>YmW(NwCe0BlX-Y*JMkvIt3wmsjo2724+z#|Hq-S1s>jJDBVbvW6A z)2|5?L>8ucCd5k`q?hIouiv zFWC)Hn~Rb+qp*UTu1XHUUZBfU%DLLZp2r;V zQssRq*}|h54@etIals~|h)U=)yj(+fFegB8h<@d~w zTBqPQVCYC$)7YW6wM^vN7R;PP8-!C_VAoW%`$4D2Jn=- zX;t4ydELy0)l|OBEceom_Pg;3J%Izpm-nDNeIJS1Bftxx3jp<|#gaJ&07TZW+kta{ zNk~*sAbaYJFs)^qghY#(!h|CtBC0O=%|~BQDhir3C-Z$ 
zHu!kuK|Cjkr5=%7)t}GsV$`z3{-c}4mjd0-@jTl$s~--ioS($h%{VDqiHki=Z^;Nf z&Etjbv#E1E@y=+%8K|L_>`M1tYoXmT4wq%=Q({qcD5_qF;ycHZ(=+G6Tm7SYhl{0ByZ&H*Xy`jvo%iX*Y4t|~?)3p`z1K}TkLR=OPWzug z_sJskny$e}NuM_EDfOceXJ0PRQzyaAE<*l(s(Mc9->GJlncTtPZ^^&HNC#E*^Ay$O zBNbe>izlKyh!cZKlF#L7@G_cl`+zyyl4y88sfpN#xHm^!tKUQM8V>i)hIf8qQ@O(2 z{Evg5tDemL*!*>p3&oGDsZ@gMw!VqR80MX1H8}pYEs^dRbi9|7s3lRovM6=jhL1QX zduc|W))e9Y5_T_Yo)JCfAm?@rbFbU!Q@Kd7N@}&k)pyZ!${r9z1BoYAJSXVjzFjzo zkE2zrv0fq#4$G4dbJuliE>IPvLRZiyj2h@Y`5;WgNM324D4dTk!sirTGx-B`NJQZD zHUc7sbBw55U9x{IMXN*{!hQa_y~%E@x#eoriqd9!J4fb^J)2W%UOQHibmgR!&x>4C zdWuKYgDv(n$B6dj40Gc>XppSz8ZZr~nP^T#q9I}+?>#y_TLXoC3+EUI8&xsA7S4^W z@9wTTE2Ri&2Fl5r!LO;`zmw4QqBy)>ieH>K+$+53GH)8S!~C7_+HhXV(RCoq#f>B{ zj_~IopYz!;Ny0%*k@DO8q?v<8;a`^#gxf4lXE2_RtmH;V^xuiWPX@V;x-M>A&8S2K zOTTRKhZ#3))AwzBx*k1@Rh7joqFVk=yAU<3@>ewzQkfbjKu}kq=&&#TEyCdgaxPRs z+fAXN&A~z7NTE!f!K>Ux^|m*VIPj>N+zWEBeH9!N>IKtNp3Z7T*tb=Q30i&vu zF6XI(AE!&O^5`yzSCj)kBX-bAEsO$kCu`%7EC(OTl<#DTD}l5pezZ&D-*!&a18GOf zdvCuVbSuTpYxZQ!Yg&AY5N~*jbT8WBD1IIqV(1GxoJ@1R9xJmnx_<7iQvWEa)d&3D zTI@5Z&jf3`_09BT(~51hVn;5qd!|^7dwa5TdZMFoae%JrBHMh%<+8NqnS+kkWVN_Y zN*O`)MVj3}!sjG^1UHN}wB4Jf_|vwxS0uC2knw4}`yl6SV7KHzC@&q==b#Q_m)xJ! zHvD@So+IFU(XV*amdudVQy8u945k6l8U_POXNpSGp^CWQ@)@GDiqnoTYS)={#{haV zk519kVdm{x-kF=(w#oxe$+1U%Zr&@7@%+@~D=%6*|9oz2cVT2}5)^ib<@LjaeFPpY zEhF@3Zif?I{>V>f;fJh)%+A`=?IBU&VK32M(SvP!UF|cV7;D zI)86?7Q5&&SPE&s$rfr0C8&@L&+ANASK?ps-X0pWIuwQMeXkC)2g5oh>4e(0%gRy{ zc9Mv!gySi_LpmaXwMOg5Fc*^J-BsvfN+QE3{TO_?iS+Q$TJh1N3ejnH?`zU%8(6 z+}SKpMg#Rh(8%HMXztRQE_{5x4z>G2Bk2r?x00c#mb8i|=jq2A$hb^Q3~%&EEpqfz zjZKE_U6fmW5H(u@lF4;MRsK6AMM8TTeyPkdGe>7OJG%WmDQ{(SD*`Q#GnFqd>DqiH zqS#ywPOq8ljTMBpx-ClqDUoZOu$33I>3T3BV~-;irr)&W#EC5*e4CfP*5%OEr@dXZ zDsdMgFyG8BNPWS~FkeS4B2a8t{PyyOs`4{_NzvX#novY2-@sn{c80LXw-H5jU$qRs zJnx-zWrz+(Q%ymJ)gI!l+I5!yHiSxR0AbI6V#%!O%5vrjbqIPV@3HrO@OH&=&u{!L zpg^-_N=zT34iFQHrH$T`WW}z3LeQUY=N;T-&okCBhHo}Jp4eEF;bx~}!uR#P;hFzV zWs4X(zdbc4L@YQYhBp@JT~OO}R5V`%@US~o1{{g5W<9LIyJuEJL}||~K)gyVREvM5 zw*^7eRKNUWbh&*M1b`UY74xLR`|`AUqC}Iv8A}^@#Qf9Dq_L%)rDB`e2CM_eh}4J%g;W-@g^CEq6Km*v)iWeKTR9FP5})+Ewn&xcU1sU?}p+? 
ztgXrQ`)EaYK8a257aK_h7oFmWvFk^v zF9Gs%RVAoYuW=eM4kydW%j5W59|&I0Cnqt2()t{Cq7F(JZyq(c;Zk)K(g+Fa{vW zah@^mqilFhIP5>XJ7#t|CYdsj5Sj&bS+P{25jK`|;crvN9(f8@@}&6obJ)aHcTePJ zk+c5vMF2+vmF1d9v~xqG_=hJ;g}Cx&o@yV{SlPfIoew<`wZ9et z^LMt};bRT94xo7lAE-iethr9{Oy?T71 zzqY?H?ETgXg!NK*if7xsP2d+~!jTL$xjF5e*>Riv#b51n*BEc$ME3Ep-S*7(+y}v_ z3Ztf_M|5|Z7Q613#zwN$lu=WhrQa29!CUT%D&u}Hc{9UhGfrgtLq<<+6It05U#;P0 z7W1mh&PUha9Z-H37dAl&%6l)evs#UpIbYIJWlkrAhi69Q0O`d%8Q!2cR z$K-aST7_!enfmt6NC2~~8TNg}O}Zn~(j$YM-$C5RfAWq6ok5LAT-74OZ$ot22#?Mh zdgfYhyNVj_gb!b5UAaB_N5dvrL7v`dj}$x>uM>FTiiI$7VAOk5#a>sSLQZsM>Dsia zERv>@%5kE#zA<>ESm{31%~;0x>S3$=Y{bCtM&`~!gRF1Gu3*I(Lo@~LBaccOgV*JYhQC7%oAdf60mKh77=is4IEB*Kx!!#x8xj~eam?UM^jAVPi zr}bM&?6aS&5 z?`+kWomDX)FC&7tfH!|}o;dj(f*o>%_PwkaoQ6#IE!h zDgo0CI_ij%R8DMnU*ECu=Od_-ebr4Wl$AUft&#U3jBVo*TJbTrO%`b;yHcoA%c1YK zigtD|PfDuiL_p(%GWrX>pY~3v^}5bPW`5P z`SKBPPb4r`YZna|Md?{sFaZ0u3;2kGw<>6H)2%h!g3mJ)1C(O`1n(TL+EyZ6q1so!*AxJ?)f3nywSDo zzQ^7;OiR?gv0`|X_&9xjSL@`YX2fhp>z(lWZu;_Y4`p5nLBp2wjTe(ggnslglbs~e z=fi$*>{ESOfm3YlLR_D`WM^dQ$s~*D*@RbLoe5|k z5(~n;Z0(zBkRtH&uB2ONNOq(*p8IKq`CBf_6p^c;9p^knyE5lGpYC^)#mdi+cU&W) zAi$Z5QT^ z=SU?L9M&y4*533%6z=)()pv|;#;wIpKjf43^NSV3LuQYqpG|TCn``m7>>A50&3NKe zHCTGB88!p42iw(0iaknsOG5TrsG^f&3Qf*5zziPgw0*mziv&boRS-F-e0$0Aa5--~ z;x?pmI2i3ROB%1UdDJ_ZR&}cc%yYrN>b-L7#|uJ9^fgk`|HKMfi;SvPAWyB>Gg^|C zZ@%_f5UFzaeC|_O$xG{g-&py{1Sh>3jaU6ZI`O+Kb`4ROR{pF{!fP_)T`Bf!j`y{` z!>2dR?n}Ct(+miXw@4?~w8&S)0!YQDPg6ykdqkHp_Ec>Ucr(xqk;%emm$WDLvo!(c zAAgn+Fa4`4C05k_xY39Yd;eOy;eqy~=#_K~Ng%rwsjP}i#qYY-Nt$+8ltjYfmxU_D zRaDSMK02kgRrvNbd-qN^=m7heU7fL% zx`BzPVNc2}ssX6>vQ&ntmGE8}Fx>2LPTgP`%sQ=5yGm*coO@QV9@>5)Fo@O;C%xim z|2*NaJKgYnS3~3!t--eY(JxyO#T!TJLkF|$nYkbD87wteKEpyp)wm4qyh_Nwwu}x& zT~9!7WN!F;0@5|N$E*rdq6)4>UJm&XeWT{!<{ShTCXpvTt*>wge)=y;rOmZ!FSC|2gjX+G22@K8v?R1B>@N>m2NkOx) z5WGY^7jloY9i!7JQeM-+vXbfj_;`T4kT*8}oR5!hGOs{6>plvcL|fL9a2_F;lCW*} zD`=NA(J9(w_ft(Xtwltl0&@wnOTn$Z>am)Jv=(WdtH=~$|KYsEHFAi}Cf)5e+JsEV zs?ZtfwX z&wakRTp(srnAOzUe%y4w^Bv{XxSO{)XI9*jjDMqiYJE*H?HFO-RMI{oPzi}{u@-3r z&UJIWJ(;GN@dk9lC^oGU&5-IR=U=Hbl&FiE$ZB@ewjyJi#+JM^tW|!o7}q};C@m};RT;>kMhJ!XwLZ4FM$=;-HQe) z4&M+fuB633a8m2tI0tjqw2j~kRy&-G`zW%v0{4#48?_) zXdA|(+6A_z)Lldc(8Za}rT1{zPuWtozwwTdRig9|y1APZR*y9dH!sMnrfw&sB2{`f zMh1LebGz4*W+r=;L+a4nigspYlAt2zeo6}3KPtrmmv-tlbfLKPmlh7hz{Ub5o_BmR zypn){ciK$|SFdyq>A=p+yl6rAZ_jyip&j9B1-ahq8f6AOR!IM8qVCga^|_f`z%H>l z--mCc3Al$omV;j<-RV=>I}+Fl)e{zf7w-APcAL1`nN?UMy?m=t-_4BI`ey7v4w|X| zsg_}htf~Vr+abOk1jpqMN@&*#6Xt(Qv~9uwi`o}BH*a2JX0}FbBbhmPkI0sgzoXsZ zuYv+M?A?1x~z}9E(iRzmx8L^!t`G;FnF!{-_u;d-eN3KV(HM?_h?&Y5);V zV5Ksr3vR~7KmrLIS+AwY?JM%Jw)BQP9kb08K>bMm>1|=kY@6hXy_Jbot7FN`?ite5 zusbC^AJn>~i+wCbgS)3M-^MXNVEr7An{TUrb>t`U}!Gp7E5sh$>-nsqiOI$NE)f$`~Ue@NGk7gm73KBI;dqSyc)uJ<{^#*od$P*#j>>bK+~D|QY*I>8{T@;WzQGS#A8iis z{Ph))Y*t$8*OMmZJ~0Mi%P4(4PZ1z!TL_&TMgPZhvyJsXppfFT_n}mX33BYKj}@Fu zXp1eW;b2qSa!9&k-Wq~Bs`_e``g~M$KxlEBjdNKS+m){nz%czIjjsJ4CCI^_2n)B2 z@Xg}X=}sS55iM>1b5Em9*$u+Q+}FZF>DNDB<})+&|NbEvEfEgxAde2Bz>oJA6sNnW z((zvhpM$`Q|9_aG{{yVzpc)?^Kfc;4g#b>UV?zrOf&$$6$D>mCbgZnH z$;rtXquA(RFt{-{FV92rzdspLNqb>Z$@|=RS)87}v(m-uB|ZHKV60vTVgX%&rlcpL zT|sa$5O64sa7d;tAImt_>jJ)tCjjQ^3xGiyA&DU*8V9g1z@?nX%+ugYSGsnQ&V~x_ zYnRDV1F%wL$-mz!;3dd2^mPHk^FpGd!S6M5e~^)od|m4f-=4E)0g80*>kF}$uloP8 zrlLlGaI`Y}RmlbWPTY)V|Ewn9Wo_RKAkWOS`rfO^e4^SddKdHZm+H?jW(HTG!x0K9 z1q?bz%wpa@BL9zcmG+?w{DH0e+qY*RDyn-C{WRvV|C3kvf9X*aY5;u75tG?MtI4fA zy@-h|)S@kms7+&O?4P9wybM1h=lG%g?Adn{@i|;dftArLDGYpkuxJ%2vn}h#U(bH4 z7w^upxe7sEy$3i~A-ayQRk)eZX5uhVa{rHex5ud@|cE)nFsPXWZ0`Fu?=wd@H{ zbiCEoO!eKU6!JIzK}?8m)c^J9 
zm*f+{2_z#~yQ%-p&Fm7E|6b)CzS1YMu`-7~<&EH>viiSJ(gW)&#oxp}#6SgGxC}{oo{#x7sBHc3W%rvz}v~4}v zZ0YhF;erhJ7)+3mJNP2$Mp4YdCcrx3*qbS?-@8SjW5Wy2;mq7fMO0BIP06ltu zhE3AtkA^)8>?mG9mYWN0S z(yZzVD`Uo5#}P0grEyq;e!sCcvqz!B8(Xp!6y9p*QacL ziul%{67(eRdp#tcaU9-e|JpG4!MpciusGy`pL3!5k2eyA@RH`8(QNLoxBR1FR-&!0 zqBng(YudWjIBRQ=g;l0eEvmHh!9n^}MNP%0+?{TglqSL^v80^dj(8JV6{ul|P8 z09-+%;LhfnaxY+!gu)IFBhIbVUyg2sF9yNfqM&P7JlA*Uuop!-&5y7XIoeEyDCt?p znh%ceGP*PNg#P^CYB25AmZ##q{N(}M#|lMRPp2d5u~`V!dX!)(pRDc)OFLJ&j+QgJ zJw2zJ{zuwNOGTwdeFV})|LT!}%>grkO`|8CRjmxzq$lznU4Q?xJjuHV49yv3PHa$_ zQblFZ*Wn^hK^-$;>ArOQi#FB0xEAELMle4IhMx~Vh4IPMGD(frY{{}b)9uiHM)O`I zuv!JO2&2I#j4qWkjCtApjE3OrfRDKOFNR{ouIoC8(8}VPo!v+1NT8Y(;#3*<>&Zq60N}P-Xpqp&K21tu;ops zMB!ExC%~~VA}9Mxxa3Eupyd(G>l9w>pF?)~Y$F41NoEl+?awL8MIRyp&O`n{`-A?h zxomsUZO!4}Z&Mp^g9m4plw`p5Th$iCt*f90k9nn`G3t*oO?c z4_kNg}l*Ttd;C!dE0 zCP+DY6y^A7NtCPisk`ljIhfwRxe>^xzUV5cjjPH{eeC~O%HCo&_2391;m69(j;qEL zAyKKWu@0wgk!tUKCwf0k`A$v%o*5gZrDpn~>5W8SAb>+wA0XkB(6xT_{yRs6G?bK% zfr04y0%r846m>5|$Q(q<;s z6c@7HNkYG=Ni12Xf9B;Hbv8tXyw+XV$e|?}PLFQ;@$5#-jpdG7@L^00qjE!0{g(Rx z_X<*9v)-DU=P|ug`r!R+e&B|37xzD280lzjM$)HZe_~xrD!*(t< z5?k$7;#-9b`E;Je$-);54EgK%rC>pLCKkMIt_fzQ zaN~pSutYxhO}kTmT!iT`?+IGgm(WDXjWapkWB8BuBtcg?ERt7#m(Z=1OuySQ$eubw z?sfLNlW|QhPa3SZBog-aFK%x9>MOTiuyW%{hszrO@dr|mhq}PNxZQp`nz|nw{p$sv zk0NV*lK6~b3q1XPsWw;5Zj(7{{!{Z~IB56s9j&ZxHD?lfcy!8X#Ky%b+)r5Zjm~lL z^G`J)&~t#CEP>Y`s=~K+0JSI>2o&s9{aY412}bLFF|iU1+`W!cw|XHNCH*^BwwIiA z$)t5pAF|EmrY5Wz@gI+F652~|GRI^*WXO9s6)xCws}S6Y{t)GSabIS8^vEi-2ZAyW zQJ71%8iT>$q~uI%2EI2Gm=5J`7YTv~WRVQ~jF@7rcTXB?>}omMiL$@q(|plVWA=-F ztu3rbhQP0}Tl`F;X_O>lgd^^C%gCnA)Wf_!l}}SSFDdnS>?kpl%jWd86;MPXl~8?M zd)uiN(~ryTj%*h^KY(y37ihb~-Wm>96`(HXM~aCdj;gQ3-S=?!7aF=7XpmP|yo2e& zv_@I&ObQQ=v5#eyzh%0cUqN8g-di`EFBpY~yAHxj#l0vgr)?$u!Wupf0nWT)>`ehk3=B1Jj7+b^yQ8oKPb{F#{$*Z$ppm)m4Pkuv)S%$$nN}dlL;B3S5*FsjWug9PH`Dk0kfwI<0ncqZ%~w zzjkArIsfg)g$fgFlGG?TKzA(r-1nS;kYEQgvZ~S|^#vZav>2gm4skV`U7t$^;O#-&nMT3SJY5bt zz8LttP5(flRXcmehZMkyLc=1FeA5;)Vq~ zbD%DAPr#@>Z8qe!DiN(0N?^(DkW~~lGl5!9)ZZe5{-`h6t=?TZyl%|8VC0Ya)Bm>E z_9-gt~!k6ulNFA2a#fH-owBwp_@VFNDlJCse+c?Y6yyPSEn79RaevxUrd--&rw z(9%CR)BUN*tN5;v)~D;SJ&CKg8U|gBoZ#@U3Oa^^U?~Dh!hnQRTG~KD3qQTqC zZc`0Hw8Q3)7wg=MsUU$sqBxo0k_-A_{Si&m&C-*o74Wx zLq7>Wm!~@v{Cg`|!3|la>ES9zvD$8135m)rbKSpGbC?8*v?7y|i1z2o2|Znpx&oBI z!20tdsCD@hmcn1S_I187TMjW@2Tv>148=eJux%NV_F`9;c3eTHH|Fm!UcSUiu}7|_ z+inw}VbSr4672~bb+kfcS2vN%ojFYU`Z}pK@a*m>jcoTqtI-yS*}_6gdO@Ou03LYc zJSOB3+Oy}+P4^VP^{uO#^skl&A4(v$6u50SSU#4x5QzJ1e`>m1L)2A6NzabX3CG9v zrceKkd&O|@aa1k6CIXfnQm^U$?ivL!jQ8Uwj+&7)K+fjo?D~G2iN`UJ!QBaLQDSuN z5TG{vn>}oHfMQ@^SOI`k-G5tVI-n)xt6Ix8MO;Bg);r-D$iOXJZ88!ZR!iykVR4?2 z+OY->-{aOgr!nd^8i2kX<9QY3c zRN;Lonb?{EC?gZ3hyWuxH+jMr=h@ALu`J&hzOaN_8ICw}W$a0?D6>504#qP+xzS1j zL9h;6kpv4%2XtQNW1b<=p%TWDSXjhSZxbx}W4__!rZ9@}PZb#i8Z*9!v})n=Vq#!( zhDcrG8*h&P)a0`hIBY#qaca1xyE(pO7OJ~{u%;%|C;uomE>hDt^J67PB#)=WHAud&>AMzcu~g;xm2R5e{3#IW<0KZG%h35 z#h2HfTTFyUGlOm!BL{GhD-oh3&U<1DzNcnNylVW&JT%c_-$hGaXPB z)xj`7uu0ulWeHNo9RD1^5Lqg`NtvoM8KtJx0Wk#L$JEL?;@0NZLZxv}(idw;f zjbG-^4WDJOOJLC9Oj@z9d<}jeCJr*rO448%RzVi17U7PIa`Z|(Z1d|fKTC$vXRqfd zzsTi!%2`jzcA>266fT8lox$?{6X*2woy&*Mu&*%Y9_Ama)xX^~W)qLcC^?O~W2ac2 z^;OtHH~|OIBY!h>aipX5u^h@k(4Yu&-F~ zqn?W2pLklaCuU?k1QRA^ww7w$JhQy8ah+gyLxrM`ZQgkvj|pb3&T1HxtA{Ez9}~6g zTt4=f>NvULRhm0_nS7Wd9GjfHHK(c;YlATWH{prjetB`4t~b>4<4wccYg(6U8kCh1 zpVi`XTHg!trORyeSL(&KrJO=D=%v0l50@@)p!*9gXh$umB6jAuoavt_?&djs_m6h= zY*sA%r6iSED$1ytgXAzUFsfc=MMq;BWo2PRhm@H}Ny#XB5E|}gD^y+@7#fyE215)g zL>~{y(DoP&Z(6;g43h5P*>t%M;gujD2pYXgxp3=^H-KL^u2$nO_snN{TMtuPuao@S z6Q^aSmHk2iEpZOO$gJDW@Kgrku_rJwC4T)WBVyN{#5?U(Bq%-y;4z!R{*N#74FQ`b 
za0At_8D+ci!c~zQ?^!(*NqJiw#+5oFQ zkljl>)9A|i6qn+2epB9#oKX3rb-=9OGgA%$uIqef+F2_h0F#!C%kWEegKn9jA1>qy zU;#sF&kyF|crtQH;ytjl8{oZ$MAtIW&NMjxy>hc08GuHTad+gU@6XqcFVf+Q7WP7#|*+r~X-f*nYK^*0(2w?y>w;M;TA9X7Jl>rqEZW~(B-!y zHdE%o>o0jzJb>83wUYPHV^QW>(CS`C3)=(NpgtOreFZ z;Rb6kL!3M)?0ea!M$u>fIQSp+!SjBSe=h~Q+%h&&*J6s5ZlqGrajjH;7$ z!$A|o3I+(y(ltO}l4h0-@^H~*)q!Y(tS7ku6HV;vgowW+y|jXCZz|ZgWzlz|H>QTZ z%5;dr>tfdILUelWCD$_mtb@Uou2W(r2ps~%mM6e*t6l!hBEOMu;)7p2Y&<9n&51`8P~^$yNgPz0TBuhyNR_)R6p)dR5E|X z+X?UOkXEVE?CbpC=O4qOTJ>ZhR&7{_T~nJ$x!@i4N7lK1#s0N7M%*GsY~K%26L-UN z8!F#&=PlR6@Lw=+b%m+G0{)osq0xuW2)q_2uB)FYuDi>dAG?h~FSGJ|edvA0Do+ij z5Y(L|S(k~Fn8y>;DHLHUephZ>*R=Twb#@>)mbE#R#OLw4q4lz*n!?&%3*xlvLQB)I zPU2Lzt1ar`s)4$ypZW!qD+(ZrcaXP*{oLYFW;=CS6VP6tyx^zq+ z!Cxq~4FUu+edqQQLviB1{N}ez@xC41>`5*AeCN2ZaH&4oT197jzwEp-x<%o2-_6Wq zOZt3}cO^i>;LSB#X%8Y=JSs}>P~!9F!1U0$j<^&kpb{{M!yhD#D%jcEvqL28tr7r* z6NT+SjITwz1@5aM8PH*_JCll8ZryCTmEhUU6`RwpeO%;?FB6!&&u@oC)Z0L#%Ep<$ z1$9gf6bS$d)Opw7|D)|K!=h~4u3=G-p&7b6r9-+KMG*9o&QU^YhDJ(Y=nm)(B0Q+R|F>&cPvR z94p6he2q%;VTQfX#)poL!>*I$#>$&B~AuzM_u7^Rz*0wj`NA`a}?4=77=e>KM@B3ePxowVik-GWX zwy5*8q?*0zGvc(y=Coh*iUKp5Dq#52feNcheBSF0m@wc1N%f(?1-d~0lbII?ll{l& z_;_R~zU*t={3{-iO>Kde$XT3iaNY4A{5nBKaN~_)?9_k-2JWM3I94WOyR>@V2s*~l z+^?Lm(;aE?@tj5U(1Sj=^?X-)W^q|E9OjNX8aqs|;#_7TBjPp~{gpZVV_Pk$7D+Ml-nlm~3{k1xo0(}}8g*{W?<iAbVzYmG=|he;g34~XFDqQ4CiqPG;8IR=UzX$?Uz z+ulFM*V1_y8s{u{`@&DlN7G)mF71K9T6MJ>sd+}ju|@qCtjCb51I(l8C#Am_(T~Dq z!*bjDk!N%izb-C5Y^J*w0!2gdo}tjN;kv_gIX6dvPC1 zP7D<1h1(tt(WP#>%W5S}jcsQ-9STgQ&=Uykc>8MkzfMWH;vfvjOs$@GTQ6vc{lZkG z9cJs!lJ*ulkrRQuVhA@h0kv5(sd$_{@~*mfuDY|qh2f3enKDtw=-#p zfkvYTe5ArO4lBA}fusthZ-4(K>$7@eFM5;X!D;=mcQ6jtlB(Zq+R4?ei|H_1U(rH^ zs>dVavM6QBF8@}nKNAHSl)Zz^_qj$<^4g6{^|~V3xE`@@H6En!+Req_s4DED+az_1 zpYie7Y6IHF`rgn^R_=IQY!gjhxnaHv&X&ic2^%BsL#iH^#C6G~eC(}P1DcMum#1Bn z<_3z%PUBkF5wS;H07SHKT@nqTruNr(@Uw#S#dFidq}g-CfvDn?Rb9H@X*<`G54gMb zQn-Ibid9pD_Z4mCd2J}*N$sXSM}0gKE%wFUwH>BpZXhO$Nb=yf`{<;Y&_=EA!qUYS zJA*xpC(fR3^#n3H<{RJZ$RUfYQfZpF+&O+qs`Iz;kZTXDOj1la{e6JCuaMppu zfWXP>o+xQI5FmZD&JJjJd`nA*sQlo0AtSW59&T*S(Q|fGvkAJ^(Se3-$#njzIbLeB z9_b*KqxL@RoSwDdvwruz??|POC+_b?q(4#?*KX25tKJZ4ZZe6ZNX3ZUVNucQR zAbnj>3hqV(zFI8js)B9;Omx(J(s2DWBJ1KE2+y#cYrLN!?i_o*+e8EsCaTVfdsZg{ z^K^(XCZ zzx_>qGubI~GVA)mpd#N!fVuN5wys?X!SO!id{r%aQO;qCizVgWQ@j&#`Y75hf20+3 zb2ArGA&3@jwpyKrZ$+RbQh(>NYC;NC152}C~UO51Q^ zf>H*oRGq7Agl~EVB~(1Q9v-hPfz=Zp4&k90FEyqFk6bk#A1VC_EHN9v{Hq4^!jF1_ z@)=DNxeX3KiR2WphDr@*BpiSf*Joq4p^BW1x=!RE(p7GqN}Y+%yj6z3_0%f!3XGb4 zy&}ZJtC|7HP!vTGy@)=%+1vs1zwKV4gCiFLYRo&M9K{wEt&=?y7nP)CVh&?hAwG_bsCpQY4I7C=^R$WWBGZ>X2kI?!Xf@`x5Tjqmj~+F z=RU3_ep!x&bvAW5_qPos&evC|{%oIyez1LzFmgg#u`k-ON_p|OL9>ug&zZ3M*$rAm z)q6n9eX0vz7Hd3zWlU~Oey1Vleu-uh$FtsL18d~^67QF+o%H9NT(5f2eNVl#drYUH z*L3yYb7Xx8n*_!V_NWWIWlBY}y(C&!2D7D!%qc^Zj0~g>Mh+8~60!oKD7*m&afG=_Xf*fj)1( z5)|Ch#FNE!%(_e`WgH$I3anCf<*+s)kfQLviAxBY1HxSEkoucg3B`o-$IXT98kX@C zuL*AiWhp^GVU}+Co_!I3Xgdes=v40r!g8#?W18-Bylpq8nU_o|B$3o3SW2cY`Q*MG zAdq9i4iEu3VOGqXxgSj$D#@9-^^W(xe>akLaiNN(?d!8n(u+481lW%+vDe*VKWixfF+46G*{4V>}_)gSF6tpkojS(rJQEm0D| z_^S7v#bMfdm5wuFJWB{#PJ>0Zdt_AJira-gaXh`$`P^F1K0IEO-~CKRHBpt1rS5cx5dH}*c|LOP z$Wh$H+jrtt?*oc)`ZAr-exj@>`0g$^p+{lEVVfdTtnKt}#B1S4S=ibcfw zk0OAnhlHF|FEE&UR|xN4GiuO$hvIS3tuW}1(BAylh`W+0{}U6IReB?6CDZN~p!tXQ~7LP|nV=hxtH9m&^))l|{|elR^c5uco}f`h1HvBqi~+>O+2Qu9{Qj?H{O7d>8J40HYr?GTh? 
zopJ-`x_!HJ@q9;3fO(Wgbpf0g$mb;L&72D^+%GrwHsA=#SaIi)U66EwA%afZmT zd-DF}!Stl@0jgImt@1#BNcTVW351y+6Tnv&FOCE@YAPlIcp>7PPHNY0E``?H>2!Mg zbY-d~ZliBQ7icXeIMp7hoKx=z@x970S1+B~tEDyzt0{rA?h_Q0)EL8&Y$8IOiCnZ7d} z$uoT!`YBqafFaB&z9yOVD>6t~DzJwM)0liD$+9ox$*l z*C* z_y*?2eG=m17tY>@+(0UTGmx=gC!(gN*3gmvZKxqEA_AqP3{wsxWai-LCjAlG1Bd=w z&nwt)oLMdifAE}TihUF)JQ0UM>5TY zSVzF0CPwW(YCy!P`!H=)qgO-WeR}q=r5g6O&eUE6bfc|BzKqV3y=X_;>Hr8VZcma| z(G*PhMiGxy|E6E&PDs@5@3J7n=e{j3&m?!B0R7B&VJbr!_?;gNO7B^0jhE&SFTBw= z>riL(d#+jy6Y~4*_9fl*Wyz8b^6S^9bQ19PDvuxk=)OiD3V4sl-e6MIPIENb(w=G{ zRbvF`JBBE3;t;8%+&Udik ztgx(#8^|Un%P{e%iSV-aC;CN~fbtGY%|TbB0Q?$tzHeH(a^i{fc)L-ZN$c|Mv zs8f?qr8}tYD?CrFj!?e_%HOsLbsjrLY@D1k?zk_H)`xnE zKeghTe}~EED=8~ONl2Cq`vX93Tr99xo2l}6`4(%b(S6Gy=LSL}{b3D2I3T3N1L09H zRh6rqiBI1b9i=^YtK0rO4J`5!mbwY!f!HKO3kMBx%dLhdhW#B)HuEu`p7~>)idcDm zRrbK|SfNikP)0KjSLy0B&6=Hg{G1xbF1FHNY$Tbm+^MlYFkb#X+=OtJd9qBs9Wxh2 zUcs|{Krt5hNc>tVWhb0}wQ2H~=a1&#x|eM)#EmDckIch_=>>mBj!@0{iqfPyM3bNE zTj-23%sx3_$Sl(JnN8|(csLtss9UvfQG=~3R#-v+)Q^kSSI3qf+baIF4~E{(l-gMh znA?a(vf)MsE_(g#9q?6Pjv_OkGQ*L2|Ngn_d6uKHgG@spQ(OzR!5ONF>wUK{c%Yr^JkXjCRB^l5Vk4Z^GLj;~VbNs`HV{2E=0b2L?m> zB+o~+GaTF5ufIfV%|1I8YXXX~C5VsJgt6yZG(HncJ=5;XQ=!k4W!9ynNn@`q0_0`B zH8avAJH`pww{;D;6OigWvmh@i` z8IpmiN`(R2S6p-$QC%B%{>a~ZGtO;Y1#xnb*Oz+%>J7@*Kr!XD6v26;3ta?$gz~&j zBQ%LU&7i7IllCl&X#)ks##~cfOjw(wSpgcbWijdu-k3!47lJu{$A&yRjRBEYP!McgvcC z4h{NH=)Y;kZ4)~q*LmuPQrVT1;MDSMl`~A#j-nA@7EVe?69j2v`a9aIQkI%uNWRDm zGNL{3l}k}IaJ!XvIW6Fo4iFeSb;c9(z?{sxwq9Oa_vukiX9!EWNCb{5 zU*m%(?HN+;_rTXr#FLE3Qa0MxB?h(kRvQJM5go-5SC;P6^VL-I;aW#ydtD8o$_H=m ziRAzkY8E#ZN!l^&;_`E2#KRc)oR##djeG*0K+hB|K<>beF7N_u34>-YAr7rHi>?mv zy-vbFLR$>j2J}gsI<<>hVJG(`hg}kt?hSRk0}@>IM=Hw2+D0$*CJVOQbLITDCnI}; z#&d6m@$o(#gwi^dFOsQbirNK%Jh&w=reEn@_&=gO`Gow1 zq2bhfmz9g;8KF)cU36IH74qTdT6A(u+uQF!`ow83gef1q$isl{zA;fqC!iBK`dPgkqELzFS$swfsFfA zJ;aTIA~`&tnn%n2$%<2sl8&@BdC%43#pR+$#Ww4|zo3|@U6i?=KiTc-Y99LJIgd$w zxvT(|j?uFeF5RX5`(SiKSWIj@(o065OQUn@QCe z_k)`&zhNbtnDe{?AHk-Jwx?@RLO%0RSTc`3A*Jc_oVifFcMldpRnY}F(QPT!!JE*( zt0H%fWf)!cc42Q5P-$ll;*#Gp$ouP$g6PTxm2dUnRU|oCi;XwOxKTX6OS)5P>G{3+ zu)n*C^T(34z2Nh~r|@rKw({>}8wYnHg95R1*-LPXw!w2k*%p4j_C1VY%V*N;Bg{Q5 zBjF{bks66Tj@|F9IZBVZq)uWU(Zg!9lt`ISOOMPvv%FXJ6<=aDnpy>fzsO4u0v&Pq zHC*?AC*C^4BsM`~$G*3cam01^4wrtp7*Ku&1x#GN7ATstIeFJ#b%)b!@yWYq2PwO+ zO%!+(8-KQ5{ob;jEYGWX=GJ|=d>TmBda)6;)8R9#0Ft+;=&2}s4DbQ6U$}{+2`o6^+E}?1Q6C`T4gt&j;CnENy32kqYI2jv6mj#Gg8JH zPPY2_WU4g^yUI7pk9A4&knwmkbe{pi>QWe^e|gUcU-|)wQ^5y~1DDx)ME!0ftFr$J zW{7O0zlnssXemxQA#O@_NnyV*r(Xj&aZ3$)-2?;biP!FwN_xw5Y2rq>iLBR*kR(~L z9;I!*Ywz6x`Y)b`dzF8R;;aBr$Q$ri)7?WpJ&j{$HqA$)(2oV*zBOIFEyT74E=5K^Q*;rQUe`JI&l~f@Z6Wg9f?$rN^02sMK zp_XIHbvwEKzx!ehHH)=!8&_8yo={3*@{<-q}$HTjCA$V`?T>tb#kpU^R~t|KotY~QWsy)3grOgPPH z-M2O|nd1}#2ZuK3L`vhhQ&Z#0w@LWrUADO#B=e)Ng@FubtK}Hlos!|_Zp9-W*NKYk zJ5pUJ#Z)3x-VCAT{QV>1AC!?3FIz&9u{a(cwqbiTzG3y5%a8Bp|ILXR{ve_BwT6?LAUZT#(mub_RQNNJ{CvQz>5k}Ityz@`tsp?3x|0OhU7 zvxcCsF(TI!^jXjLEP2Dc*pP*w#=#QYtDvQpFWup?8l`~)Cwz}X5+f>MeoltSmvq<( zo6A%&)yTqcjHUe9es%+OFZwBNg#R9TNuUhr25;$;m(E;9qu(TWn*G;sEE`=jG}C7) zGpj@Lx9|j-EyjeO_Y(Wae#yNF-okN6bEczB@P6pE!%O5Gn7GU#>^R#4eTt)MR1{;i z3dntDEPb18)wTEk!WDzg7mc2KR}`ih5?sD!Z&w!rI36BYpsP0FqxtBHAN!%poUeLT zFEh_=JQ+eI5iewOycj7nDWdKp?DyN7u^$UJv&prbJ^?66xC{u>jZ~@Dkj}-y;d*Ux z{rJm_-0iO+T=yy)-buR|SvStCNA#k@Qi|HMheIM70 zyFCsB0?w+kq2i9yUmL&aO)fv@$;wWL(T&QcE@RCDR;$)T!W#!mATk13R!7H~AVT!S zp@2N6DyB!s$!G>@G_T842=BJRj$4%~-K`(`rf=?8%FdmPC{X}+?X)aaifyOc>n5~A zM#KGd^j#X>3GBCRcJHO6PvuEf{1WY0=T9NNdBH)Y5Zo!L9^ZwunEA39!a*^cH3#g* zV38d%db<~sARv-<(9eFnBm#7((?4tM#_ryvd!G&2+q`PF$*?{X49Kpa&;){s9prsF z&cu|A7+UdKn9-s5QJwGEp?X8{zj_|Jtl>mInx3O5Kayk8N$lRTI0Ww>U26NkM51); 
zx3k`kufewRHL})@xgq;Xi&V32f@M7J7l;oaq>HCIae2Eni)&eFVQ+P>Ul%;EI1i+}SYcQ%p5jrnXAm@KM2jKx-z z(RHCM+}5zWP8v;E)3msJjK|LPr`GAj>hu=4clY<*X~W#yl4G`eVfx2N@Vj1u)MUFb z$}+l;)EWHlZ{M2oJ^DgF7*&_yILBsg$;EiiTt%s89IN5feEI_*ArukwQ2>o&){Y&1 zeY?H{Rq9n@Xzn}dB-dK+Pd(pgO#|8x_P;5e@8&vmxBL?;0uWGye?iVa=}v!e;s2+p zW}R4}{i{K7ad9?$d;cpuZRs`&N_IA3C_kS_{y$(PxFi#c)3Y;XAt5Me+tDd}#<5Kx z*9k{RNGPn7mK}`U{JCmyAAt$!=@ISi3My)9YcIL=3LzJlm$K>)|2vljKDrbXG#*Mu ze&_aWVkk5aY+`y(Zi8|r6bePyr1Z}x5n8A1lh7&>*tl=7`MPXG?wU-re^euEZ4E;r zzJ9DK^!mh3XGH0HT@C^@0^~O%n6EZ>ovzb@x~CD)pJDG1QDppMtM-bSatZ|zF9a&a|I z+|lxS&Q-p%zz^_FPLd}?-p3L?Q$XI7U9oKvxET`_byV^PqwSmu{O4-a6XIQNeIM{9 zM)Vy(aP~2+m5cB+G(DrD78Tn|_>u(SeZ@JYHUf;k`xt-~UpJKY1`_fE&|9#A6#__1 z-GS@bMnPOTXkLfRI0=`j0U~wTV$pH?KaucIt|EUhujK`_ocn#8xfY;904N}Av`{lm z2p0M;bcJAM)fK`9Ba=5OJ!#UxzQp8YB!~zaGP+VIP63eEPzLdm5(gM(JmT^EG9^bw zo1bbxxt`T4_z3n=YQHTbZlOp@z_?!(H=b1dB1k!+N!BMj8}shnY3f03axyOc5`)0E zZ<)?BQC1`QsynqegY#v%_*y(;S^yLZK3MLV`R@JV{CLOfY&EqM^4V|(ARiTOn|edz z+nPE$J#Q6*1)b+*bRaiV^%fKS4UK=sM)O=z`AeV}sjwMh7@9uo0bs~JXmMGAr!}d| zQaR}jsp~baEzix(y)jaNy*8M=g#%SG`LMqX;Tt_fW$^_seikKy93dA}EF$cjpd*28 zNm>RoG;{EPq`i^qB}Xuh^Yyn{4?8DpKGrfvc1Zi;GA=J=EdlU2u<2?$uY)ei#r1WY zu0{aHEOlTrb^^8x2>=3mV3>Wd?YAVsSnrhs2+HxRK^`r2K-%ojx(+QyaOSj2?==VC zG%r>6ebV%O$Df3=>6>}8m(?%fM$?K9mi;-9`AtCC0$iQM)uO2u4W;jEWlP;3Dl>SG z=dr*wnEmDJG-35m3zG&gCWk@Fp{4NjX)@e3v(nRTJ zJK2#60d+niZ&2NCyHWH{&(P#}l#;Z&hK>Tgsa;<=o`*hIA;=^2djjD#LZo&Vf~jiI&{ ze@X2Q63U5U-M;`OMVgFv<>jLdTtBewjSdboom(r`4Jx1GFudAerX2#lzCM$BauO1! z@+lGXn^(Y-*I}B$dnEyFcMdW`4?ycM?ThH#1n)iAc!}ZhAJ%FKwTe_#HG(0-!hFWH z2|$iKy4CMo#@>d)W5L(YkH~XH`))UyU3~aO`Wak!_^;SaIdVHnun3PKnZP~5Qh#=q z#Ce)ojB%4%p(dYTq~#mj&2GP<#w=Rr?)0;*Xs4Lgj#eb!>GRJq;SV)c;IHpLt^V7} z^P&kB@yY(XI$GkrX%k+iHZ$k36W}C(ax2d!+rVJ-%l%G zG#&u0!7+{Ipgg*dnTN+DRU zc~ZZ8D06%<_m(+)p1^H`)EU50hMBxDWRR?jYKFCS( zR(z;zbD=Sg?O$${V@-~AeQ~7I#CY@;T#AR5M&yT`nYw?{TW@4VU~ja{)!j_W44lc9 zg#D2=Z7+fKNK7JUl=dU#)UFD+?|ti;Zi(Y5NR8in^om?1l01Qfi_1b_(r0h7 zJc=vQO7ZFTB)KA%D(}yEGUBIFkw53{nQ+cd(z7D^4=@uUC-0kwTI>g>gr!BKo5H+L zF0BCvUMFa0;)7fT^MzxQk2c~LY4Vhn)x(z!_UO4SQ+N%R?nb<4MPJoR$+(rtUF<7Q zaN|lpm5H|dHKNYga3p+8zOcqBK-W{-(!jQ9aS4VSj^};$8VLEm!y_S3< zR$pRB`Ozs~F5u_Q0;of8{aK!`n;eu9@%dI&07+wa!^dD->w%2b&kAQakA`~hvmzya zE_x#dCD-fMiJu?83k{>r4#Y7?am=t8OChKhtC^lLee`NNeh9oZ(a|j6)VV&e5da zBdU~BIGkAvv7GcOXaIdq<5&Rn*dvHRTMkn)D;DBi2y&~ZA}2LI2rn6ejb%y`E!*o% zKIHx*(Us{Z*phwpq@f4)coe);XDMR->}}#QpP&bjzxegbvU~=Y1yvMeck7q4hs7Ys;F zk41wYnAjj6W_|_ zp%{+@;h~N0g~4yt)$!_ZhJ{odluKCsMt|kAE{@k5OK{RFA2!&Iu+;IVpZX!^3R zTYbxieyyzJsW@*y-wTLyux;Wn6ABPV5LaFG?~}0~rMfP2tVN0#xA_ITGx~)Ln1C4a zr)?chFHd6~CtGLD=)B8Hr<?2~!c&yWy4$E-c2wRObF@bKcaaXyVlUAAbs_#0rL z{hyHcZA#sQVeW1OiVyYn&}|Stig9OpxtmEe%F%JU8bL32HJT^2S(s!J{#Sj{x}ukg z1^Rvn<;L0&1pIlO2UFe@4!xWXf|FTZ-gVkhc0Zv>R;X7cq0aDF{`B$pl$ zB<#X*BtK-!KySP|hPFvq&l%ao`~a-r$DgrO-7GK}-p`t?sBCNI&OD+ak{p}nuMkqc z2RkuI_iCy-SE`3_Itmy_5TIPr2MFg5Xe(>>TF(b%NudK?1cVE^$9F3%utT0)Uu|c1$)c@KVL$x8q4~@H{*bWif6+KcX%=|yW;oh_^ zfnr==@1h-YB@!rYwxIG)NirV{jMp?(31cEP8+a&a_H5Cj1PJzC8KRl$g*{kM)~&v0 zS?Hv4_+eMSnAtOC!Nbh1GnBx`4?3TF5r5cYbm}F`z0n9%pt^b_AsM|I<4d)kxWOsL zfQv;bJi07ku-7!mftXjQBlZGrviRwkec05wd8e3tPM>ZpcmgT)n#o>W{+ckAUC@3mqvVebS6s4E zc@8^-cB;*8T^YtIQ26hHT(z^$uRyQvgTJY`;l}I9kk6&8?5c>|TxHl2g`nJd+@}$JXH=~m-*bCocKBlf>~l~PktbwORw@RW z6@t|z82u?+p~pZ!JUbdn&UbGVQE-F|k2n`(MOm@t!X%Yn#|lNPAaw9_@nHvQ9t)W_>F)=|SUGnX3pFt4gJSvg3Xxl1ohrzo7$8 z^5SzcqUV6*uHtC0LSI_gN?@vd;X?_bGiH1#nw&)loFS(llsD174}B845#ipD=nJz1 zd8OhOjULyW3Nq-XSM74F3jWF}v0x5!560$VBDTrVjah4^MyL!6gu(TTkW&z?_1Kzc zifir5Z=ZK`&KIQD{XsdEos zTuO{;?!6hL$U_?j4yv9Vu8!H%{9M?!aJgk&_H)=f?@5=0J_8+xn-_fZ%p(YHwSq3(%(rJt;t-8}jeL--IL-KG#x# 
zSOj*-@$VO+6|TV&NgzugLc@U(jZMXq-}CZ2E!BNNkqb^~T5V~mr}gK=^RMBSD3{Al z_Gm>_+f2s&>_Ji`>f20JrlHw{62#Q%xW1>(q9fXBQup0{UQ$KAZbHJv6`sOo?a^*)Cyi7t(h*3#_uM{>437dW5-F#cQ z1t_75L}|}M@wVnt{fGVKIViR1Ot_I4zg?HL*;L{$(_%g%Y3$pkbS7L=2A6iG;P36V#-UNj0Wru6@ z+{q(^5*cYEUmoCBjIlT+#-#xqNNC8TWkCQ33~s{c$g)zOVG;B1vrp8=dD zkAbGaby-E&iF}>eQK`6(XLa==Is4$fMqEWRT#>{{-4I6+Jr{wO=q4e+!m;cRoevdB zsHoX6Nn}`-C6OCkxo^VS9aHEe5%)<)UJ+v^6rXx&HY=HSBpKddYx9nnX@zu<1Xz=yus z*|$NZ^u;rBeV6_X7hK3fbBC9!az-eg^s!}gn0Wa1G=wdL3<%@Hl$d*ky8oKI4=J&^ zP*&;Ocx;p97Y;A^WK{DvDG3^`eHdNL)|`!~gHtY@E(?@B?4DGDuTUZ7_D)%c=J_Mx z8dsMkL&-m)8bW-Xjpn|Pq$n1{1nxRFO!crR z=FvZ1Wj9D(`-=diyp8dVyb&FPHvKQkhk(1Z|KoG>FVKdRxcLA0K#}}!t)ZdPjf>15 zVg>-l5C0c1sq`EOegEgTO5wbJe9!*;EP?$-Px)UIlDV=IgXf;peDf5?qm1V>z@@eX z$^yFr-Q+oFLN+;|7{tPrT79FxofpOs={s{>0t~k2!RYjf z_!r>DG}{7S456I9QI6nI@<%&NNmri%CGm_Zpnm~+QV#ld-pW-K<}h2wgjimG7)9w4iqSL$;rvR83ALUKiUE!pcRk~0I&>G)2@8=I0g?u{;0KftpR|ldcw?y z@9h5Gd>bNTe3b0I7{%c)kK+VZZa^7+=zn67#nC z2xWRHx6@RES}@Y)DN9^Ls1ivj-~@mz@J$4Od5w|Q(1RHhHMlrjcUg zdM)A10A-UYyaP-7XVY%uR@Rn>v%_RLvuU*~(BzvMfy*kr(VSO?HT>rE+l{%6g*$n} zm9$+R8$U0e0xHFApi{2`U+Wl->tZwFvaIpPJ6cjJHm z#j`c`K5n%)fwhu7M$&c702p#^SC45|fwAOCm$fga2w?yx>dPI|i#ZsCo+q|}_|S=# z50kXbY^S?TdnG_*AcQrUz^XnC5W2-fYP{J}f!5P}0!BHRO0|oCa%RLkz9~U8McMTL zXt?bo`p$P(lmEr3FM9$g$ZsxtB2RuT4R*XC_drX_EdY8uKZLtgJ&Lx$b-fngK?Y?e z4YQsmdceGE*R|wQ(z7}VHzDZ=BYyxOmD&5K-?kXZq9P;##EHVosFjPXo?MX($C=uO zYMF{-v~0IrYl%M?rhP$$zn!Ebhj41=DXT}}Onx=CMmkIbE27p-6W8#7!Y9BM^(PFG zz(U6Id)|G*R2v^n5TQI4d=CHmLD!>?J+*#|to2#AJ>=8Pj5sNe{N>Ll6Ia9JDDc~c;+=8->9H^nsW^Tes8{Sy6n78DNNoGvpc98e{m}5VSZh( zJ>KfRz4j@tLi+q$HQ{$g`#r7tt`vtfhAHDe2U2HXZvY87REn4!U}gS1hLz7lt!PkC zrJK28Qw>}2&bsNg+-D?m;5t&*WACrWB2>QA1FqW1`ydA-EU zOQy@tdw4Vbsr>;%C5F-ev;#y2VA6_MM;i}aK_nrfHreheuFt&zjJ-blSN-5*DEc1j zT|zqBx>=W~8poOOH||##X9;|E_zW_W4KoH1cGY1f-|=9t&>2$2a98N>7)TW-XklLf zoIkh!yZ3tb`Y1kL#E{&12?3R0d+XU6Z4(eSn9hT5Ftv9)K-m;dfIwe@DJma#I6&J5 zb{p0iQu)_=G}jlU!uqjI3OI9pg)eEChne5%atbu>D>Z&lZ*h&`1Og`T{M2W=eCFQF zhxWiP2j#DQjlo>CF*d}=`)I9(`$+UkJA)@^a7`kSCH zXJIa^0PAH%fMJ_odW&FXc8lN=3+?5^t(kJameqdxS$Xd-OF`y^s$1(*bt3aM>H#k; z&9(g4T>ks$--!W{oe*V+BA{12=+9dk7T`Ebe@p%P`eY6Pu?I9jEvc))_d-olP8OD@ zx2=e0e8U(mUoS)YRQ44`8#jwGM^&MiVkl~K`6tQ~$S(%~E07iw?-4&BHii2_Ga{Ps zp@hu%sZ@gVG-rz8&r3RN^SXnNVg$81^|~vSEmnQ!TY98AS%K_bK3A%zTwp-ka-;-r@8(ku$ z_;P*S1dnzVZJ7WrTC2hU-34X>lW?%tD`|a%@K0!l=%$+j*1|7`Vv`+@mwNFWXD=9o zoVlN|eKOoNQKZGmz#O=(E9Bg4xF}m4KyR-HNJZ->7n0nW-phyYdm)}w}SOJi$h5OdHJBtxe z5B&WXj|#YCrx3gzWCcGV4%> z46?Kn`$thd$cXj{iz4#Or8N48cNiP5k-hj_*|*O&h=@rCB0pi`8dJ>Lsk6U&{41*0 zbLvH^Bq#H($PXMLGD}h@={ifSC|%s^;`rmBhw)olr+gDRQk*(jM3nb#6HUZOv2Ulm zQqtUFYZSf~cdPbZx>sBamTl@N;C zkyxK&;qK)tmAKeMGyMWuH3| zmE59eywb3i3ri2Qi3y-Sz9vA^?$7>Xjtlg1mmLg^?$p7>kkok36lVX51*%A_GM-vi ze{Z-cG^5wRg#pOY!GoDZqO40?$b*4YI?T%9(R-sZp zMeWqS*XCxSB9u^J>tr&C{0hy#t3(C6E!?0+)hS1W+5lwYsX!n$0)LT{LSg~#7pllR zkSib1DEM$rZG!r?nOmBzsGJP-1S`GXp+Y0X6Ji{01}Pe`bM^Zu-6=^IU4IGj#zvJv z58<)QZ!3x1L&DXm1l-m9C9!{n-lEGX41=Lpf9-UyztNWYMP3Bti-Rx1XydlR1Ygsp zY(+vM?yV!_->$wr0;q{Fp$I%CxNK`xVk`AkxOl38?5;h+5_>4I#AO00T ze_oX|Xy8=SKM21F= zDZo)U-4ed_CkAQc6Ja|f^HId;NqY$h;&%7#z&kw*i}?@)pzs4pzY>3kcQAjjX2Zom zsg=06YLy7z%GMW<@NRAz`}Dr#D82loE`ZPM?f?= zh>$eP9CuWnDZ!vLA6=z(>Iu_JPr_Ma;hV}5dsza$JNZLHt#Sbj=ATN|vrLqgUSCQ= z%@OJ>R?JFXLT7ESCu2p(JX!KBo@Kq3_#5uP;pS@(RkEbDv8?2IN%3x3AMoXgc`kB%$SRO+64untZ?-hN{XwzS zYi8#)LXE)Dzvrad?WAH7x^b?&Eq`~we@2XBxB9f#F_Z48-$sf@ZXH(K`waH2nIlKe zsh0P?cB+RI1UWp`A}KmmA0GxpZ#Q_JW-q-%fk0`K%mx1?b}CZ$7g)QBq0f3ZBDMFlcE8i6&C7l z01oQ_|49DAJY+BKXrzfk)(z>)#eUBGYO(%^_*E2SMU^*>lI68wDYZ!a0T#|w%>B)~ z+0T@Cp@tvtV9L`y0Z&bg#A?2D6_VQr;;Cj4MF>;9g|?)?>;3@C`YafJ5AtXpi@KQq 
zm)Q~0HB9VGJWM^F38?DDyN6`pHy zsiP6*NMvA=&0tTBnpQM}vY>A%xA8NTqK(Gp+RscGR%3O9(cY%M$N_|~i&TTtQj{8^b zdB(2_l=nm9@86py*XU_Ys6_XO{Eg)#b{4eSt0?swJ->_;0V%|Cr!CPCJLa0fZK^VT zlj)04!LvkX&Wly=p<06^x{)>8CjW~k0sgU@-`lR+%<#QVO&8As9}o7E6A3g^1*A|C zT6&Eqc+RF?i4Q7MdsC~;dtd2NHqS<05m-^(%0Z9BGr(enf8OocH|`cBK_)#R+`GGW z*5Tb79CvHMGa7d4xKw4XbEXj|Mfo3`fKKYU;Zv1lWd)cXq0|A=lLg5H5p=TP4ZBM( zB=y7+BWzI3fC;0MXRlCsHqUigmEo8mjQ`G7Gn2+CTRvWF2WZoz>M)h=@RPoUg=i?& zvt=tv$weoGb3Hia%jTH1M}7uF7`>~hnty2oKQ>iLhRRq)6h-@mMJ2?NW$R*UsL%p} zk1#Oeu{oWUTW6!k&JQK=ma5qF@Q?c4CMpik8Lj^+a*$&ar9qwb(!wC4Pz{rA}( z8Ll-LzbH*@D#pF*$zm1w;%rLaebq^91MB?=LTjfF*=pg*1U+mZ{sExmO5(37p^OUFaD>x=gUM_*K5_v8&u{$0(ADU)*+ zExWNf-_qo|dXTl?iOD=rr_6z)3|FRGW+;>m&68bxwr;S1g)#K9Q!%Tu{sLAboTJonzzNYh7EyoC+uVK#J9Wu@64p$0z;AW!^LdYdi|S?2I?WKTnZ_S@ zi;w`EV%@ieB4Z!AVfQQd+{R{Pc^&rnN$30W%y2*7^_ciluukvsoFRH6mN^nia4C#k zRuvqn2sfjR$D@c9I)Nwi;hZ#FZRRLto(1ihxBklDCcj4V`saMdfXlp|m+3Q4b5+1LmGwKhBiqJfU`m(P& zRGq9+vn#X*aY8hRB#alpOKIZ1)&F7cJ>#kFkadXB-^aBO@zPRLF{BXU|9}J3D*tmEZeZSJ(H(_vZiq_&pxKZt2e9eCF%*oJ!@~ zF@C;jO>xa`IT>F%)d^~tSHkj;X#AbWX!#5qMH=owd4pl`pzUdYsxY;$?y&dv+NQ1} z^ktSamp#GZ^y7mXTqYZ)Jxjxc^uo+ z)OrFJ&=*D631_~t=QB+P-VUARqJ*UbwtfklUwQ_&@{n4y;ZIcje3h}#n(ak^L+e&@ znN<5j2kx_zu4iPoIO`}t!=k>sN8&og2!xAinmtIqk3Tbn@Vbh_>!NrP|1?5`33}qm z66g@;len()rAeDr5l7M|@YRw|h;KVbNbQUr)91x6Rh*G5x6(~1)N<=A9zk`emNGxw z;ujN<<^vBy9xCn3UZCm|9Jx_8y6w|ccCH{?hS=O3cK)Q0K}0K*!lAG@H$2 z2u%=|$`!Y!jVu2?S@FSHaDWNjY_(9+;H=?lB86gN%wn%6MNK&5LF`kEDd^MWsB z@MRfuFj`v{ZufGd_+HX8G5RCY$)_!teP0d z8LSxk1o{ZGAA)dB7m?v$X{=9AcJ(rh=6`iG<>Q%wb{>rLj$+S>Sk}yx<`cXe7G8hR z0@^dg{-B|M-jV~7NAGm^abAJo0Qp=*5iRZEYc^b8Rb{CLy+gnc1XXNBnA=LT%6wWU zcg_VcP7E2%nfq!k@x7{zzqkvl%Q(@WfV29s?UOBHGn)4wF;|q}`k0ozl?#p)UPMM; zh<*)cnNv(>+KX^HgLBlcOEWEBf7flt{P5Bsqp(Tb1^v%OM_eBq^>m-~s*Z)bbxW4> zn?#k&CbCs{cay%qy<_LIKcBWzvnW4qEi8TJ^<<(Il-+D4Nqb~4VFXJ$uxa1p3GWclJh1-KC zY8?Qx&3-R}LYZ#QhNIczs30e~-3?L#ibem$yqwV-L8)%dhIvj;94+uOi;4!@T=)${ zlTKO4OPw7*W#;hbR_3!`aR><($Eg&~#{t9l=4 zew~g1!L1DI@F+NnQed)2{c}ld4<;UUYu+YXLLR02T=S7A*+a9<^ZGR$NGkKEvtA0b z7rZVbX&~KkoVj&VzGXz$XYzh)b+k{=4pNfa?tK4zgj-#p-+m&a^%KF^WO;BABpa`Kkp zltIG^i~Y6hB-4m#wjxw0T#0J-J~z_!1Hvg}Q$%2h+(MM9NnDW-cuX5dWkFRbK0i#K z-WSFfd^Qyw>Skdmw3;|UnO=^3iY1@comFEHb{-3dMBrpA0bg7vfLQOh59{tMCXLM2 zWJWgG6(qB!)1nbMZ#`6fn)YeD8jYi@5{_)>3NQ~2j+3qO4ZD;^kZ_>k`>~@{^FJ$4 z&3(Ar5jDb`Dt?c5SJM{tR(IG zp+4xQ-M!B{=#^s@`7xC6o$3#iX~HS|4c|pZ`G9tXvk2jGsHG-{Um`HF+;3795Y7Hv z@Zs&CQCkN3K$QIE@_y! zN;>T*T26KA+-yb(0=5Pzv(>r7$O(T@Y@g-PbV3ovCy|;hX-Qr9l$-7#L*;ghJOw`3 zq92Nl&*ljfUfNTi9{ z|CsXDC~l_KN|CaB$uSt~GKJZ3Q}UZsn%(FBk&+vz|3_UpX_FO6Y_AAAfBEia zgP2<VF}<(iig!>-rMHSBGDTfRd}`RuQ`5fO2| zYJ-0?y_RQXif#82iAgG{sGR8l^wC!K-pCN^Xj&l<%bZ$R4KhvX82*@Hj;W+U zl?_)y$6JE4afw&vt%s6522L5yvUll(0#sn{jR=|FMSQB~qC^;mwNDHoT0aTcS3c)E zM>YSG?u4Zic~lyfE`|aNA%~xj4B`7h4?LzF+_80eg%+;69FBZ`x6quuOm5TG%h=^~TUtT*yMOv{dnWVHz;DxS*V#X33I`>p8)m#T z-*EAid5y+GIFLbhzwGz&2(F48nB z6VmGZVKsK6;!*d6Ju9s_kqrfM(`UZ_p^>Ut0F?%`Q??%<^x~Y4L$g)GMtV}kxhG27 z%I&`a&bB9}Ib;&BM-#(uZl!TK?6K*{#U|!bkmI0DWtQ8fKIrL;q!-PC3WnA;RPpYc z6Q11(vJI{xy=FJs6<+uzkGgYigQ$WLpwebL4*ndEbAOPB<3J${n>oWxCVaJYGgzf^7Mz1z`j zlI|-;m5xgTZw=n)r0&pMv6v$WgOATJivc!)GO!xftY!jEZb)jvcc!UO`}tKHP*UmP zw)hQHfj_aC_gs2yTIKQ&sd@_%=LQ|}6dour7BBtvCC{oodQEx+m@qnCtn|B13ss0= z-8Ry1Xph?@;qsXUrGwfXq9@m*8QS!FY&;~r`|h)Oy$3GE<~PC|*5I}bwn@I!YclpI zS%FumM5oF5T;G%Q833rbi}(c2ZvtP1<6zN)wNwgZ&!OM6MJ>KtIu5V# zdiJmLUI4=OXO;IZ_ZfBv*`ZZWq?zVHj19&%DIV| znVF0Od`jEA9h7duXI;KB>VjBwFKc`JMauTCjjy=z82oMHdS5U8tM!|0tVQ(r1@-wD zUG^aUCcdo2h24ez<~aajTYNzGWwL}`2N?v-L`w`mAD7ctuiRHR@0;fPpB_m+jn%!j z$a&8jN)J2kSO;Y9DhQO&<#^=+GB9-oSH8bbUiNg!z9L1M{g0dj*x(1BKR>D4?v>pG z#t(v(JUlqCTN5OBJ? 
zsOxUFGxp@$E%(ni1_?GtJBv>8L7TwVJDE8OEFO72_01;EbvDTl{i{AY4ECNDc@f$A z0I_&ytZ&jgm86-TSqT)hH9Qn?sVr{#*jx71-}uv zwli)sUnHl~tB}PX*-+ z%>qu{=10`wGirAXhM!k=SFGw#XGS%a$+Z@EhtI53UvxG)e)G^M8%OG7o0O)vmTl{J3WW&_2}rD7R_zWS%{!eu8b_z{kpkc~yAd zfuXHHBIK&OCzmAB!tK_&Lyea}^+B{eVGk?^l|$j7AvK zH|Q#6a_Tzj;8pWdqZ{6nnQ~7SB(7xKwf^!_xmSMXf&@neqXMIHA?VoE^_}qr;f58< z3;n}nV}5WA_P|}&Up-hv%M;|HXgzSF_p*1n-nI2ou z&Ky9=ue1KM?!nK6fl@XB@^%g&u3eMG$fcim2+{f_6*_UnOlfK$m)$7lI4vakZY`A+ zTi-*owSx&J{|`u7FU|*M`UnI47$Ab}pz&w&VhK$!{WFr#sP9$f>FYRG1@q~Rb(HnhJ!fa#bOKR?2fYxxsOeieg)HD0UMf1+@ zPo`Td(q^4{H0>;;s3)hn9PPk3-=a;khz<=KWrj~`m)`@Xjf+vvkXblrUT96hjDV8u z+`rhD&i&Wlm5Gy^>`Ze0?zud}d3R+-@QRAXBd^m;mhZ1R_AXKEwGrM6U6KAyrxL^! zBk!Nz#rS)kx9jKR97T|sL_v)IZ-mU2+u=1Yz+J4#S2Ntr6#93&bE~B0(AP- zF;Bb$yNugZw2>gRotYJC2+6iGOU$Kj-(-hJ%?3lQRPYpcX($8n$y3{LA{E=oy4*o< z%B;oP8o{jeQ`eIsV49#DZI|s99H*16g5Cw)dc_I4GWL}+K}OS8+skO@ZQ(^5zadcy zB8d^vVokOLLEPQ$OWe4QzvMh*Jl0mszCx?Jiy4Qry}mCyYM>Om*-oBpl&>iqT|Blh zkV-4DOp(8Fz%W{F^_~|+ou|DZK6AAqA8Td(`RCgQH5II~VwY72K)C!#o2_lMwX*}= zwKP=~6A`!h2+K%U6!|R0gR^`@?})U&50lxQ9*mYp4C<7t2}ma-{h5_p z8Z|4_)MOt191pwJtJk8E_e@koaS=ivFKS+&H5FfMv3{S&S5T14MUXz~3k#*kk8{c7 zR>odak((tcox>hu5I1!@E5#oO&v^dya9_0PHAQiWFBaB841DaLt~6CqeBit1y*5_m z#Ie<6bYCG?h|0nteH*CJwfpFWnJ2`e)^3L)XbI-O`m)yR#Yw2GbVJ9ZN}|zhbIsD7 zx#x6tNM~bug$EmxX<^Sp=`s7+^~BPgk__6*zE^x3{MgpWVS8;8swe1Hho!cF7fWzI-STfB#oz7a|%AUQS?$|cK14@;hJ2q*H7ZB zwfA}7`niTnzda%*7|+rmC%RiT9TR$rrvu-46OzOn6ocOU*{wWKEScYftw^zdfX~NpAAGY2wo5e(csM5JAbF zr_<6jkvL#2)h)i(YFELVdg-&tkdxy<(Q|>@S4!R2H+*eRH2fnx+qQbF?I`{4reIk- zI2&k@G+^bis8povGv^!H5~=ar6E_fQ(5@9wwVw5eL<^{DACx$-G))65rkh9?CU|>n zUb@ef*)w{kr(L;|65)UCVBF%DUFPARg`%XP(K=LN-i@zZAQFbU+mG)0i+Siv|38k4 zOLw4Hk!xQ{+UDj}F|dLKUqsW?iE2KXOzuhqeJl-|F4XqhEK=qPjy0_D7G6N3{WH@* zZ~|`iMD5--n%svy8}J(~&&B(bX@x zk3l#bPgd0cv}m9@9d5t2-V-3Z4veQSv;6Q{R4; zj~AG$q{|OXJIK~IdD*l6S?N@)DJ2BB@|g|UPbu!e3Mrxl-^1?7d+%UWS#5zVoSYE} z!@7W)t{L4i29*^bo~yq-^n=g*q-N9rC+A^8@UNQd3@MAPw5HlLPfCpEyZS@Kr}Tk) z&o_g8e3Cu+f12(N$z#=fiH@tBxQJe6i^%mGwmr=IsiNy3O-AcBg9`l1EabPxT0iam zL2>@!@mpr)q0jq@pQ$+gorNI6heLL4MRb-OW+9S0+PY#L02!+hv2tXlWO_ zl|MmvIAm;Gz9+OP<=K2zK)w9V7geXduVBdHFrWvh01X?R%UmTXMQh>hO#LK94_No5 zA5VUI`U8k8N2}c&tfPj??GyZfv(p`n zZ=+x(b^?O#=u;rAjVnB3TM7Ufr~pU^BTkFNtb@afGh?tF1030;0yqsjUX*+D&;r}u z+YGQV>=B3NAA5T!fZ9;A$qWFaFOBSWkND~^It!>4D`l%-g(wEm zHEIgiTk-G^F_Xv=R&wLF4VsoMBzXeZqFIIQ9KQTJNizS(~50k{KxKd+252PtUY9qu5+z<8UMDCZ_djh(&Yr`6zRqbztYn4Vf2P<2!N1HFK4o_NGSJ$H>wKpF6N^RFPH>yDP* z*L-{hyuGrBWsnQ;QPOe52yDiy9|z4M9*>LTw?M>T?cW7P{o(Pz6j=(6wV#w&l#kO; zvBf`%YFFaMRsn8|4ui7AZ=gShxGaIg*e8hY4RBf~s-r1z09z;{CNY6;JHM3-d!-Rm z0ct*)brlY+Yc`rFGj0=C;!&^~aQUpztW6NT5fn+M$NMn_w}KrFO|c<0`#>+b3`SRm z;TK}~5F*|Ix38k=b)aSX#fBM8I*#Tzp5a~D#F;b3+4&n^fZA@@lYhs z6pRGp)kce?(_esJc<*WvqBQm_a!h}gSVFfrPdh){T)m-?+Bh$Af41@Suk9WMmgjxj z!1+;Ktx@X*ObBC5ZjNBTu#S;H(#~%W_6N`OgvE2;R?mv5TLpV4@89XZ(Y%@1Xs~ck5JPMMjPvoi$U_BSE&yQ6 zFu0YRiCOF8F5N=CjtAhs=po6bd-=B;ztb1Vk$Fd_$+*3vZ#x$U!%|~uu~2@?I4-sH zguZQX%Bw1fs%+U3agAtroRBSe8JC*GXzzoNG&ip6b14$``qnYy%I3dJk^IA1+r{fd zgNb|5tA>l^XTTCdIMTRBMOnBK=BbsM>Am!JrSQR29U<48A!djNaB4C0Dmk!cbLpYC-Vb*z*|v*&~{wLzny+?ab)2P)PP zjCFE^JP3bls(J_J$q>I60l*&*WKPrsxJ1AwK3lOM?wm61EWJ`K^h%&4Z}sZ_v`?5 zATb@s6b|cDkA^R%KQrZM5u+7W=+dbsO#6;12l5oStqheE`LhC;V2R>sR}GmxePqmH zHkGMi)98nqHJM6UCqnVuJJI;?LBi>=9W z3<0EN^;cs5Bxg^u$irEa7Yh%7_2bDVeV8Rk;~DqX19{XQt{VtAajRW3%N6$fS<4xD zZXV<$y<~G6xKmT**^CLeo5gPWayHI~#jYKBBU|YyDRHYNx{PLUKQ_bzPhp;{l8>2< z6O!CPi0~>vAMpi;QnMKqE+FUC1Q`1s`0VZHhxyq#Ct3iTLfl7tj9ziHOb+$RqRf9- zIFxJITJ}WxGTFJYTP)#saaX79Q0A&93jmrT+7QCI@Y+QDq=}W3k@NO-_RFI%Mi>G{ zfzkDv9|Uoxt7MG;T&*hBYmskzk#@r&$u$aS$vZ=~xE>=sq5fLGL05+pz(L${Sl5a` 
zIBp-YGccLYlCJ1l(;HazJHro|YvvIbNj_M2#;GV#ivA5`r8AC23k>EM#YuF1) zCi5ua5Mw>LO)eSO?yyCs{DPD@>f~bSm8+ zvom$&Hc^$C)tuC0z61Lpfhb*-4jMyHfb`;QzX=mU2gC;Z)-?!@MpS$SNfN`!wVcQV z4%*#7*_dsFE*EDh4YauP{4~F%NjYV|s^59biJvVhV}W>kypwT=n0A#gBs}nZ+?NGl zJK*>!f765vNiy?G+(QaThmtKYwSew#`0mj1~a1#iAKU|z!V65*kuZU1I!Db$ZO8a8jZ z0mK2PH6yIT*<>FI!DE6r+mToW9=gV;Rl=4o5ZN$AN2pRwb;k^cdfs`f&pRLc-lVdK zJ{~Fw6V2Amka4QjNH}(n!@a@`eG&ywgN|9ep3> zK?Zn`>7HyX#s=WP_&_4i(07-pYV0iF0l0 zi0Boe{~PNnRsqMbLH1F2?lZzUmVG)gp(KdGyL(sWNl~a~n5G%@FM0e8hzzbUo`30r zSa$?6omM$q^3x42>K2Lg2%TQ#YA}7c&1Xl>Ms1#lv0o7hVvVjRVuN$Rz*m`0Uo!A8 z7$iyberMBF9-#!X+IJq;xQY;&2mu$W-6$WLl7ufVWHKYPj)|_*1Fyy>O=r^!R6A5e z=SJe%GCMSLb5@YcwVpTAdaml^2YL41$fYLf?OSlyWX%O;508S{IL2KpF@>23xWbF- zi}o+>_IN%UU>k&C2B+Dh)ecVZsitU@_133uk|J0WSCExD=YfkJcj6J6G6&1t)psLL zr=0&xCFqFzN%}ISMciHMjPE8)_a`Q@O7|vyeAHFFKjPZ#TwcWOR5>5^&?e?=pojhB zHxFy&B;{ykdWojspJ9Lc%M3_H!LvXv;tuI<-keutBy>U@F_*j!qGe&(9nI){il5mIi8|DvDC=qj#ChoZ%&d?Ii?8ItR0gbBPH@>P0 z(`!W(qzu_JOb?#K(RW27r|~-{2!pNDhEKbycIucnkw`JC@G2H%P0>|~{dY{Qfi9?r zifc?os_Od>nBi|BWBB)`ic6gE_;eTn3){g&(j8^8*>MmYq@^qogRK2-dX^mXa|-4s zYK&v;<$7Ns!IW_o%K6Au27+_d2hXMEs)2WH}5* zdpd#05JyR$U6H+}jqcU&{lg9%|IPU{x}`!_udlIX7ZWxxfe4;e3Nlp%K)cC76EmN)o-OER;Qx6JM5a9CUkc z0~zl_x*1vW7iQt(KFt_@)WAsA>%Ys{3(?U zx<#RpSMQao^b^)$^)yhkX42+@ia@m4Qk)O9h9&cadcL!1Eq)sL9QMh=+0o3pXX~@; z>Lv&4=*dS{LFd{kUc(K-Rt(4QHu9LazKut?Ndu1S`#vBMq)J%lNGWW-F_Vv6HgW23 zsr`|B?9@-*tbX0cf4KOkssKCVYvd%OQ+9m#6kN*Znw|rR`NcaOG0PQFjpy!A_FFjT zXd9((O@2aos&q6yxI`VqOmn5phv$S@*#U&zE?Sxcdr-P4T_|=DY9-B7cFI*Wl}V|Z zD$VF&A%Pc#So|beP%hu1sgeJ#^@IgsFdq+fSzl`r!gpMzX1n_`nngrJ@L;N~Q25sdj|q+S|<)ZpIO zv;6KO0kOwD;(pmmcZB(Qn3^n)M_cSW8+`h%kpZ)WQ;x_XRw5l1+ug>TBrx{ zO>>F}c5o+<_Se5yYQSv0Yc;bZmPq7CK;I z?I$ttyJC{@CXLg$<>6=bv)znSWR1^A+j-{n;qtO(SY`y36kTS^)tU{u&waOJ#M835 zne~I1c+UPVpOeD-9wF42LlHkH>wAK_Q?d9?4&8+UxplAl?e(V%6K4OO4;qhB-m73P z`LaRlD07S_16om%6P~LU3kTmCA!pp|cdl07rO%XqfjtMy!EpA!HE$FOfi~uHeA+Y{ zBkiR(3ww!EYUkm_2Nvufdy0|znnB5~P_7A%c_OQ!G_b}T9f2)(8UNcxNYI9=9OK?(&T`zFuhev36njYNMmBICFg!M`MF*=pTGhF`t1C#Ax}&1{9H7 zDO`8wSPiS@S%L3gLDUYg&B$EW{z!|>8}~xo5I;C=_OedbfTyQtMN8($u7+XxNH(8u zAIgPgN9ik!u({!1IfEjUB5q1thO2IyUGDgu3EUHU1$OF1RC0(MHfAf?y^Tj*N4>=} zxy1aj=GScaY*@&Ke>8d9ZY1MDUgu$4OUaVzFs!g{=DB3J zzzRAF6hY2k==%$&Ire;IzV%f8vJ1n zo9W*go0RV9R%{E|ZJ-~1DglFaiuCD(#^ZN`M8rjBgU_O-hhwZp_1-1HB9FVA5pE&$ znKPbk$LQ#P!CS7h94;>Ev&nWfB?4de3kU=LQc1e=NibYYbcGo_MAIKzFu|e)*1yb% zSS0sUM8pOm!>&aP$N1%oMDw81M3L}jE1K(Z6ungG&mn2Jh*CY|N=hv!T|CoQ^)VF& z)Iv5lefQ^(G+^XxL-2^DQgOxiDJE2iPW=_cN*~RcF4mjL(M-!MOFKo`PBqPzLtjRh zV4%)ib-IG>E+!_F>@DE+$!9;uC~L_g>M#)G z4EIrL@>9urEfP8-G05=Z2pu6F>{EmOOXiM`_v+?G-6uT6NGO?N!RPX zb#BU}h(KGi;KbWPa=%4w{DnK?`V#sfiX2Ou{c}Q35K@RQQaTXN{zR~V%`Oz&&0N@{ zEx}>BZf3+v$()$-Kio6I=uH-=73>`%oS*cP#3>l$r7urrv}A5n!HQC`X%z96PnDv% zldB^_CEJZ6pR7HtU}2X{9R60dSI6*-Ur|tYCE}YI6rPXKYpbR`8{8nhklmxm{hR5w zlgPjslc!cO2t}A*nwN_5iJQm+<=4Hp{%ZC7P|MsYEN9}U&?(+9SIgW*bE3ZG`zy6m zoTWX7V*<(Tc_-v}yn)QpMcV%%M*0s5`n973KnwTU`kPOaE@3W#G@VV_v&bfSSQa$s zL{bKEVl^J25_s@Qp+7?&cQcwY>vz^dDc?&Bt_@i} z-}qvgx#)Odxl6&6Uoo1?3(e}LY$SacIfpyH)hT^yEw1!ip#5i2W!Q`~dgph2rhpZp zELd)6NQSln*64&qPQhDf<+J!z>GCS1q zipzMin&aK+i`|#x?n4|Ea?EW^>b;)MQPppk50^FM8a&R8$zzbILD=n&v) zCFn%U9$e^PW>qP5Bmkr=K@rcBMQUe1HLhm#>P+L9WAY;dTU`V9a6W7>*2)_q_dOSJ z5t+%-V;njy4E_dFkl9x1Ui;98O1>HkztqTAFB-x{Zb!@S8S!&^onYl&c_LsA1*S^Y z&4|z2wE%NjT(g9qw8LF>jwsKKwI`A^vSvq4@eBqx*pGD#zV4CZIvESb6AXiH(UXP$$e68`z*Bk*Ia z(^kwjouT~d(`3q3i3y!7oQfS6a)r{Y&-8h zHo0-ouE#~e%E(di+xZRy#iC2Sh!p|?}a`^pR zsO7Yqgm-HWL1vaoG~2V%dKuZ|W!Rs@Le;TM*R|A)l+G`OZrijLs(2AL`gcDc?ith@ z-PV}BR=p;d+kM!S@nrJk$;hY5r$({DyU=miD|r8|@{4{q4!;M({9{#lYrYRK>|F`4 
zS#d!DQhW3-Zy!62lD++1hH*3OuP_^@ zx&3e+pCoI^Whi^px;$Fu*fz@BF6VTmv9MmKv%vnl_)1*Tc}I<}e9IE~A)k#l)8=Si z1lL>$_WR*z@*?RrU!!y+Av9l|1$tlv4JHgi@ys$$voAr5p~Da3<~G(8{*Y_azd!dw^kmcDmWI0Y)Teqn$_m205Xp$>lqW?AUB7zj+8~M zHP3b3G-+R5M{A-NNXI-ADGbG&c0^b7GOm}&>bkT!&PhGfstPP2_o7g_Y_YG%S&cPY zcjs#m9JsWrlp>au`jP@lbw%6@F`L$LUWtrn9XVUoHr7Cu(de8PP!CQSUi;@f^=6U- z-UN>LGALrB&Y*^TlL-^ES47Uk&{a}7str|!c;qLxTYk6CX3tkj(YXJf9=rGz#A5$_ zK=}9NO)h(Ow)rqj;K}p?T+bUc%F+B34{`*p*bpM+DkqxGAy);?F?6J7v>7$UQ zU-C4+KIsI_K<_|clwoYTH^_uhh%q#>-81?+?t+K|b2Dvr3YrE{Sj(n*Vi7PFPUt#_ zfT|x{RlUHeKXu(&;9_PYu8X^a#1gSM`KWmlA3-ySSW_d@K=E#=y%=lHRbv)X#d0!l z+0v3kW;0^8rx?jXA)HwdeG7}UMT$$&WQ?@?86f*?@_R7j<6IYf##XP^MV6Tl7 zhCOPIWkTsHS}AbBo35@#&%^Bzx4DiGO<1QQj^3M1F<6V~tFPHH!e+jktwp~fS+;Cz zx*}Iey=?7h&w`@G%&K$~nX*|nSunw_j@b9-2ha7&EDO#TpoV5!Gnx3S9_HUo4;#2d ze{kDrtRs;%JI-N-|9wvFm|^|uznC?az9{1teZ|XM9~cizubySLzo!h&56)*2B5>!b^*Qx=jQQYoAaZ!Kz+>uc zJjR%B%d(W6Oa>kWxj*JJbsTg15lWR)$n+^Y{G&n-dkKXUkJaH}cy6HgZbZNy)+zKU zXJm?19RV^mZ*?FZ#6!w9%{Vzk8N>g~fqtKli^)laOm*yOB5eumX%t0TPP+i_8ID`D zzOav0h1urU2|jQ(V;fNcQvie1;sZ~)GK^hYkQMBzAKAJ2nWH#HD-;*q5fr@@MhLTO zFD4Nru=t(ctuHpoaG)=$5ls)SereWZ#m23q&xE{&eXpNqYS^V~MnZmtOWcxw@KQ;t zyXrA_L8g1~f#&Zj_5r5F*wC=++>b*)WdD1ogo)E#+NXaq!mL4QND&>sF$)W$L*8ND z*M@AK(W*c#YVoHK4qOIZb>`no9N17^bv~Y6R_BT<*cw+};R%UB7FbP8)qrhbifza% zuJ8TMQ0^EZ4_hErQHG;x!!I^=QR=aeVR1s-@d!aLEO|+L3~PtZ31N?!5?aY(nd33< z1&ur@$yL+**iPwu!dw30i&6Sh9VZ;7GXk_#a^sl#qjxCA`QXe<>{DHYwGMie>?N&a zSJEIRukBHar_n`}O&{w#!#Fh-L@Zjtula>=@w)Rh7$26>6(O{Rr|(a`M&;EU9>%QU zg}FSJ)1JocNcp*8XD%}w2k6$!Nd?alLC@>|Ov zvi=-dS)%fO8cKqtQodlwT$Y7Se)IQ-;vfo$`7aa-*Lq>ucGR`GuRK}(f!)k+_Q@Y= z_V<$DD-Yjls)5M5mEk%oWx~}pQN}S_z`}QniOX9}B6Oz~wzGQ(7h$iM2m5Ni0Qpi@ zEQOS3swzZmnp;?0#RGPpVtJ0)b{2inMI;4LfnfB~{3iRIc{`>rogN|bs$>T`tQocg zBf$#N@loSe=?t{JAu%|em*$UQASCUeDT^CV= z;*)Qj(#V7@YBDKDJ%MiJ%htP69rE<)m^NAS;5@~DU9-&mK-m>#$3oeA%d?838yXjN zdb+Drz-?U%_-m)2%Q(ZW){mRnEi2mWDUdKNL+EA-lw3LSG#`wH4WH0^jq;s`KfRZQ z_^)$Y$J1`1*h-=t0s}kUw?O2x!2C@VnN6+69=|o*@G9l5UK+yLOq5stD`KKvH{~RB z*KGD*sp2$-zX(*FW-NSy7F`2`r4>{N%8!v^!*k|{IgJOT9g()fVrZNhS=%c59ZW9r8Cp@(+EatUB-ze(2yeNr(!d+GE!%t zUN8<8j(9O~80i%z8=Gujdjm+Po zL6B#^^u(L5!#he<<-hM+geTLd^PSClt4otict&jq^1aNPd-3)v#IE;@@`H=Pw9 zMJAnMk4O|t`?Jd}G{ZRT{bR9ZhdWO>9-$Pr^rV|c4|G_h+mhRpSu6(n^r`Eq;1Qb4 zp{@!07@2_!$NEgU8Z6Co+r=Eq&2BoG(vq*PbJtFztXEJtg*|@Ll-6dMNmEl1l-CHq z7h2JF#r8T}rMzlyg{gjTN)DB$=aqlAB947g|00++?|9T_+M5ip?vn!#7!Dz{lrAy! 
zYn1eu+cTaq3r3)L-!}Jejw3jbcPvh7k^{Dn>$y+aay*zz1+GDbT60s0j~}iC_(INf zz3t~8A;3~XE$KEaI{MtQH=8+G{MJ2`DN1Los$1}35t)O}fROuY_y_f9hOqM>vCxI) z)>!ti1)$l5TUf);Mv-z~COJ%XD%u{}(*CQem^QZ#z(2cu_YR0N)wwzQf#{XCsgzuC zdj5=k@kimfZG}t3FtA5aYGP4-K%oiTywCeu}$Nv0{V z|CXtsf4XSIhBM1Mf2LzzjUV2kUKXo(by{tmUN}+FRIz4ie@#-P!eYC=q*v9sboAKZ zBZsYf1f2Ijwg{s{jP%5()XHw!vWlMiji#nszovWvv`LH{=%i~0a~*6aLW%7K_ejcY z(vpTo>DMbaDZMqK8UC;r*!+LG&s!Q6Llkd#_;g)B;e#xa2_sg1uhFTur;~` z&`N^RlF6-Jx1S%c*$Em4dTZX)x2yXz{a^j}n6(AD-CYec{vcZ-DjfWf0@c>SQwEe= z1=fvQYwgzE(HVQY>v~nIsWvlhQ9nLBY!Hml0F^7nsp4q$dNvtOE@B@fV4u-M?@0!* z7`THVu8|vqrZY)Ec{8}``ZD3gi$}!r65yZsUQ~bdh^W3H_F2XivXp^RPJSg~=FQ6M zWHXnnKNdi8cH*A~h62RZ_t6bTk;gTag`*Y2Au`QH_fR?IeVHv8I7O!N<1|0V)wG?8 zAg{lSeTB;wulbjFz2{fo8<7QeB!gT{Sa+(pgQCXa_IxEs2_gQqzCa0LQ0MI?yOjUx zF>ogt5|KU7O?%$3sBRM^ z2#$iR%5gN=_s?lkqrk^B=J6lh5kkD6kf;TauR+uf{1djCAWw9$QB-|Zp#S+;+Qr!A zCJ&j+L0<(=8Qr1w(2NlIec+fJ`k)<9o^d6dF`3&j3rp!;g03(EMxD>8;_cGon8FZM zTc99GHLHM2yBGm~7BEe9wMDZz0O3)x3CAy>(D4|p-4@~=%8ZY<4ETMRHyQxA6A+$= zAP}wqZrqU={|ba~)w}oyL^D*h);fHw-mgYGxb}Pi0MpGujVi_-rz{#-ubW6qLao7d zyA33jBO=sBNJpQzE1W4 zWs^mbN{1MYqwC&7x^>0Q1mP=EGLV5K(j|oyTf<&;7u&tIigJtLgb`PRPlS<1DCfTxP=Hv6)q8WHxhF zGyCJs!tcW4V@G`pk2p@Ya_4$dr`!J4`KuziT&m zPr$I$9nwooUX`YUF>wF`3xD~Yv&cL=KE&=vo*u_BP^N$&@<#?GmTJjPz=p93nrNN` zT_%N3-vcS4V-=#g$8D_6$VRHC5#{yAYqGMSxS;^#?@EFm_T{mvGWuHP_7Y&6(|y7t zGRq2!wSu|AhKb!Wz;2sP->*p?Hud$j_5**`8;@pt!y)^ic-a_8X*_Dw@ox#Y`arYN z&N}2Vu=tIA?hI#jBRc0!$EsXcfsX?_O}s|dQT0|a@;_GpF|BM^RBTp(!&l@->_qN` zX&&M3-euqF9ZE{%u&C5Z;1j0qIweZAZqEXQP8w)^67<}4?=0;C0i#f-#Yp7^sI4PSP7LELdjIUNe+7j?aTDt(&DG%qS4<26-P2+ zRERGzpTk(8OmNC+#%ZP;=IC}19pB}DvZ;~GM>)yRLP2z!4}sL+HGj-QRZP27Ha zibQV}cu06~@z`_rg45BHaSS9ZB|sxK=KuQ;T}cBCfFY%Ydw_T-<1N$z9-$zF(rS7I zqo{eZIK4$i05Rlm$r)9`K^f6f1%i{yfD6J!M*~NGSyjD^&m90knh9g|_y9=lV~um5 zqwei{^yFn&^sU4ClP&GRwS4ZPZ&&itro*q}J>Tc5gYKa?T7`p` zMDHHa^kt}VG84Lu@%!^OBWL{c*j1)ZW&u#m3!r2h3b!-neW;=<6>3?8EHi%3KfFBi zq1?BMaOu{G0Y?X#px=JbIN)9V3-Nf&vXTG)q5C+4LB>Z5+(aMGS2jZ$X#+dHi=d=c*H0847V>{5x6_x=W ze+Jnq7x+)GwMKb!}Hkp*as>)9ZtIeunZ0H!5*f6ov zS8ZfDm0X1WL2;DS0(tz3q=JMGRRUYDEN-_+*O~uSs|GHCg7>cf>lM-Q{8}>oA|@S1 z8pYikVPTwY(D|l9T>9W?M_dI}=J)oWFEDkN`V1SRuLUHXN*v!&`b2g?+>QfVD+Wl6a$I`A?7hq1E^ zi?Vy8{j1a|4Iw2UN~ts=okIaM^U-OX7 zWD}(4_})Is5zW3dtN-8WPK0%^J0jkFh66t~w1TF|7({Ec=#{i}#`8K-6ntKIbn>iz z(p;+d^w;#&k}K`*C;?_J6JsZ#()Q^2S}cdQheoSw%wsPCE&9G^GVatSQNOgF{E{`j z$4vuKe|qnbY55q6={OgvP|0#{F zIO$lJYRP_dAHN5d>hQc?F%C_Ub`Zs69YQY4FxTrb-|PG7A`-|8==)-*7}+?RF!xH? 
zUr*>1doKZ>JBdH}mISVN+bl>-PsgfvdBPJMESoAFtLIu|d-m;~T+TbKj}?EN0+v#{ z4Jm!Z6K%ndbq?ZN;!XaJ>t?tM_vSB6q}X*=$4j#*EUqy}m({v|*iqo*T`@}sx!B`C z@VM2cA@^hvI#0gT-*G6EX*fJwe(XFHpZN_kgUR2MBrUKjNz~$i&^}I(gE; zRz^D)Wzr;|&H9%U_ljA<8Wt-avDtZeg7vePbPuv`8*Mz7LHT?_6kd9ZJfuF0N$yc1 zy>=(#7WuzjfaBX36L7QV^yGC&?ScB!^rlGzACJC!cohyIE`3X`Ph(4v&qgbG;&{RE zD`kYLUU0#O&r}SuCTkJY0&Tzf zNF)OJQ3B&P%w*p_&@KB;I}}xXE%~pY^``Ka%KfHCwEQe|#`NuQTS0_nk4De)=EWdB z&hbR*@b^kDK&7*(?`PCOadIzeF}^mMBM{o;aV;ZUC!H3=Kg)?sMvD+8zYx6($wI_e z9+NRtrB$+bQp|HT(V)Lb7d2BWN;>e~+S_U_Pu33-%MypAF5(<t)Xkg%7+p*PT>MZ(bIltu=Ej9liDrj2dT7L5{;MvVp%{RaIYmMT^V-4v5-=Ar7( zGCRR~FNyKEnZD@+Ym*`Tm&ZW&*|a-H5itak(DUEadD^LYGa=VNJq27|gXR~ekKCHR z{3*J_t^Et!K}nJ?IqcGs-z9 zfv*Z^0L$CN5HPy7xj!#%x`K4F)B)aUjaua+Z1nM*fde)KK+sX~^O)0{SNctJf0?-f z9v=@~7;G_K>6$n;r`5zE#-dN2ikjItFrrv$&Sepr`=E#{@YY~ol`!OYi&5*%(lU8m z$Ot^@F-Aw^X(ne<~!k~^-6d|0T;PTG0z`b>o5mr9iAa?*j<%k$>BJ&b@P$gLS>bnH z;WLz5693$FfyaiWQpivUaZb7qMko5sa;+1Ks>8-sB?;jB8LYsgA0FPCs>ue9jPZOTUJ!5L z)((l$0gi+Fqsu-}V!PR4P+F`S!a#klzj~w_fKNPGjYTT>0H|ZX8+x?DXe${wIlU@e zE+xroN`b$782F(T>i?U~kP#37iMnzsa4-%H0|(RUV0K~gJ2p+;O%4Xc)Rtp_-1|o+oPWr=EgH5Fi!ln5d@`o^lZc*;XI`19k&IH)qG@&@20{2G_qyZCe9O|bF`bZp4Im*p*uB-S za{x_3%|&vYe+UKsO{%w`M@WIQIY|w?sbX{#iT%$Q3T9?cO`#-2M%zF->-=RRjJMtr zAYF4P`j~)i1Y|1p`~kQN3Q}^ucgf=9CscuH4T0+I{(@MeXp2JAU{Xj4&|q)zUWd5$ zVw?Na^em87!E^r6TRxyj!LzV-FEm9xcKZnp0M|{BnZG~?nY<|`&B#DR_ZWvf!davo zh*d*j(eGFSvH6M|=RLtgP|U&w^lwc+uXG0US_5jVs3DOz@HL80Ae$_?bw&$#MM^*n zezlIBP7&wOQ}W@e1~Qrx637Qc7&cH~UKoZ%^x?LQoNEDra&<=O9N$zozZ_ z!+$GrFwrF+2QY?vWPDg)im)xX>yZ!TlO9CSEnc$Q9p1>aeO)0nt~b#dV?#OkbeWRbE#_#uI_II@q zc}_FW{*I>Gm%N4(~L)Db|vpIk@r(>3-2J{Vw(;* z@V@&`y$9xR9SGpNL6AXw0KKn(Vi9X!z_)5CZx_Oi1d&2w%z+NiS`_3I+3gz36_5G8 z5H!(~Jdh)k`No`3No7Niv)%ws29@*Kb0iU8w0F>MM*lgRV6qS7M8DWL29V!Xp#w{q^e754DSL)7x+BA$-TW)K1y;ba(YKm573ufwiUK@_*G37E^#YfdCwdzUmmFxCC zEOx~juhdg_WvtGq=c3qNOgv`!`p-2AJ_&f?lwZjeJ#qh;{HL)om@N2RQe52PU3Ok0 z{yR;=ju<{^+>bd-4znNT?LqX=q;?=(zldFUe}^1rI6?4E@N5Mb2uw9eh!S}G%>Tp8 z3sQY9wGez610v;Qnj#z&InJPPr%;GnL(lntPW%9fT9CW207Lv7vmjDigD%k)1%8C&n!bO%t zj$eve$^6M5S$A+o2T|Z%t6=h{nO6X|nS9T~HM^47mnx#I5oR4@fg(iqRZDk;nQxFN zuyfDVKDHTg-r_R%up7xs)`PTSyYZByEI<>sGuM@gc~538SsJKv7;zR#C75(@m}PkfYOtP1 z`{BOsdSMV_$Zo~lFc7K0jKTL8ZW3Scb&?`KeSWsjWG%4F5vkK6Jt0*p47VRxCf}

a=I6Dp#TWnm?OGbI!(+*oIv)A^9?P(>hHSNR zC*DcYS_0Ij3wx9Qhg3x(_5Wl4HIB;Mc!p{ieNkNKyKOq9U}$WhfV8oXY-q#p_Zb?o zHJ}<-{@>vK9|JKpzt?XnFQ1J&m>tq=?Lx!!66XE-GWGq2(sXk_WBaIvW(F#U*u!K@ zW&U(Elhc168PR<0@4z%;{=Gud&5-xm$V2|?m&mf`v`vVgicRJyOE$<)ZLrN0vZ*h_p&3Ok|} zJ4CTi7C|2Uim~d5>ah`fLxyoy_{L<_5O!D&DwEK>PP*F*hJa(QS}5jk zJM%xks0i8L8EwYGU=3l<_`>s->%Hv~@!gnszOi^~l*IjGTJN^HWAvU9rk62wNXR@E zOlq}P`ci?p7#8z5`75yG0xI=Rhj6j5#>O#ueRge1=!APSrdQgB9n*tUFt-ydBB}J=TlgiI z{f>d^h+2pBpI>HZPih+jnZL)wwvPVNAuPl#DekmgA#TV;Hf9|0#@u)hoh@jz|hSH zOAYE}N@ikm3=fG|{4kr2epcWtbuFje`Q#Zu{(sqimLq4BAV9S94yc$SK zsN=4&`Zwor`0wu|7$V_?DFe5O@ZQ6-=eh?GL@rM`gvEod>TtOU&KA=YsEL;&9Xu&|!SQL1qj_?d71#(FWAE-+4MiJPqegP!Pqm z3D{%sb&eOGZjVE~)Di=w61lYczyW3k92B9po#H@XP!0;HPHcw-1vTm>m~@q3jn5z< zj%=J&NWb5~@&Z&iHrTMXAfm@ZRsUQJM%c%_g}DjXU6BGrqmzDw(y4Wxc`0#0JIi!A zq8oA6E85e?=LvDf33S}h*q#E>UA~&jOa>wQURetJ`)D~yhIH_;8~dZBy7Jmf_38CO@UI+7DT_50!x*GE8tIM2UC__ zg0iFxaO@sSJQul(aKd`iv8Md^_hpV0D#`q<^j;%67NQoPgHJ#Ej=8qeyNvbBE`WRb#>KPa3zaw7!7Lu^^u^)C1EUR z)|Yg&pXf(mF>4o=GwpK)AljRsYzmYbNTxT1!R3AiXg^bj&GPObYDr{{=VqjQpa$L7 z%L%6fu;{4V|JXm{d)AwuP2%`0&9RLdvzA*}?IIs~+tFhehToCh&*kC_7H!q*DydCa zejXO63Rg%2FY{J4lMDkGgo3C2QAO0h4KndR^R;NGn;l0W^Twq6LPSDR64iL@W7O7j zerL+26q`7)&MlkMa)X&!Vqx??>h08t%XX3Snv@CA$b~g;gslbC^_#X#iy)%ZvAfU3 z%8*~l+q;(S-aQe%IJJ3fz*EpbBo$v%h}}CfL61FCy%N)qvq+Jkw;#raz@8@;%TQvz zmN?EDB|bk1w>3G(He!>oF#O^hyBpX9wX^x3Um3OmlgkE7hbgFmlXkhKti-H)0_-&w z`AqOock0B?Ibpd$+aAUSF)J&W# zk2Ea8t#rVikV6IAt75>SrOHMfpn5Cpsc^Mgs^4#ovF~wi)Hnkrb786m2nL;5u%mqQ)i#0!NClUBi0Fua4vBb_TCJ zG-JEzvHQRK6d`$eeYC|}weFRD@@)wA+4E*Q=)#Ii}ve7o`EdK4mzHsSof3h z3}0^5fE4n%Myb6aF`(jdEb>2gx*!%7%V;9fr+$AND7%S-_k{pR=i3ixkUP;Iguieq z^HwI0FibFcuE0KbJeGL>A#>t*q-km8XI%#C=~0zD_xI>}?*%$9YfB=%YNM(pQo_(c zWe)r^!&fVBYEE11V#>Yn`EIx|u^7+P`=l!3Vha}R9=~da?BuUGGUkxeqXK@U_dB%@ zkL0S7MKZx!Uqw)+qq#hI2N4DXlPZ;3xtq)pPG)o?CbXDZC%81y=f1%8JH|9;Oq_4PxOfZ7yHRP* zovHd}Jdz^1$V4V&Jjyk!D6}%zhMbR@lGOyC4=lat%E{+5zdIzuh zx87jK9ifv%eN_xPU><`4#zSRV_CiR-hQd?~>0|~ZVfupVgE(19Pf{cnA2*t|{>&%1 z@j^)qJEhR8G%q7xyET_kosnimT85+Fz|aNc2Zm6s?0*hr0@m7}3Pa77v%!zP6w3L9 z`4L&*otnrcnd!XBdyVj z!C0EeFM0VuhCm^tPUV=xf?04B$Bc>3w>s;fy-XhkPHtp2Vrq1QDEC!nnhV23*K&U9 zs<{`5jRqJlxrgzl35;0B+tTUOSU@bEewU`Zr%%Zj!v83N!G8+mM=tA|zx%88vGMtn z7G^nasa?p~K6OU;?jhME6`ZUXLNz{Py`3Zwb;qvE^nB%tio9(~Mjg!4^;4s@&&S=Ni8@7BQ=U>PZ?Ivm>0vd2gGh%|UK5D||x%BKn1!h5N8i z65tsHS?=?w8M5~&zajrAINvplx^}B*>>{dz>@wT)Tg)T+|1O8u9s68o5W!#FN$*(JnY|*#yG#Qh_ z$u7u}0Nk>d><`34{ugUrJRdRnFht?08tZdvH)axk-{zz^9HUT1`LmAwoo42`Qe6v_|}CzkR?oJ3qntVhd-D zlrQ0+_G_G9G#QV|o2sV}Uy+-RALnCD8-PTt+NM*=mXm0dQh`N98dq{HoF8Y2$p=@d zjj&a|^H7HD+nezZ`(%yD{7lA95tbYx?4rV(fv0nH=d7V+9gM~gDXu#jzD%5yEn_Ue z$pCR_!yNPXWQ6~nUbFzuGQ*0z6uH>D?}`nf9UP|&I`4f+u0HgaymZ_HZ|8nk;kcHe zuU*)Pz?i2gh(V*1%l!5!qkNI~t!9;)wAaq%S^mPuR2DrG_-E*yBeojXEbR()%5g+P zeYyB|LojNl2X?#RXD_{hRJHD+&I zU?U_!c4PijRd=A)jMXc&IYiq;&K$pqn`%Fn?+z(GRoK77kzM-(UNdk^CVzP=9@PI?Oah3UxxR zTPYWNo8#>#ErrV6f2RMRYLdC79;>?~;6nt8!jqOdTf=M7o7;yC6O=EvDzp?(rIf+x zCfkt>BOm&F&_WyqCC9UN3i{40ri=fU%izC5Y&bkYgic$zh|G8URwfvlGEUs|H}*!3 z=e=g#sN1zb(|%1s?^I3M?#&l&9HKCOMposdUOLR`@cWzl^!PKI|BdR{(Zk| zrL`WtuA9&F_7?fL-n4LQ6^zMTTPM2>HmkPEcQ2PDuL|Yy`tH=3Qbw{};oZPDBV996 zwJMJAkpl-Qw zPQEs3Y@Vebr=bYUZt_v41Ls%Hr2qRE^L`4yg08j~A$RE%S%bsNW_&NE2*dD*K0P$3 zolILl2+ubt75XMis4eBQCb&Lu$kv@2&CYpVxO39@P#dx<&_QIX^^0j6C}t;qF$Rwv zST*R$sHJIJ6_YUI6=z#SY2}Z$8;&*Mg-up(%=W)mR1zh#V6Iu(IbNtrI|*37Pja)8 z2LW`(TIi#pZO0in^u3r+jsGU};Z;`SqsS*f$LKrgm2t z{!?Xuhm@|8iE@L&i7kov&sqEnDv62EB|g0dyOb%X@e`Dp-j_Q(x?=#o0DY=R$>zkw zvRGDmjI=Xw-#y;0WPGW^f<8ah4s%o98>grvCKE>4O&g~&ZY{Rlmb>F{CXYF6&nk?f z+pgGDDT$yGZ~1nG@zPan5DGCI-Np`#GVHov3vsas0T 
zg`_Lerlddm2)7&gjV@^cw!x`e7TWzT?sMNqf3NK?CSKazpQ3c1ML(JT+#8j2l$VdL zo(OsxJm-51-=aG!TR|tp#j!sSTX`GZH(DoIn$z&y`8xBl&01kD$B5@3qWg}8Xn;6+LO-|Om6go6ys^7r8) zSntLSgG=CB&8BeyQxgA}=JnB+MQMUWxK@co7!yCt?&|Ejz12A4yY(wQcjydD-oYj- z2cE_AShenFF9pNnA~x0sNic_2$7M>*OKjR#q`M=B@st~(>jh;mQvCVx&Y~5Xeu{qB zXZ=C;R5wps9gI)?wn z^C*pfNTDD6nQk#+!7(t#LZ+lsAHKDZFBv)C!L&{NGaV=M9DRk1gjno{E$ndDhzFG|sVh*<6(dDsH#5!{OWR(kmh0J7Hv=*JdXsnXrO4!|@4!sj)^VPT z_u_pt$|I$2VDkm7fN`yX&hX@a$XvU+b9j+>!4}a4+!=4wJ6l)bcTg z`gP5-t@ZxRCLtKC@!U+vc~|Y@*v!gh$2zydS;(eub+W&EXl~kfQqaA=B4lVy3_e}6>XZkadX}MXXE>-4W@3>6VvfVT5Bpb z`xvjy#n02*M0;Jw9y-Jscdp(+Dp@_g=8p+Zi4x~1=$E%^RsP&t?6Q}pC|@)PAF*4#zMoCU|T%dod5-VX0?+)*iFX{_t=S3^SGWaQoo zx<#Fj`9+t*ywX5xG~#<>f^hRa^G)BB^P0!RTT$%UWo0#XHufn(wxN~eN~{&VhP91b zwOhKhg0nU@=(1uTz*cuCTp>=Qa1{D;7BhlD66L$3z&HBG;i(SCg95y0#+MdiF);@g z{YfSU&<0a9%1)Uv{l7(1Alcu~u;Pl8LLVK*wPMA#Ggcn+3&0mJo90SNR()mTLw*&l zM3j||lgIVOC$~~A+U>E-8fI07GmlA5bWG*%YUg!p;VH-LI?Oy=XrV$!McaG+^6a5% z+T_UoN7B5Jfy2#s3#i^;-zz8k_TT%8(}i_ackcPez+u4YBWX0SHi< zI_lNB`<1M#%-0W?Q>eYRvGJEf?+fBE(!oEw8k9D}8@IjA0xI%*Pd2$lMSqL(UMpr( zh%^w|Ke9qO56mQRXjeE7T=)9XJf*o^xsZ3ZUN|<7N0et+$eZ^x|Ay;fZv{GpL%5WH z(XZ#hs|U2UXK2Z*9T+s73#bsy@^Iu$glZAHEEZ79)zw?vV`1~H>=5oI?Y2j=_os?5(9uX#LUrzSEBQw>(` z9Qi1e=%95~bz zh`o3%&&sZn8!tXVC8zgrH_i9@hc@! zeVr81gv;;GFMQ_tUm)O4P9^c8Ul3E?M|IM)R(A9?;`Y6I<+D9PmOq72rn}d@ww5RZ zTqnEUY)y7nJp|>ZF)P0Bwtfj>AfSHT0_wI=XM-_{eNl&P?Xy5+^YUq6i3j_y*CL%LU zYnDGL=Nm7Dx-QUf!cNO_g#4zxb6rg(xb(}CUg!G8#XmHd#P_?(e=V`6XfwLSlB=Hk z4QQct>}JYyDPV_PcTmoq8oAo#9}Jv#lQmII7@`9wQ|%|qRa;+bzM=EAwJs(#$z)@b?hyI>oag3eoERHBV0ZSdDKM)iS6PQA&# zhRO(^-K7dl&qq}>;}AQ#gw2Yg`cc?c^<&!2vD3qiJh*CUtXZgb)dL0Hn0WwN$^V+K zozsM}(F%>{fd{4zAH0I{bE?Xu0qhlT;ZoBjp)@|l-m=T6(Yk9XxBzU0rw-qxj?IeX z8eHMpO#i5g7yt5|UOgi=p7V8=^{}W|rJaX?dmcPr{pY*DS$I7YasBd590}C)V_b8a$k?ota^r5bKK>s^7f60pOJ(9#4gMFB7{cTyRXdV-M+&~!m1Gb zgv#diK6}mQX`&I#_x4SsCp~N;HU@LqBJj{k>&H^0zN>3(z6DCcpp znxjhs*pBK&>MSx=4$Xx0OiX$8%{ZP$1yZPfrp={UtG07<%nrP&<;n&1BM(NuTdx>; zmV3R)(?<>WHEcXc5n7@|*WUjv1XC%ITIoZT@>UN|5kO9_CcTXPqLC%d?A#kBs>D`S zsraeJ^aSN490aL%&L!B6(UWiJ;Z?ncAeZ!COrH#*Imn0oLA4UK|JBs)EQM=N2=PWS z#tn_hN9((Dr(PZ(AJ1+}Wt$-7v++d8oAcyW`BkFFP`%ua*?`=1!mVIUqF3yl|Kh?7 zIpx0E>WW!i86(y**q&4&B&-6*d#Ox0Ys9Lo>lP$-B7RLj>V)WSNq$IU^;;ib{MXN= z1NE$j{GILUw@#T-;hR5*`fi}^6xE*G&@*;uT3oybT!_JJcXjC70*L@a;(>}J+ULy;QoEhGIv7d8+#QJ z9cm2okpW#zjUjOE)>PqR+F?eXBBwiC(3se-j=l!Im>|tEQ>B{ni&R(JfDb5_?z@!N zPqh;Zgp7avWi$Of*=N^^Q7chNTDMcWDK0=z#BDb=EAMSa6N>FSX-OPZ(k*(XOvzD% zlc$p8d0Mg2=3QkKB~-~0*TlD3npuA0Si-@ZdsT3Y=$X`n%78HBT0xeUlOj7$rGwQ> zPilkrQkPYFfTv-JWkv+8Xh%W)&MhcMrEi6SPaTCm#jQ(nkkkHodLF)9-gMGc)eqU% zbF(c^qnMaDZfGbiU3V@birNHmC@Mvy9BD9GRAG6beoO!kB%a;2+P!}5sv6(>1>r>R zF2`e3dyTl2s?=eU#Kpk^W_UNU&c zl+cwtoeY)hzO(|i2m||Sf3W(+;0)^HI%L(o#t%v7l!F?ogf#y8+ToKD!dz4^c!ylPS`>~yDxmEj7TS>{+{bO`023}Sq!iWqYbC>> z(Qr@ai>Z>$;Q7tMG((l z0}kDN4>7HA-LSqNGqBE6j}e30{lZUKx-}sNUxMUOc4$bUXPGGGt)BIlX}zELe$xn9nLm#FVddz!i&+dHotP9Y z-K^+*Y5VK*ZUWL2WwN3|`E51pMj#|GD$Zc+%isuX1_kbbNlTZgT$rfF%Y22Xqm9CG z0oYpM5NxqS3B}6Sxp$|>q{^warg$QON6*NjqNe33=|$(Xk1%Ye_-orl%dVgHvq6%x z>QT4GBR-UGuM?Mx95tn6Z4KwgwZ6_IdsAjU#~L=Hui@|3?V+wcf5~a@DJya^)icFu zBto3bxL%dzd}N?N(FY37TxgWyB= zU25Su;s`4XwUNy+IvfX+v|oQr^~#-+>&|vn?GC_cXr+{K(g4VnU&qe zoul9~u*bVKKb6V-ZnhM*qUiVDRyy zWujhyYL1Hdy!+jW!^jrcmiUUHZ%JSk2vdOnEOAEnU=4D%*m@uN!A z8~TbEc->H-%oVk`cXYiU(x?7ik$hAWpX85;jb=Xa*0rq^nAMR+5zbLyP2$Iosd}i@ zoNV-%Q2=b@V8(w%22QEcTW>9U77Wlm9GsWcwy}6o3LFpG{JYBqELH!E;8FBQA`dc& zk^7&}2|7UHY6LWNm9Q{zN`Y=5l4dx+do$D zGd^EU=(ydF1ey<{rRB3k5TE;gz7X0ZwQ+DV$$gcDgiRLp-k`j1{yHkpP6;|B*P5?~ z8jn#JEk8fmgLXRi{vg&Y*L{(zUss#z;55V}^>wqxo?vYIrEmGmev#&9@%PQZOQVw} 
zj<vDD$%(fuU_cwtaz^sQq<|&+ot*Cmc}@Q-&&kzVbLmZ9bp23QbZ71bT_)dI;0zg*B|oLfBWRd?J^m>Uh{LwZeq4yC9vEB zudN0YBeGKDYmL+q^0D1;hlTToo8X!{22m4}RDEhR1)$q>C7uZYt?)TgZb6jZCiDHo0 zz$y<58!jgpymzGas+_W3q=}CMDf^OK8xWPV9bepJkR*5ndf3wJbl(Z!?VwC~WaLjw z&EN9rvCUoa>>Ki!W8pV)`GC6f>l?h%b5?uJ@b3vHfK39K72d@~%YiTD)ky2{(_w2s z{FtonX=*534O`MKw|>0_V8fe$yoa!z5ctS(VVi2`Jyn0YC!kj*ng3E{gfy*n~II+v>f>kMXA3m4*qH(7R!}pl#m!saQ-v>8E$L3{AX6Ag+hkj zad5!J2yH%yZmSHlS|Po>*cL1O!-(g9G4_FzAm~Q6&osm0eXZ!kCh0T>4W|O)7)Kmt zLkIngSzJATZy4bqYi8BkXzD(tM~B}OeHEj`oz6!Tb?Qt)8{+Ou9g-p8=maLI#!H07 zA|aJV4B<0QVIRIS)xLJg#rJ^~@@u3EB-u60DUdX7W%3T!=E5aGa@qxGr6N!a5q~%tz`t{Y5%CI&lQgV}XW} zuuFc}?X>(RX1t7V^V4TGHT*rCxR2}YyLmT8p)0p_f~$M!U;P=$e1X|**|%*ACAYgC zcaz?%@e=AU6eH~BTeFkIFjCZ2zEy2BMreqG`MNozB1`}qruD3>FC%v2#e+3f#Ctb- zl91ZjkA4$Piy^szyB@G?0`-Ugid5qA`z17%j3KdP#q)=y_zSwlR&-}lEK?-Jln2_RCNutjpzlKB`g+M5eIXE4YLx{bGX006wsO~fXv<_V*3yn2E9(1P+!Qq?}4?M}%yknI!!mF*Cr zA29G&0C9G9gw+{iV~(5OuW9?*-KS7sAY-+;dhcf`zVj33Z%M7X0{zn7Pbr}-Ao8q8 zU4R0}V_-iZ>W=TNkosDZJRu_hF%PbzPy3+T;uUrP3EsHR#v{vj$MPfMyP@}P&{?0_ z!zl4Rgn{R3mMcKwR=WIiztPzODs?)~^=yELkxu7%{AXs#;YyN6BNMT1V~Tfbe|{!{ zL&#$DtN%IiWdPHxJZD17?EZr`W1`FXNES~-@%RVzBm2=IX5}0 z2I>U9H@Xt+eYzkjyzZ@lUSdA37Zu^~kdB}P6P z_Q_9`@LHW5&YkYK$fs#XjfDur?0HGYIOiy*v!5^(zEQhJ+_%uZ(na>JsD%$1CT1#C zz?at}2xYHUTY=rLc9jArn5k2opCL{?+{esiq@8gBVbd~0ZUmw2svoWn8~g30XPeUfqrb2})S8(G>7PEDz@j<8mFd%Jjb zrlG;Wp%IVA_ORiR&(b4h1Zjc7PBJEr*V3X)IL&^GhF~=JY0lSt*|6=rCo<}1?t2#~ zN3``yX95@lcQ=9J?i2rq~dM=vI_7# zF5A-*FAsNt1i0E6P_lp)9SQ9L1oPDOUfBs?&npEw@5ZIacLN{of9-Y2c%F6*2fq>y z6oB_j*oiSd0I0T6cQg(6Po=EqH+X-0C6@Ggaa$fC+Dn z#2B_Htff3!coeq|tYr=>GaaO=e=YrdU*&md?FCrL`f2aP`u=!ojK<*<_XqpG(=z$ z%Ua$CIY(-l>X*IM|8B}ZuLQ7g=FxuUX zn&S4N>q0+LBaY!erAB!ki}<3ylm(Xj#Uqc4#421P-dA9g$M4EgU2kEP6K`w7X|ns< zq*~ADPiaiYLdztgPEh%bJbtVG%@3kPn-jmQaIPgz@ApgD z3t03y&w$6ph!*OaM^OTyz{pq#)_EPdnmsw*Z(`k&m2q$!P-keuf9E6SvSzBB!0jkQ zJ5RSxc0>j;P1*e74zb>g(a}+2GcT-HuZnPpjh?p+>($&6_s$N7BttBa>D3{R6WB~3 z^KU0p;Bghnh@bCf+g-k@xZAAay`)PFPwqU-yo4-#ymwkOKs%V$03ZwFL}|uV0T3GZj*9r3gZ+HgLR5)%{G??C?|G24D~X^ z8>5sKt7lE0I|e*8CBeNnX%fCW3gXQYSFqT0nC!5O;Ppu-@1q4jCd?xq2Ig9XEf!3V zFcn!M>v7m(reS^n@_5>}MEU}T@>SC%RRWEraQXVe9x3PsMhRQ~`WfO^1RNWQ;L=I6 z!P33EiLMWSjgqtmZroz5=C_@8OMY;P?%j^k<#z493KqvWCE|l56{a_nf4e9!JIp$q zRAf9@xB4w*(u1rDGMcqfmqkpg(hxMRSeAm%ntEIc{eqO$EIx6yJxgiL{f(en?p z*J@Ux{@IfrRw+oDMXE~z)Q_r7_r89m!=hfB!xY)Y8=G0Le8V_ zZC(K*(-}xwLh3~?bdsLz$lRALRx1EL0cZuwQoB{B|4@vR+7aZt$c^`&Vwf=#0|rj6 zmHobEtX)3HkyRT7QpX6(-o5S6jsZnywK9b?%^dj=+h))%3%RT+U>{caVU3%QpX~e- z1S*rstgem-pmL^@6G8@~V+V2D&a0HQW4z<@wCjIpa;oZdr*fs!UXcW|oR!esj8?_{ z5kWiDv~uEIw0Lnvgv-&wi%W(yM;QAGVe-@-F~c;^j%O+7`^!r=g639mVO#@S)_YYh zmX{HCx(%nXp*9i6rRht`5W>*LrpFJWq5A>nEZlPivd7YDvSiogSi2UaMT~cuim+*> zd%bzUkEsdFP?Y%Le+31Q@NFmP**UW0k`A>tw#l(_Z|DIKrSMa8;P*=1Sv1lh8Du6M z{+dpP-*`(4h*(wqqm`>ApVLV10@R~h`ZX;UmH)%sTSi6I{qdtpNK1*JbR!+oU4o*3 zICM8igLDaufPjQZNh>NK-Q7rsC^?KUG|~b?!+#%relPBQdDpt@-g)uptVi+8nSJ*D ze!rhm?SIrlLLq*WG`!vfoIhaSW6mOT?W@y%9912R%4NU9hN+@RRY{K`p>?qSRUA?j}>6?EsD zzH)#UKcfFZs=}tgJBdkRMNURGrmS4^DmU}kp`VIBGLeoZ%6yMFnL_S`k-5U*S>sr7gaE=U) zjQymRqE3$)mzLGz}?u0I9ranlm@?0;C360u0%Buh2=lU_e?EoBZzdy^#F!SFFtGliO7zUvO=-j<;g{T zxRK$vQ%9g2^59x{yhQU(z{uuzilE~0%-HR7KjOKIJYm%0OUq7-`wX8KjEX_#o`;-Z zJt42g4DRIlhiG2+VHLP>mr^Y8Br$3fxBSfF1I<4Y)=dhCm53`ZEAdg8=9D4a2srKy zcYo`mnqG$EuTuHppVB%-=;zoLXFPX_X&v^{@b3yAKI{6k`d~DQhU9vWBdQ6z$T{r? 
diff --git a/Health/media/container-insights-health/criticalicon.png b/Health/media/container-insights-health/criticalicon.png
deleted file mode 100644
index 0ccf87fa9ca9675926e9e86c12471044f6255d6c..0000000000000000000000000000000000000000
Binary files a/Health/media/container-insights-health/criticalicon.png and /dev/null differ
z+1M5iUzpJRgMpG6FE1|-2^SX@7Z=x@6$c9!mxPN;^8(1v)JQgd!4c_DBNl3=(cxu06vsa5r(K^%NkE8 zZ!V#1*68@r-MPXbE(yPL^Vn`H@WZLbSiI?fSbimp;uI(nMn;kR!=Lbu4#?f%3K4}e zzHNrYz>@q=p+S6=(wG#yjY(m6=F)^Um9?JV0>0`-df%Indja$nojMNDHna zq4{ZN|2uo%!C)R4ufH*09qkxe|L%b1M${LG*tvS4w&;E%0ABMMg*wl_Fr z5B^rL+xuI0x$&J6bXVU^A{M?SfCe$|lM9|40e9ePE`v5gyC1?ThuuJ|@S#380WxiD ztJf;I^|~7#2y!TjB~QDD(!J?w*If8}e@{_qi-S=$*3!2#E>G-+?@&sC4)<%S1K&E( z?$c)C>@Q~VNjpnBquq7*T|VJ9TlWSy{k@BrpEo_@am1j`7s1B4CWP{`hgLVch{3Nw_8t zm2@sJ-3vfQb1%aiBQy+#>zZDzzqN6`J*bz=A3x#zJ8}s|WCkMkjsNV1CWcE%^(19Q zF4W4l}l|_-OB8ZdZzZ8=YBCfj7j}TtUhV85rw6w%iYP2jM8~Q>{1zy?^YRXQK zy&RnFrx&sSOR=3{0qgL`w?jg<|1oBc$#BLMY?xx}K7kF3_Zu_;S&4Jftni=J3Pmh_ zKN`?Hl&jk(uFOtkQf6ici(MHR-6qU!$bf#Z`l+E z6DLlyN!p1Ck-_sOGxg4-Lt-TKlF(RDxAB0W)ex>p2TsWQ%$PQd6DG>7(;NG3YnlOz z#}h-2FI?y-%NWpB5nT)w^shw|?gBf*2HVScIxMtYOIUI zBHEX2WcJG_q4D0`neM-4Sll-BS}9jxv_3NjyOSe!hH=-j?YZL5$&?3APt16W2nZ4p zZHP&J6lW_9pr21XNUn!m&pb{-FN@`d{Wp9VJy&>C*jj^ZATP$gF1fx0b_NhUc%Goa z9k0Ub$6$rtH^M^sR0K+oVm$cHns{GnB6@QhV{{l-Z7;`|YhHpYEfg8|=M6&rwTPfX z?Gn7y%QoH{0iislw(Wt5L^_4Kp{1WEKWMUdtpn5n@`_q?V@>B_5vbjQfAu~yn(3Gg z;B7{=*WxT6l!qY5p7u>H!`cjJv^7>B;JSb|+5Ltu7VBr)Z3j7eezcc)E@QI=0&T=i zpyJviHk&d!hzYzZhIp zkFjg&8DhWvqUjl(#8A08=>;C(86(qiqi53vD-F znNbMBh_fi_+ll@I>#=F$ov_F0Z-?hvkSwU)I=PN4c81Dk!_AOkw~qSHm%aRc@*Pe&0lel1 zE@2C_lqsj~v;93le_m_5{x!{aZv@%L;)AmR`r9A-btnLoH8LXcx}hJoh!hhQm6#1Q zk`aI=c6(W_D z)jG6DOu9e_{O4S^c1r~^@h?LAFqjnDK995oA%^3D(5b{=Fa7Z*oR3dEGZSEz`{gr5 zqtZ0I)L_X&dVi4q&MzVT6%Ogc8q(A)jfo)wOn`78p@WDhLKsmmARSAl4^kqN$}wp@dGKC7q}(@*EP3Euc5mHY zP;HDcplWi%5{dyfgPDF%E=&hN&_WMM=wJCeV~}ZG0d+1wstaCWy_=1B!N0J@>Y|MW zGh&R=r?fc;>;g#St_9N0bby%!K-AjD(EUOjVs33s3jvQq3Vs8mU;o*S;~%RN5fJz^ z6pJ4|N(%GsZDPT!T z?ilc5bR+YI|6HZA+kUzp!W)bpd^S~}BMC8AUR#Tfxg%f6%<;-r@L_N%W;#3nM5E2G zQmqPmhdZC5owc%{z>^Id9sSFoI6_Rt`Fq!kbYJ!Pz=qj97PBoagwF=LEUr&5&%sb$ z_7kY5`R3XNdTE+q_=tt92?=TmtK`VU^+wd_GqfAwLQ) zca!igL}AE-ns7emvh%mbosRUQ1-e!%n%NkI93$QN5@qq&YQ4^{C91`6XED)(yc?jS z+rN|ZXNQQGSy0~cVwHfguV|T08emz3Fz6Ffb3`srpZJKL}0E^0f0+r z^*XTXzy0b1P?ryUUFyo())oO47WOLs=A_*U7(z@1;1LMhC@KV=Gv zibgP_(Ke`AgrQ}Nq`Gp@m5>qR&U&)q-BTl<16!oGAM25(QM#K5$Q0_G4GTnv1EPz? 
z4({!1WW}s?m{mpEHu9dtRVvhR0@T@!S7kdP6~%}zba+Fbk&hYcZiLry=$BiMIo4*% zwN7Q`{q0KmFVl|@NKLfsVRirSUg*xq`{ zA$Wkwz&4M@z{VJoCeeHVN>+Z+1a=suWS=l1shopI%5E(dz9VFO>C#_s8-rRMxhlb{ z_U=}hXm+q=*4V*g+?NMhK51O#-w-&)F2JJ4c>x*?K)PEfZ}bMQ#Ijqvzeq|-l|LXH zF&VNlI0Rmb-GX@+K678nyt!>x5%rUg4jDJOXfiN)upKg07hap$8wJMeTPBPNz z{(OI)3Je|qh5cRWP9Tz-*UI&;m@H=);M1HwPQ_)n?g51Cdt871{E7Dfwr7Z=zY#??BI4q( zf@x1H(gV!_zWOEfuf8+V;+As~P^z95lFtCX{q+5vu}o!+s}%t^R;M>fy4c~N2jpzU z4+Z&K$V`i^;2M7${wadaK)&`8@{fdw5{m5_hM7-nyd*XDFRW)R_K?j}_z~(&(WpWGKtm3zlwZX9$E2RJtBLDw--{%1TpWoWP(|67U`j#UTM6o{tDU4wnwp* zvv2jCi?_I&aLDni0ZyP&_RZ`;b^(9`77tL9u10XXUG6?a%!RXGiQkQZU%lKCST8#X z-QGZ4m{P~P(ja{Ur_PnLU>Mls533x`S7^53ZAsP4Q81)f2JQudD(__oS*te690>zI$eVVJ16cJR( zL(@_nh2>DEle5!~@G-mGovie_Zui2wg$uE45<`L#`fMRte!Q(tdrlr*VLzt0?IzC(*7Z)S2|6 zSSt@#O9~6zO}78f(qoULpBZW0Q-F2?AjSZ3L2F1uG#D_E4JoA>*?{8}iMtb5Nf@3{ z!%NT0PfItv@PcC?l(@^tR@+mfTWC*r8wj^2`V^}(kXga2Kw_7I8F={@p!O2cB3w@9 zXNo(dfO*|aw>3b0V&PKQI%pDLvsi&WZ`tS^Wjk9istvvO!Dj&|aUK0#4A>2z6bb@1 zDk%;-l!B_NDy1J7)GAP~Pj`uk5v1<*FeZ*yLJo?v`dK<=x8YLWsIK@LsNuXSo zR_~IQpYN|T#XL(A6}p4Z;uL0Rw94Q~(vMM)VrD93>bT&%ncJDwa4X?nYB5rUiUlS7 zcmu!Y`UWBQsZ&C`Q$EWK_R?m(DAp_TDsF(t*>+KCH)2RVaRsy;spdhPs8GXDcwwrP zEWyE_s-qqjB6aAVi|Q)7AJ9Zdk-_vWEP@`;gDW(Pi8>&hDdu?dI$T;sM;1!w`d4FC z90?S;QYu#R!LXUvwT_~q`7JE-U2DP5a1HV4_MRNl% zslfm%Jr3CmgeCySR{*P~4tD)9RvAxD!vbwVjf#UqUD-ow4{bR-H6^drE*YMfN|7tQ z#PO^4C!=AYcTaS9cuy9$+aBzvy03{YX=@WSDkWb+81K?(C+0WOSj-B@IsrOf$sn;Y z{X`i+C9pmd0`mR9!mRS!m#h1$eo$9F0H;&C^7=kX}KX=aGg%*cquo05Ky6HRN zia&(&@Do9uPt*g>*i9v1YL%>B zH}pnun8)DhJw;X^J{@^s6jl z&j;S|ad~Uh2uzY`WN_uRh}&SNM%A$9_h=KPx>m3m%>^1-TSs~nbMN%spw^5ULQ_V% zD2VWG$H zYmTg(7Fq%(S@h<|=s6en9<{7sJsgInmRe**H^g7-Cuyg^QvARtL%=#OoxR@-rETEh zYIpncG7bX>~}=Tcde_j$r}b=mVdZ_{+U zbpW{3WViBzda^!3{qOTfP|Ema^3!X^k*Oc!y;NdU*%gyut%10Kh6(#C z>+8Cc*c3M;gQR6d`geu;d8o&9E!u`(fL){yApL4_c^IRR^{S|WJ*Knn=9 zxDQ*4s&a<2On>?q@Kp)!Z4XXPB$>+O=rovsev&1kBmnienzkc;jh(qHH$3KDp}55V zxKW~!&xvr_)-4_!@wL%B5}IEQg?5zs|IGcq;KX%@A|LW^T{g!znf-nXCyHD?TA7|1 z!<~d3LBC&gO#R`U^A7mR0DO7sU+DjMTGU6m&=1Uvq3SPfL~f{(=jKRjmfR)pjF@~; zgigiI$m@_Ni7so$$r_hkSo+_@0-%}uc&n%|irCGyjaYrnJ;3KOvWNGIxCfe$mD4ZL zpVgUw+4A|3RrJUuyz2j>=To~ufi^D=6kL}&zvEK8|Ru9?|R`#=t#;Og=Y4sy1*M0M>g$|KA9L@2GnycD7$2)hgkLJI(|*=`lf(FFy=T zEYA#u3DGutkvw*s)pnaIsUtq(VvgqOD3rV1rR+Rsry01dNLQP)v6wf~J$-iQPhQX6 z(};0<{x^A@!ZJ#0{jcD!&w#_Gq_jN)k8t+Y_nOr(rnmB`8NWU2Oa!UL;%^ktDXz3b zl}l=?D%(Re&%yDSr)TO9YP= zItdS6T=A}+GE)k6FSgOKt~KqV(`zw6M!TK#n2BqBHx3_mZWyMU?eyUY*r11PMUPn` zNP(O&XyX*?9$a}lF;2A!>>$Eqo!w37gU-JH;`@K2JoutD>oF7gsO~2TL}r5+jBd9| zTQD9S*Pq@=q1_l=W}R=(<`7frPmqc4G05Za{{_ocI3||pZpHXj?i<}tWJffk^qENzYS)7za9h2D zRE2{h-ywf-`R%AMva|VIyWxTlqL%%wCWbo{LG{FVN!h1JAPFyEJZxz@>HuD$@GiwY zwP-4Sz(6nC^N8sgO(zONbmywoe9`s`=q9OW_5~p86m}rmz!-$_*cw38&oMfL7CJZ( z(859>_zD@5PdJ9*G#mh)ooia%8a~Q=CfmP~w_dVfPpbn*wUm;p`Agn`UM@Qg-xMiX zUOO^zYSz%oB4U67awcEV1@7T7lU_d~*y*O&urFoPc(_qSSBw28zD^)=Ke^GtEbc?x zd-=Bwmj4ZiOr`d=3m`vmY99T4=PLsFTWfplThNYus{EFZg_|$%=1FPx_$j~GZi(WA zsWCLH7{B6|<0){+QNtXmtG*1kbozes^liqlEwmCaK8BN?OS#f%z(H*wsDaX6r0?Mn z?jCxPZJ(yQ1fS4w-;1kE#c^VN@+5Df*AGV)4mF+({dJVZ@yRI}ZypnB_-qtb@i{~P zYWKm8(dJle!XkgDM-kRAe%ftOUgXj+%0RV)SSND=*k2`IA2-rcT{lp>&vb&A`n!hm zTmXTtC2p6lkGTGh>O98J)COOQkkIeRj3z3pSNCtg(TRs-Y2TE%M6?nvmNN?hGcrk|5~zKLW2N78PH%r zi@ku{IF>9BCz*j{QR7@H#8@aVEVJUEe8I=h2+12-C9su9lln(gTBSq-x*wBJJWwz} z?rBRy03!nBmgXz#JC2)?fAF~SjQ1ZX9#uUd2F2#%6jbE0zv=KDEg-M_wvl71 z<-6mhr$`UvacY9zJ(AJqb099|YJ#1v`bO{l>RB@R7uh|jw!aixrQmO~YGIe;Wdsiq zoRA*+1X3eT+x>)^o8T1-9c7&fSka3Kx*t1`kCEMKEw4YZR%pXEo;D3F@t+S#KUcxv z`aYnLBohj4wbEhDS7A`k+Xl6IIbw*Q#K2f8yG`hI;+@!hX4*Ax;0u}_+b4F3iaeH# 
z8n}-5Jd-DAqhd7;N^)>65PSbYTMQ00=^BQ*zT<7Y%coNLr)j}XX6i2GaZgsx!AwmlhQuN?w zzCQ8JGPT-1Kb=$?oM}S1T(|cB`8q)B zN7mp91Q8Us&Bpze5zb!>x?|Lo{wRY87_dVw|v+YUKn}r7_ z^lQ&UI3e#UVJey57OayD-wpr!%%UzeTz(w`U#~3-a9}aZ@hqjhJzd8;5OQ>xkPUB- zC5R+9e(qspecq*Eb!m_6Bv{S+$gvE&wG0LtKD~+V8;plTd(XI*E#5nfc2Fna7Gmop z_!_)Kdr5?%J%jp2r^&Ll=jlz@9nT@-;vS5X1YLe%1+-QaXE1U^T_Kd;hEF3K#k(fy zcC1o~Ho8)Set0$pr^6t7dwuta)_^=H(l=VXMNxngTr<5C`ey9(^&h7iZAz#(V&ml- zQvEc!{uf9eF63l*`Zb54=}tom`~|iC=7cU$vO9#R7b{p7J?LPm>Owi%!^l|iN|7E+ zFh<)^_RE-+$XIQc{(+47$iGyOt`;E>OBjdWy?8p$RiW6ioJIf4{Lv4q1%&pOYe;i7 zZt|?hNRNaY(&bmW4}{BTm;3JhsYj)Z6Z%j$;z>jD!b3Yp=@CWz-4ACchLhh}oUvIE z^IztBHVVC`9lEu?NmMA7ra+Ei+I8R17Qb~*IU!pmi-)l*zMX%&J!XfnEL<}lyuDCl zGlk+i=Yg4$Fu*e<-hM`r$z&r)o}k%2$=$fJ`DfvLw1~!%%qy0-xwTzi1`B_(Ne~K( zKs*jFnQP8u*~@H#ZWUZ>43)32?7?cy2l{Xqz+*BMd&uuX6w!~i~!OR$si@eM+dmBaB6_0=jdNHDpRH^D6h>J&;BMvSc#f&@PxLJEsM_{-B-1?cGzv8dKp~q=@iB3pK9b; zz4lbDWZ3A6Ef=?TZ+?->O61{r;i+4vfx&mEStd#q-U9{nRRGvzEuX=0 zwYUaIG5;l{q?CBSKGxpP7D}%It~E=bSfAhF`_T!cfsI{Xya**y4gRXyLMcg%Z9Ic( zu^u`{oGxx3EgU#&9MoHAXr6xtztgo9#tUv*ZC<;B(K?%eIBS5Q_Nhe3C%99#y*S=q zo$}$MxyiCUDsFV4D%f}@RX#_15nouuwBe8~&&{mSZ&39vvus{h8R3!aCbdPGQd>>% zZ`=4yBHaLCV`VK*`SWG9Kzx-!OJz1@<4UDmMs9kLqR`5i-gaJ%Qc2;@wZF|B2J4aK zDy#4Z1P)4bBc1n+br&!0w|kYMmG#-%PxwYJdlleHj2u=HAvGBjcpY6omk6C-7RFY zOE$PjAeh?@K^)w@tUzy;&>e3nd0uj!ky=Bk-z+W?)+(D>1~ys4xvg0*txG2Uq3}5@ zxT{*%PbYu46(L+yvv*yfZ{ z>ckq>@1%ufu5Fuu1Ldsx5w>lsh4#?9DvPAA=UV-%V1vE2qe&sUa}|oy+uyf)r#0K^ zCi$Ygqk!L=qIiB9^W=}(%jnGPl(qHOE|Xq8ny3Fn+P3jZBw6d?atO9REHzlNU?#s={8X6&OW+>X_Ji0+_C%En!CXUtT z)XrHoJYN+T%XMG5^_g|F>osI;mpVHdCulGh)z0}?#(t(o7mTegio#hOT)0=rM#L@* z25-zG+UiJX*RAO|s6iYbtkyz!H4sci_&P6{S&yMxR(9m_GnIm9Hzx8}Syo3*L3NfD z&?#C6vyDxmBm@HlK{m%1_&&rC2Y*d3N+?-88y+fX&=kG6~+L*3!Km&rg zh=>rfLTWe|QU<%u?<*MV1&#yJQn-(_y}mnBLf&3PlkDVmnAD~5_l=B|TFf_~=yBzO z7M6SVseQY~5Lj+hhJLV$2qBZbYYn2)YAg$WX{P^5>-qbeEw)Q$j7aoiDDZCNmDQEjlU8n$EP z_O?{JWW6Haf(folb%wqR0Wp)}#2e+-qH16>P~AjryN4i*Vd={qoYI?t&ax+xI99CC z4SDo*fuu3(YYCWL|3RQ8?F!4n@Gxxz=6C6lwxG&Z5*?`~VNpD9(5UUhGsK@|?_~<6 z-6ZnH^9%`hQ7ySy7JEl~j26V_J0ly`Dg2MX8Jb0p##<0y%XJ#Ckck%~cdf)FUqrN6 zscEV@-V(%!&h9)6a44p|x@mK#{C#KrMl$cOajy&I^=Atjj(t)bq61}dSOdYfZKm8Q z6~kz26n=kEv$}$PTLFguVl^Udf9Y!O^r)qXJ~j)CKg84wk$juBfR8iGC);H(kQl7i z&PCh%_yl|HDM!0mex}nK?l&W1|GMSe6_?7~-SF-%5wgzl0}S;ayv*?FsF99Qo@#v2(uar#{xFt82~=0y>{HKuL3F&79G ztM@u;ysa+ZQSn@%RyLGXI3I^u#`)p@mouREZD$hE`H)9KtgCXdg=A2MC>B=_GfaMD zC0-fMn6iP~jcH9GjHNLi)@oqvDb%V_%L0U?gVhs*uFy&*F7=|(Lod4a)Ud`;*Zr}=_R&&+m)qEQ{(Dt z@uisTJ&!cJGWH`s6y>74_)M7DxdH5XCJC9S;J~x;yQrw(dD00g#4;)AfZlo7l|5-q z?sQ@6s(4{Hh@Wj8&Mjx0>{14+MXv9p$z^B z@WVelBhIqe=)p)NrecD@&7BUh2d0-6^3cy*CeTCk3ej&-+wi;LEu=LDC5%goRMfJ2 z$moQetu=+;{DHw+`Y{;pQ<*NlZGnK4grbSi-lQ5pV6#h8NYUOa({z%-& zsMylYstsSOQYe*S48fd`5NDu?lb~+X+fgwe6J9g5kGx@P>GJ~zXQ@l zl18Xer?fl9KMLg^y=|_S`sXn`?XPbkXrDXbP7|tx73)-zuqdlheRj>A-<@G~*5n4N zh@N{-T!T7mt(k7mSHO7rJ|MeZ+ptZuD}*nyq zZBL$@2!>a)3G0BKAF`ZNyf%5zv0dp z4xic0axbn&B(2l-CZVr2w2&_!pTm}nq$~#^E%5S+Y!`fT?QCIhvPJfHN*kAu;pZ;`-=UTsb{f!9*dx=eN@XMX2!W=EBLKpCzyMd?ECFV8MxWQFmt;^oro- zQ-q1x`f=`f>B7HDw7Ig^C)De|do{Y&+#TwzN7M=aiKQNOjt^|l8fhUY3d)b<_Bwox zn3$Wx*(dFlFr=x|L4$O}9*aaLLF?})66&ULYRcaETUku0Mo%=(`?{Tl>NoAo&hf|d zD2A$2*_HiUS!s!l1lZ3cEIQvTlkq1j^k3FK3{@X7m!5`aK5+B#DNE*Ge-pj=$<~K< zPhBrCj1PAxg`0ipcK1&D^P$_xgLGmQk$eSFe4YsaKLif-w@jRCadqsL=w&s z?!RT*Hw1TY=#{MX+@B$-Zsr+ieC(f>B?lcX9rwqqNQ-m`0!L3%Qpq?UrMPC;0gjgJx3uww1@unc9Bo!-IC}REv+6j7!;BG zGR{`-)y=oC$k@=)Tzo$3y=WZXHN&w)Mnz&Y^To-*M;yGkx!FoYA&yYJRsX%*(&>J1 zgUwqij?@%bCz9htnrGj86qo1^cz#0fFVS2W2hHyRD=$^O%IL=y73x6!1+oK ziASe7Rk}Fjxf*sKch^nA+h1XYrsrC{3SrloOnrXneEdW_55JgE@txJWJBDkPR-Q3^ 
z2MHpz&W&pGAK~R23KSx^lKk0GI#r6!M{gLN9U?21BJZi<$gTs1dH60WYx7h_;=}nU z;FJ9zU)2|Mfw>3cUKnX)g>m;`Dt?GN%&G&1iYuMOEH)?+8P(sQ$-gaTL98O#qp^Bm zj9+gVBG(vf;8^09MOVpGV$6--j`gi7UL`|=333nXY`!|79Aio5zV-vrx#|#iOpDK+ z--jDH+NDwIEAb-^Xf`DA(x;bii8}8 zRpBp|Q7EpxA{c0vkKx)!|Is-)^mKf)`m0P#sSqW6doYqe_n|ugkm8{>F( z2*mXV2o;V)m@JH#v((pJCE;9^w;ds{&$il4GS!AHL{cne*!$gBj@go=!taX+yM;(- z$>a@rjnLFYf(!!N+a!Zt=zIFfn(v-ZDTX$7a9HMeZ-JpGr}%Kan?mNw%*@UH^$8`} zqVOqNt$hO{s$J%>?kD#-dZ&={^osCrn47|Ve3?K{$WbJj2l|t?!PVbQjg?$hT%>mt zMOYT+6bF3m7p$Is`bNbHXH&1{JHA)xBnZi=QHCWrW!j@ z=w9tXkYg)qONLruVh$}Y2OBS_@s;#9BVlZCQg!3_;~y0pWI7YW;RNvST%$!Jj|UX5 z^53CvUqC6E_o&G+*+JsS?dVn&oYC4BRk}x>)<#r^QtoftBAA? ziX2MHZ{Z1EJZwBg_jy|e2V0vLs@?s`=>3CKDv3U~UXEI3=kyr%ruq9IuBOCE{W)0} zqO2pX9VxG1_8^90sT9I|g(lkfGF8FCPGbjki;foQv8%UO>zv^plk}`4#O!&vL$yLy zYL&T##Ib?}oDgwGmjJTCa}NJ1#G#oy8%b8e8c@}tv!36O)#F0R7SZF|yT^!;>OoG- zi0{|D$11cDxRsD!E3YXtR@fjp^>uLzXrgm@m~k4Vk^~!Z^BD5@Rm)WdyK7$x3}Rbq zK(?aZaHf>}jW9;L|L)Lxsnjz@sM$sZ7k^J_yMu01U+kj1vW{RGQt$rz*JPejl@P&m zBJcyty2je^hFv2>c4vDRv7DCvt(v&g6G@%J?5d(<)|--R2{%Zu@~2`G3=21V3&%Cr z@c4_Upkl@U*+x>MBek^AY3i_u|xx#lcP_LA#Q$D zq@NUwp5A45Mk6rj(7t;V;XaRdG!nyo63B)&vw}BLj^5tieH2nCRW?RJM+K99Xt!^k zBj)0Paj}yVfA_?;CGtYe;qlZr@cb~ZOfHno7dX2P@zjnV zN8AaQSecJ}%`lXKjS?i0OMkt8Bf7dMAMfgqM1PhJ`++s5@;waJC!~dyZvFai-450BjebJ5i-7KW8=WsGLVL6Nj_Ygz4jwEJb0pId7mw*;a>6(;j#J);tZqO<;x6@tg> zru)9pSISBx82lT(DBKdyyFF`(gl!SFXT@Rh%UZaJELU$H>h|F-%9=3bIvsV0pvLp9J;~XV=C}=KH0b=?BJSVj_zKRCog!0I7hKW%*e|U(vmvOD>|F? zog*{4knEroQjCoLE+$SdRqB9%fcT>Cp`p!0r;cqd1C6NI#P7CiZSAcWzW`^4Pa1by zry!|y96r@#ePBBBNXUhJ;QX?T#!<3A@$c{%iO~kkE;>zpR3=DZM+TObR6goX9%nFc}>e>^2tduR(ZwoIhb( z#Ang1YcP}|F15lW;-z+U7(WS;48s#7d%-69@3)tlPWe=<-mIrM$)N(p4uiR4arn~b zwuNNFO@M;7MF;6W-gVF#_YVuw;OtIQVoWADAH%@32yK}4ZcHZ3R12ni6BJq=b||2} z!+~@g%Ukw8P4;&f(AbK;Y5Hu#<-_nt7ZrY1n?xZ7U&4h{SdSH`}BzT&+bDcTsE zec=$_IVT+LA9B6tFEj#KVwhdVUO80iNkSaTmoa4KP`hf&hTfBFH@QKRzH=NtKfFSC z>aHs^>miREL03YZ87%$+)pW{B&O!fAjz8|GI!}_v{6U~qlbbcH zEyjQnPq3H&q%~|i$aC4p9Fpz~I$`VSO110@Hyl=r#*xu0SuxGkTH{4f%NVDdlw3h! 
zVXkaCTMD3P;rplRa=so8;QPMP(-&)Gw}wtnE5PG0g9D1;;p6VC(&*LD#bw29on-mrXeS~Rg0MO7MpANF`JU=hd z2TM^?r^W1Cog94Dpi0UA&hAhvMVjKC#F|qs+RF)ir z4Qr^jvhBa*`eR^gMnX5@+Q+c1cnhJM+w9j*JmCan5fCUT>sQf8#$o)t*`H`}*bB+{ zNJ{@^_yUOZr~>MRgc1kEP`H~eukN?& z^b!dkTo1>SJoDxNSia$O_G+L1|8+#5UoT`OMoiDQMP517@Yp4VV9_AvSnC-m zLGEDl<0v3p77AWCM61+;XjGHGjph?3u!x&gFJ0HGvdmz5iUizI*Q5Koky8rZ#keg9H`;2+X8+xZd{ ztNH0lB8h3H5?-z@Ry8o^n2|jBwSQyD|7wBL@t@88<5SR7!g>@Yi4ZE}MS>o^HQtZw zT|O>u_%JFl`DM1>%*-gJ=et@?fw{0vCllfreGIaxst~@EH(;vys%!iM1cqZ3m)$Pv z(;)y|3Ms*uK(_GpD-3F9zF2fG@CEq0_30IGuG{nh#4sA2UYr05F6fgwu&CeWO!djb z1(eiX_P*pa428;XpU#+WKak}v->z|z9zKAGOsiQ34rEOHI-mJu_2kWV|E5y5zy2%v zwkchg#3W!znkG4~v{gd5K%pqT>wP$#t7M-*Nq)!>zA?r30zZ|_pKP}{pWW(oV6oIfW6$ETgad#uw8rDk%AE)I1i)7W0V3&PFRm_Uz7n2= zLN>`3G(ZoO*qbq`MDW~hy+ofn^gn_y#_^Xx2uFJVAUnVu1FM@jT~M{CXQ5F-)LHI} z9IO-B+dGd6Bo%}^@=eqpY}&j!I`BjdG*cw|&I&L7&FD9hqeO?Lx*zU6Iz4?Aii=N7 ztms6*WBa|R1IC)&q)}`1$;J5$wgLFE!*!&2fb}54Y$9#Mz3WrNeLlYB{g8=1`nO8P z$qNQ`K_yg#31%Arf*S)8rdeWX&;9+08>mi2MP*WV47I5${9b`BI&HH zsX5XK3|mduWk6+1%))|3Mb#kkexkso>3jsy1E66{w~lmVGPy8ob^V>ItwI{NwzjquV4r%NlpNoTa_em*{f& z(x_K?8P@ihy~ z1<<|e0#NL^WDe4Aqp;EbLB&0&Hh%XAi$Ch%Dagq?ch56Bc;c10Nf)#Eyd$b~|BQO0 zzOqiG^xFeWW(PpNk5#qp43ByTX~FZiOm3){ZFttskM_Y?ZWvTvp}XN1ne^xsV1Nqk z=y0m-~f16g~ei=7>L&heR}wX z@WTo&Il%Zwd#I#&xW}Ix2SK_iG5yO8@I~b`%(Gcc3utLdcE?+gqR8~dUQ3|sw>pZ# zwgYn3_LM;Dn%}=lint(42v9TUF%!Sdq+p=ds)X3v+s9$*gAsY(#-qnM+n~}mBy$!?u z!oguciBX?!&5*LOlQc5m7eQqVXt2t{6zQR5Cps-N0Wy87HTq zXgnwe1_rAAI=J?V7swt7(8K^^oPB&WAa4lZ)_WLo7J6u&{Q9}Fwx&I4OAgoABbAB4 z&d&b$^3ug@p8PE16f*)4Df!~KTN@kiBE9>}me~ceM~|fU1 zh(&CEm2`?aqw~{sJuqGwV93QZ8lz3;Am2?CFzqs%oqcVJt0^>%gI6q*{F;E#s#dAx zm|c;_%D!wsV7t}X;Y?H1+ViP(7i@waEe@N4^cPQBtElQRdHa3}q*5qH7^PK93)V@P z%xR7NRg!tj9iD`YYye67kha?rjl^6iZbyW8(E_Yo&hCUfZZ$f4DHYoQx!K;qA)bDX z6%w^Hq&DF$i4B(`TxFG*gybJ*?p6F<%D$p2m#t3k!my9ej0#WmiYU+k#jZm#!NstRc?#i={|;<-U&^4b)4#0g z!ZG)^4s^D~#(6dc+hnw1nPl!&n9F86btb9sNQ-#SyqI!lP~4U~>I=8AmVE-^J>0&n!r z2Zu>30JtFlQKT3mkA@#`3lOSPg5&N`8$WiSfv8_LQugn)g8+DA)s(wQPR<7(uEV&} z#0j)5z+9{q=Rf}Z@fQHG{%7E8+{yqK)@;k=7a3a@OpLcO5@q86=SDk%zwNZEz z-TliTL>L!QRI>73al0wB?VnO(q=>hdho$2Gizs|`0%i-qhveDXU2FF?6VO?m2GZgBxgF+&LH;TwYGo`Fuq*w@43rJ3vtYg@b04dlhvvPN4?&ibMsx|`P@ig%QC88GprC|#4}LSKn);Td%Nx=G6|?7Jz>(nv+fBcR<6@%| zJBHL7UnC`V)EcZ#&${4K-gZt6h i2Yo4Lt`n|AiM1jd%QMgi0HU#CE-BTZGF znA7DDG^`(jmPZi!STyeI?0MmRBf4u2r>7V^v2(o?)f7>^KX!+`5z<)cBca5bx4To* zhZ`oreiW-OJigt$*k2x6t|LhKc@p|+q#w>E5bF0lnQJ|I;_l4f%W}Bi!9?j?_im`^ z3&=laDla!aVB3kCg)M% zjs|CR@@lRSXEL4YJM=8aYH*>aD)bhu#Z$ttqA#*ncd#!l}EE@v3?6#6xSF0 z`?D^Hl(fFyW$3}YCM|1FYbWKaOQBeXa1HLC#6ByoL}qIHEsa0vA!Ri~(G_Qx$EENg z9OrP?dWE2+f=hnkyMeHT_(bka{W1)5Z@YX_yK7&5dPJBLM20!9gfA9I~1PAGbmFt)NpHJ z;Ef|Q(CzP?H{hfa)o5|XIUmCNc0Pj=MZ^Ognq55N6{d)<&4QsXs~2gX2s-Mwm-0B# zjLqvaFy=mGiL%V|Vokg3geR6n-qQSoFhq6*F}Ltj)ZGQ5{VDFo-Y2-s)r|WA0NEknJLwjKF_r|_(?%Za+%Z(DG{H3?`u%wkGfcbB$NIvSQt^b zZDtxU9+s{F#I3=8KMj8;z|FB)c(#Y*4d09PJ8q2Z(-Io2$0DZXB+IF)2^5%bloZWj3$11GPCla`6P$&9-?&&K`Y|TOX{sCdW zZYbQrjC$8|KirZ^#GGxI4KAXkAXJzC*kPQ54+xqXAy`Xi^4CzafLTRDT3xY=kFP6G zXYYta-sBJo<(W7VlYfm6#c5;XWV4%ZPPw5f84K{|O)&YAhw zFXy3|FsE&$9Jb>`<`$8)d=(!g6F!8b#nbK^aYMZ-daHlU3#M@H`fWBC=npC+vRMJF z8N3cqQ|B2yyP7Nm8byYSrvpGnZbVqOz2+xh`pNqB0Q|bX8rkn_GKmB6gK=*V+66OI^h)B$kz(gt~ zrC`SIF5=DAVmWQLlu0tU!8-}QgL5X0o#dQ~bN5BlQ%M#puHdUl%tPm*#!R;_Pc63` za@pdT9=dN>r0ac1`CUV^(L>1jwL<>Az-Ua5nTOxFGSMtEEaB-w93lT>-Uz7U`0N!eJ<7xUm7Z!g9`|npEI$yXskMwO zAU5M)Je@Z5l^xi2=Y(qi?JLP}tbe6vxm`a(Hx|bgp1XLhPntDee zqW>TaR9G4-Zd}3m=v?C&Ij?}yMMcB^QjAQ8YT7BarHjI6N9E;e6z@wb+_uW~uD$&w zL?cqdu^CD2f~Y|AWr;XZq0>rLrCbp~)mlY#KuI*H1``Qn#opdgP{%l?oNfm_n`S;}Vrp($|htQ}@r?Y4ue$%J2gq51K5c 
z50i}!?FdHh+YO<*5N`8~f0dpBAx$Hw@Do>bWx9E!2$OtwvKE--sRhWFLX|#GgRi#G zNJ!!;D!>1yuJ$e~F-#qonk!Ny6<$z4%DE&<<~W}QZ!w!F@y)nIE&R_*^NySZj|?4E z%um=XKA#YD#p%^TMrzE7(D8m!6WBs?ll?DPe@sNgDZ(6aDesc>z#?ug8B3*0H+TQul(D;Zxxm92busLqid}o$6!rISTdn z_)!WXH)r16>cF!4!F$!hjN1@8UG&kWBX!$$=4_{>*82ibjrzfaxJI8?moVJm03ayi z&tsL(E?H7i((^q3K2UQR0N4)-O*Wd|qoI93k>;O1Eb%AC;}f*J-0DOTL&P5gghh1& zDVO^n{EUXZ;42^b;<2L!oSU1Q&l&vt^QFo;zSizH`wPXNclj$ph$E%K-TT@F4bN?S z`Yi77z?XZYp)6x)cDevFgb3p8d6$ZH7kF2@b&&>SZ$+d)L){7xD^GF5k(QBVw`16< zOfJ6!f2)RzIRep)feCeJotuU=jYcz?Nq%U5g(`guCwAFR$4 z2FGCco7ABByuaYN5DFVKP({G>=$3_XXJu|g>o*hjHlCO}Tz2D7UaJT_5wEPL!WcuL zQ&~iwyet|nWF@pX{3Dgi@)k67gaq5YrWt|^r0qp}~NF+j9BK)IoaeDCc)%rw$ z=y_>rI5X~N_ItGR*{UjE4kRRG@5l(uXZBq3a4OI`fbjA0u~@Fj^#T#vBLb+x;?d?Q zNXXatN~J0-fF2rsaAyLMh5Kz9F`9b`Z1AcxuByC zbZ=hb^fbb(1Q0<6qK!rL*P-ym5teWZL7*&^Mhf#%NXP_wmQH8K%pB5GhXYiYBsYlj z6&5qZnC;?UNjDUlv~1*iZ2x$xzcgCN zO{6j`yC|23ILx%X3bfgOhJ7KU0~zq??$|wl5)*FvLIz9FY@g-$a#r=$haw?Y&v%co z5PAJ`^Fbmb-0}@8aE8EN54OLn0h`@L#;p!052d+%#!ekk43n7c2F##xE2x>m+iy@3 z)5|#~we7=UFk-(-;dIVu<5IE1!&u?m+E#ir^!e%7@N17SFE5?lzAd7WWDzscqTFwa z?wI=!ZNY1V+~4?!w9>|6T^Y&)D^VQs#YsxF?KAyVX84;Ac?E?y*5Quky~ezh$Hj)JN_klbL@(fcq&lRF|NzSWaa# zPEb&oO6XD*dbWh&C0!7?pub(Mf#=8#^c=*1ensIU#jwz3nJNtca|g}#x`qMb>(~KM zjA&==$b*s)<&ce^gI-vq%+Q4?IW-wE~6M>p~EKn^gs51>jNW zqtmJ&2S;LJ_O?SuM0RmG$Y_xG*3K&7qutwW0>3e12q~4?(MQpJb@9=v-h&nEBs!P+ znGEgrqCf|v@?=U;tHX_ZH(B_B(q94EP*5ruEII?-0F&Xx(F5@IK^IDuc2d|*H}hvH={HV)>^VN*1&$(tGOheu7ZFY4Z|4YIoe8kyTM2a)%7^cORWP zqLBGl1bHL+y+7VR<8q>wCX_MPK32Kmx~XI6VY6G%GPcjp&k+YUfS<;e1~&&fIR{r) zI=ddS^Xqrsgyu`=TyTjOuvQ}Pi8amiRPR6B=I5E*&HC9wB|Bq-=qI14WbQ!Da7kU# ztcL_m9xxi5><_2E-EGXyDrq9`Pi95}^=E2nW;#eN0HtBEQb#8Bn=-dz?xR2LyS4pp zKaA!FA*QbFvyY&_aVLE=#d-%Q( zguc_2`dWuQu|zSTvA_uQYD~W~4fOR1YYNqwO_U{R^4VE)@ujOR0}m-<0OMtq`f_`| zfd_#16^Wl`3m^Pv4?E=yM|tY#e@2e^gF)4}-LGH ze~MhoOsoRco_vR_x1g}=n^Choctadk#Y{!-VJ~O@@ z%LaU)xrf&bd*f7GPM<50FHxBC*Jq{P9Oy*x{{EU@zUc=7rcka%M^-kK$sI~2omBEh z959s0Xb4)SQW^k!`KXrQBAkVCdnL7uA6-X_mu6FjP0Mz>Gd)AyTlfSK(s!FdpxP7q|S=xg$huVRbBWgQ(6VM(U z+8v0Tsn+LDREG@R`H>5t+odk+5Gik0QUyMVU?SgxZ-CCDs!yvekR73N02`Yd=^Ei1 zp*e{&;s^wM-~dQ{r4u*XHrI=)ei~jZ%1x>^m=it#BSxuFlY`n>R8#KbF^r^&`6Xv zH1LWK%&kU&B8$J+RRDT5M&GXubNW^U2#E&uE!QYha$i zO$j&BPE0p-^R(Z)hs}6W?*%{5aHO;aS-xm$i}>K<3=j>4zg-OxwYXj7q5h##Do_AT zwHs_eKe6Y=!xMXhr=h8gq%|*+N&U@H^fMVWZ8t$EL2UwPuNw@ji0nTaV zdLoq;%#<>~rTIRYIF#4Y`ST>HPEVPZLN-lBvy?L_I`0zbB4hJ#6v}{ZO!vURK#LgKYFamIW0n%_$=E4$ zP8>@xrVgxtmc$X;`1M?y6q8u8{vr~DA@Dp)JaGVSjSD_Jv6jxJZJ=|{IOymFL6l2J zGv|j*jC@VgtE{Ick8dC8iT_)=d{A+Jo+ zaxOIC4Lzj_5c^f_B6PGO@7iaAwYvCj(WiZj8@xhYi{F2B^8zPT=ypKJnxdxiQkQ)Z zxKH^ocb=G6RaRD_4g%pD%6p2I9g$Jn%mb67Fe^GzJq$8?CAGsnx!vmtnG5T~g1I0m zE-i@XUEY_u|1<(;zkkIZjxX`j5F*WyW~EDZb|B<-p{>&Is+y(J0-{;T31WUM40T;I zR3;WFG$?i}o;tK&0GNPt3D1%+_dnBbekB6a&~3zbC`i7QN8j#IaL>ZFia2 zKGVenOP`+>?nS2QCkdpIc{(dcRuk$UhQkq= z=K%E4t+*o;Va?mL(6pM5xMbArVVAv{Nf3oX=b^|@7(m1 zNwSq4?Mdi8=)=D6PsOPJ8j|M7()ZGi)Z}78Vj|7FS*9`$Qq~T8Iu5la9A`&mup6T8 zkbT;l{`X`;fB4b@s|S{h=;sRuJ++7`Kp|09Te}sIFKg3&?e2F|Wo~E;fsrvbiw}(m z6Rhs{2X%a>p|+0s#R|7vb2DB7jXj&`?nXgHG0t>6MN!k$Qnx@g-dLHFFsV0cH^tcY zC}zx|aXlm;CLq$-4#zO9Hv)K8Q&OFBu%&<$S!5PyvkqaWYvs%4Uef5bx1c6BSY59> zgOH=Q0++sWxM5xi+mVN5h)}s^HZfIj#*Ht+VBg<8PvYN?U^PW8ORD)JDLdR5%~1Mj zTJEXBKS9boN~iT1JbVKjLvSZGn;QJ%+$W(gO5H39ctZ%EvIZEr7}4To(`}jvjjCd@ z^l2;Et19jJ*6~Rg7f6LLW&1hm$vVb3oQ+nPYP^%*?qAnq?s_L7VxY_oSoQclzSq33 z!1QwcWdeLqT>!(Z^rl7zeQ_L3lox`z8fNU{_DFf*=o@tJ6hyYe=@(-IQI}ECTb)<@ zQ=Yv}FT$C*prOC@K^M^Up+tPdz)%*^KV*`MRZ{*Tx^rU4oqgs}56p>hDFZA7UsGYA zuLmm7=xRRhvuhT?nTqX|rsHts2<9r&!Vw(d#WwIsR99ocEA#N@r2U_M^g{2U|Hemq 
zqq1->G)?rc&3jqaj9<8INuR_%Hmtg4OnBTRuE)1W%Tz!9EVEbbrZq!ml>a+onbuS zh$^dr`dR-V(2FhE{V0t-nzWj%;=r6Mx*7l2c#ZN~W^S})DHiHrb0UEv)`99cm+Ut^ z0esN_RpL90RtgjXZozs%K6i#z{&*FBp`2~tR<`&mJ_E^RC^y(xA>R%RKBa>o`x%0ty9?1;ZiAh^ zgW~dz?3Jy6_xQU-D1_&S+GIrhPEY6Aa?{7&-Vu0bdpHJ*SdM!jt_4uFmfQHSPl5;U z>WR?D%Q1Wt16JW6$BR|bAjUDi%pX%zHbO|Ye;}iAb9~;C4%rP+vN~c_Bm9P-W-6W3 zY5AXLQ?8k+r>eytTcLzPqpM3J7+Cr zIgkWrQw%}!eN;H!6Ze8Y=r}rEx4ZqVd=7>M9=fY>A|L7vxhj*t0=p9K_KrH*gzuVM zi*4o`Y#27MgW-k}*U%hwx{z+I^Cef4IS$LJkHt~mbn>}j9oO{5L;5fvWwU&Jb0oHPDQ-1C zX&nzr!+D!~+u%X7viioi%#DWUaojL=1lt+CRpe!|o1w9`uoJ*{D86dJnhnD^Jo zN3|f1pv+A|uq7c5e*e#NEpCjP&EUgWyg(8c4^uzS(}qq}VLZ0a05-dCmg##$5`7q2 z{fps^Em}fWi10Xe7}ng*i(PkWS2ox6*7p2vE{B~q))-c_=$KkZtzpfo8dP z0Jc79@URfOu`hg%-_r`kH@uqx%{HT@2+fao?Su(TsldMpdD*>%&(ss>ptNPCM zA-DB`c}1gtOOq1}?zIxhqTfL-7O7DcX!8tba!{VZ{GL) z_V;i9*dBApkAazIp1EsX>sse}&B#sSSLr4)68G}B_w~z&J%flH(@wIUDo-1=5|&@4 zBmUzqUdzii3RN`cP~IQ(I@s{qOA0=p$v>hLs65B!T0+*kZ1#tcSd!}vpVO(;F>Q<0MHKF+b4OAgms-_$_8w|k1ehf%=KsPisdFf?E zMy$n(VS;2VmbO~O;x>1IuHF7pPq%aa`OsHBFSYfuNt_~i0nXCrm$J6um)MtU-NSvo zX|))eq&}9~XQZLreJj>7M=(YKO#+5Qn^&k-G!(+)<~* z?~t2F#7|DuyyjM3WZ_pn+Gr91Su+<`91NpDcABAzFc7+7zO+o09qsL^)iBLp<-Y<7 z90a{?2g^?p8=$X84`96yc_`!=jM~kX?dV_kF0EhLgt|uVt=`tDSkk|4#dq!dm7(Py zM8M_hFYOgA6emv<$c@{RDJ%^SlW^Y&vmzx0^Xs3J7Upv3j!`~^?SiR!%ovuPx2TW} zGMY!ol4g8IZ#HzB(?YPDeJISQWuz^NkK(fdQFZ)W;7j%Lbrisj@!vqErRo~GF`u-H zrQyZO7T%v$|9wj?Ym?ic#uVrS0mogXC+&k#qUXjrGj`&J2XN=9qUPiJKnfd z3|An3e(mdm4{CSmopZ(j_V>Ar zvGSo_MXP6z32e`&bPh9=>Oy8txp6jk96NRn-xmv9`)U^*9f-|%LXltnqMg8*!bP!} zLUO}W>t6SGJDOX`CRt@3-cmZDX^ytkb5-$BbE59of#j1XD@s2gV6K)=S}&*j^3KtR z#m$_Am}mj1#nunQ)n2`w=@=F|LL9cx{EEf?l=%FS8dkEZ#DC|G^i|DC>!-Yh@}3xE zl`ZD>480VVLIXBy4}!dMe!be&!8{=r(_w?okvOt1Db^qA>FbiBC2F)kj^-K($tJSA zxX_XpnOcg=HY{XLXQW!5_0>kK6pNDm5L;U7v4no}y$eyx;xX zK82q|jSqo>&U&t4;p}+P4(C}G8IxPwmrzu<2dYC%$nPkBcj*YPj%ewNz)W?{cc~nF z%H84SNa<4Q_tP(NN&mM6>lyJq=(HBOH#pJf_xd;bE|TvLc|*u5Fz84eDu{s_ik`4quu#|-i@(lp4{ zlS8pc#ctDGOp19pSMS!E!`JW6RG+E#d%hdX6!2{-X=NJ17&FWY^VrSl8DVU~;x~YF z{!|4gVEAKFDgvig!xBL z9S%0~8PhWT1!tQrSd0vvtdF42?{qrGT}ZMB?W3nN2hNKKDm?RPc>P4012#rYy7dTj zw>i7fbhB+@unv;9I!{oEYO7Y)^ES$vm(Wy|)mN7*6S?(u@SScsM4^>Cin%s!lQMT0 zXYq$hbDDoND5{-&EB9v+B*H(oPcG%P-=2G_;~hnnR>@?F2ZHJb7RESh!Ruk9SEuKU zqHEKgpw>>Q(4J!Qi0xH)a$b8G+sC4dt4H(j1*Qv#&pRt)g?K!ZIobA4hUhGg!@gCE zW5e)UdW{m(u*p^R>0xjF2;mfkk=xY@`bE)}Dx9qDP|ZKykv2Bd*jN9>t+a-adhkYz z?6}YV%I6yng`@OHF!YITLr?Sd^II2bvpVr-a_)`zvZkr8`@Mae-4ZX3DJe866@Jw) zMSo`2TaF=aW}C+blQ&+*M%y_Zn0+w;v^q6&REz=bByWA zL=b$+!=}IA-4baf6yl(Sl7!%UGYfBfSay6hOux;2MX_65Pzoig^y2Crfp=h4Yx@-P zNWgK@C~amou_)0(m7!+$L(U5J$j_M(Gl&cwv-e-Upf&L2Z^)6dcCqF-5n`3efO=H< zZx_+|8H2qJr?pOs&R%u=KeFtfbg=Mq-W$R#*qa*Q2COBUX4*wJZ_n0I@1ox!3%2Q6 z&UO*}DWcH&hOb^DG>GM4@Q0OKwIF1kNqDspb&R8kYxhS@H5XX5Tf5#iI2^Q!{wPw!!|jik=sR^O->t{Dj(9-NeDwdU zQjN11JKTl{`HmwN%-do_mK^JafSc@W8>gSt}?=pl~x0u8D|FV?q_3JX0k5nUa+GvF6=aRcn;}NT+>dMk__xX1G#=|L8~uQzvP)OJhg+i>$ixgjSW1*d8TBum(G2DrTbK1N(t~mg`&klTD8f_)Sjv>rupMSy)mP-4cpf zSAW5BPSpCGa#3PrBAWqp>)v*qz)WG|EM5AfBPDO>d!;w&K&K%#6wuKx zA}WeOX!s>#^vVPg4Rxxs`Q~o6qAH+jA=HI{U^zU79QnW;uF>U8{E$!RDQ zJHRZW6N>mhf2aEs6R&Lc)5RW_(NbIL_PtDZw68*81G$mFk1#iED_8!uwUmB5d0mp3QFEswiYh7K#WqHOs5KQKvXk*bx?oPgnVK=azF>j~vb;Rf94fvBc3S9kyeevhCJPVtomtNx)XYa^3u|1wu#)jLOMo8K;X(+8RCbta;Q&m zUJLp4(EDPI8p3FXi7ZnSY!QR40Lue-gS`TkpH>x!9b1R(QV@ULgRb@sGl>4}i^?Lc z=ZVTRQsyyy0UV6)c@j6{KO8sP6En-so@Js(uVm!YgcZDDn4r+Y^V;Xdv9-+Qf-k3~ z!wWNhr!VUsE!I6-em)mybS1RU&h5F0u+M57nq>cV%S@j!7Ma91`kkX+z)Tm!M_q1k z?+?*dqjLLjw=xNZM}_QXr@Lpv34`ZiQFcoKFgxKC%}9o^I6e&~UAt99%YVCu01NbOXEYHRB$lrn(vpYSc0HBRXZk${z2ehUb`Q7Tev|`G 
zgppP>mC`Tb&$+SOTF>#dd3|s%A%>LxCn<<)T_Ld!#T7Jtm{_+bcKZUZ<@N}l_Ri30 zT|DpisToL01hw+hXt%OABKQ(crZt^QlNJ*SzavQazW=nw7eyG~N3=hGfe>tob7RcD zjR2{EDf+uOYcZ4DNV#i_Ci7?+@B$O3bX6tq9o*eByKCRV`gu~TtDKk0oe|tynG_V| zk?}}3P9~Ch5f+%!JL%o_eA!wxNi!AYT#^ibg5QvMT1=~m4feDveE#qJwrG@&B*%Bz zvz#dgF`eg*AcQpB(PEX&&(H7yD``DUF1s znr}NqIWkCMI=fKj@R_>BTFB@5ni0jitwWPb0%-~&Q1>TemHw>I(?n>aC#P`4P%z*! zkJz-@y>(p+Zh7uo8gKt+0LkJTZAg{F10OLqg~{Nwdc84rBYT^@dh<)W2eFe)sBLBO zs`A!Cf2<%D^#qa!IR$MW` zw!A{o6buj(?5(XP&f2*1Av`0zXDTx-|9lW_v=?GHG^TZoe%X@=xQ`}(sMzf^By5z* zjTF>v4#RF=#;wIFVCXK)Mvo0OthRnBAW-za~V08jsrzBW9hRh#Sc*zEAqjWje^_U&8+ z**#v3c(jnb#fD=?y?cpl{>VdST}5RGo&Q5OUP%J z`nY<~x~8I6cty-P z5)b{7Jzh4+`kQy@Oo`<8`k#gWQE*nsPS72M=db1a35hWA`eY3Ere41Db!I;`?u&?W z8p$$^_BYpNsIb)k{(R3ZBV+*$r!R#hkkd1JXW5i4p);<&^&Lv3c`9y=Zlh?GWLKmG zF|9? O#bd+0gcA7i-FbG`8Qa=!?tlk`76lew}BJZoO;O%UXR@=FJorp}T(F%U7L z&w7TV%=oyIAvI})%)k1TadRz}L_iqK`{iJT2*XsZdSn{c>`Ec9b zQ2gp~&}=#mHzR&pYrT9TsP{9-8Pa}6Rqd|dcKCf;gZ=#soT;n*dxEPD`p*p3Uwf%Z ztc4(qUwy3cJHv4oceeuC2J$H5o6uLgYFO2hNqEb6zgV~!>Pt^J_QsUZNglYij(?XL zqf7477K-Z{Ep4;P=}ROt8E?dwfjDvWMBedXHdZi)nZ65@PBm-iF)>(|-uPL98tg)h z|2oVfyR)`^o)8hMJG214O0}5#9r3_etsKI>a(wG2h}f~Uj^6q}$}63C{_dOyv#~Dj zQe37f`-^tHcg`1X>wRBenP@3|@~U>EOXJ?ps2+B}6jBpcN?uZ_+(O$=RmM%0Qj@CZ zr-r!8Ah_xYhI8`l{5T5yvu0-0aSDx#LV#^kl5$w=U8u5^}h-#0q7@@Isl z{H4__3QEYKSA*^0xIVQ; zJbZ$t8=Md|1taYng#?N`8OqMv*08-V7ZW{36MgWXoIHOFr4sB(v5V|Z@+g*c9QDrm zP@~D&S#8pZ^~>Kt_4*3%aDf(3jKm}yW0-_UpsSa=msioo`A$be z(4$n)bz27?P85WAGGRdhYx#?rT_gL~@6ReK@qVZ0!w(*$8f0^&1^OJ{=fdx=z7F^; zN5{X=WN<{hwlH7n$7~+Ot2Lp2HQV;iT-m3snbiGYv&2Hq?Dc24lS_*LpZVkV1@sDR z>VQ-Y@0rZJ_9!eGb_d!ZBU6(`^?Z$AMIxg-%<)e%-3$;>czk-PYLvg!<}fMmfHAW2z!T`RXx@@d-s$Mz-}FAT)eGhL;!^x)tZS+))dwQc z<&#Se{s++&V&=?CEtWi~7kUbA1lESFZJP_|5>xq!kyQI79pm=`OUqSe&#Z|JDRr-- zf)o#K8(jk3gr5}?8&c^G`W>1hAf5-?WxPru7NW=ud{|k*enD#%dUXW?>c^yY&f&$p z{=LJMTI>p0{vRAst?jau<78XWlrxyP6^KT|X#RJ9IJp#Ky>JxNM*w|+X>IqcsrK5KsI-Oq)0VDVq}7w4E^=+>9tK6BUUbto0MzKYX_IzgvSMCk2XbZ{awM(};W{(hN)Xp2jW4i}& zV&s71DRCKp@6$0&SwU^G?5nu8UIC!{k1!KdG@x{w5ByfBgm#{ati}Bktlu|uO3dQ) zDdh}t7pF|YI{fVmzSFXo4J|Dp%oe|#{Jir6BbW%aRqu`~(-(>mV23EgfXC@-ANJsJ zWeu`>3YaC@#d;n~J^O1m?EZ}k5_7+Q2N2$M+#uHx6TE;iCyNR$OsYdo+Dt^!8t9D_ zzycpHi)+=85zO5C_|M@klUN7Z#bT%w32whgj90*Vx4g1qpnc0ZzpyY>MFq$~t-veh zaC>f{UGMxp*9RH&coPFf>61LYz#G0O=XJ`_=y4{waR98|wN6`65Dv%VMg~1a#hD*8 z|J+OPnHp&|5!SuzMph=*N-U9{X@(00yFH6xFr_h;SHyU1duB#Gc2m;6pz!Om;AtK>*ZrvWso~j5%B?>7}$@f*!fq{Wp2Ka|ryOAeSgVaQxTN*j$rQ@`nH`jjqw4^>ghS6zb=!L;8 zC=!=4=JEBz<@bL0vB!>xh)U#ik9N?YKpw-c_9lYC55Y(JKDai@A$IOUtj4SM*f`mt zV?+$Iv*OwP9ksfamfx6r6?5=KHE3)%5=MWz+J*eoy)@G6&JUA-;InFxTEU5TdKUQk z5YP`bm|4%+4YP^_-J-%?C%+++x^tE;#3a|KjzsRJyAf8#4x^Wq8FkDB&noCB***M;!lb&rkCQ1bbq}6vX}Wy; zoFdR&$D>+mZqZouW+Ve%i=xJMLD?`!G@5eY!w+rX(E}#XT|QV22|^t&jqagjt|Dg@ zVAITp`V1e$i$=_ix7Py7#Z;njrrTOKDuZ?{2CYKchdG@lPu!;tl!J+EOC-Q$7T;g* z{&4HUp_m%@7xApcc}E#M0}l$al`PT&?tZ24qXiQO4k+e0=pj`dXT?J;+T%H8t) zxw9%00+qz*bXAC742?T0J1!~?y2+%8-%^!ddhFe4-<_H;Rm;oEORqccj7!wXPq6a1 zAM0~Gs)WAy0hJL23h$OYb$Kmg7giTVW=0CT1s32a<#p09esZpQzyWsFZU8cw^r%Ds ztE}yy7sgGRUOIqtD7)qvm7I5-qnQ8GkQX+eY16zQS3lS47+4|~5w|{PyfgXWTpu}M zljzd=OL@Mc(HRpO-QcpPrmD92CF`3}1-&vb^V0?WdDVV@2>>&I)bZhP`X2STLC`k& zVzxw!OBF88;qT)^;bM0*S=p(ae}hpET2&c6!`nSUl|0$*A6IyK?LGj45e|4Sg(kY8 zpchwR#fbAjGoXJ+Z{4WNhnc2@Lv73ta+naJV4`9webta|LYrbzdOA>|19UfvR8%NU$8T)v>1^XOs4M=LJ_{fZ z3Z39*)N9tZYVli=Oo#oC6-h3>zt_8L(O`4=pj$+L!X`ggXn7)*91nB*a#=P^+fF^}PB zXSzeA2xE=i(qaapa0lqDh?-Q+ZZ0}zzXxRuq~S*e=H3r?iS1|T#N4A9h;mhf2fg5n zMoIe2Ia#PWP!8H9$9A)tXUY1|CVk?NP2nD^HD(!16PU~R!h7)mmY3c8Cwo`h)Rby2 zq%4U=b;%<&43E)Ly}`9abJIhEIcb~S{Kf1jaP$Y#piS{A1HWw`jq8*z=so2TuOLT9 
z2&yykPYch$%q(n-0ppDjIIv5a_l~)NQyXIbVlKgu#e8+{nlS=kiwJ=u?2Kt)XlO_g z^oJDle*udEkUi||HnSgeklQ_vCJVmb%Cmp9G@q}nxE!S8eX!6ti}XoX`Vw9);fJ>!eI*dDY56%Vuc&XF}8Al!D!}&~oyL zU+)G^ruA)25xuPVa*InCKjG_+u=x6^np3_?4P$5y>zG>5=fHlqt*lfV*Qo{Q4p3IH#OG$JNoVnOk}-Zhs~OxWe<6`YPkW@U(&B)cznW%z$}KMO%r)=bS5d|Zto`NX zjQ#3_lYS!-*mZq!FzW?^R zH(f{wSn5lQt6$%{tUDNWgr3=PMFQap?kC#iWJ-1} z)=swj2Ol)Z^0sd_PktHq1$2JFCYlb?D7#sy$_!2_y##Z>*Ubi@>^b9^#dwYgC<#y{ z6hL>EyKXRARM&;(x|X#xTvjsdar5y}0k=5W1I^Qc7Cb0;&lZr^5wU49Tn}cubD4Yk zc+nhRzlna99m>AN+kC#yerr7d+d|I#+F3r_gLY1TD)^POFdIueT4!TS$}E2>BEVVzc4cMCU*P;uGlUQ03sDkOXNkgo^Ks`K2M) zN)--9_!Sv;%lYSN`DOEGkXxeeSP3}>q?M$(9DE9?NubC&(HHj>`}OL;9$tkM#sRoa zE;?Xrl$=5vJ)B%fp8WRn)gE$ng7+6|4%`lR`QmhCZ~2hhf4iR;Qm1+7TxV`i?Wu%% ztZ>xS)O-|8=rJ(+^hjio|5E2N5q>k$Pp*t!f(`b!Mbf|LDspkR<~=B+CvO~ykQ50G zDe+yVMXEL8K)c$6Mue^$H~z1p9yVE_af~7+wASHd@0aXX7HbxXG0ju3V zM#LbZ5Lp8WrS_9{-pVvq7W~ZEA8Q|6YNN8$Lh(ey2sF7YOpK57C?&!&z@kZ~R-Az4 zhdK2;y2GJ}UM25?Q84|ec~_2mPOd6`3eI7rB3oGs+-Fe@{9YVy*@+sHm^YeX^y41B zND!43!nEUQ+AI?!!OgE88#9`keje6HIy!@pXq*<_Vl9=5z9ZReZsV<&!MSKX`@b5s zM@~A5k@FHY4G~9%8n=y{5Q9sZT+i0~vKhwqVU~dtzKPU45qj2fs|m((gCi>jVUHC! z1l9j z>{kH?2lrPTF?lV^IXeMPrC#Nar|#4dCGAr%=tr40dQ3h}ukEFcWFv_OiMlqyuK5EY zmm^Va9vrCO7LmcCFt*}A>V*wp_P{dw_UccTM0laF7K*jR*N%gyCI1Dx(mQC;M+HP$ zkkUDxue9I28(rW9y%2Y~`I*i{DdepfgEhc>ne<2~)nc2-mB|+;4kizjchG|Rth5JD z-B^s}sNdn#S0%9sF=VHGM3O+_Bu(!zW+{0767Hw>@l0h1mgexQl0=s((;-4?4Ej^K zGQNrPvtw=);%-NkR+S88{_*x2b{CH=Y zMQSB(?MWBO0RHFZGGe8mG_=G#(9gO@nb@;+6v(uO(#m1ni$PrtD$_DIWj38vPjTZY4pgO{gB4v93PYYA#78!?=JeB*w2K^#SsR_mO}`B&Yno$Xog z;WzXL89V>qHZP2Jw=xk{uQkHt!WcH=)r64PHj|A$(Z^X(Tj7F9F&vK<>`x6M^sH{{Hl-b8T?|@~PI?bI)06j=e7Kvfu z+xzWrat1v%JWZqd8GB68*?N3wp)aD_6EE6KNL6Y?&on3cmxFvOCp*=p@qrs#>Q+!p#^Ip3QA%0#XGr=;4rvN4)j?On z*L$sdeHVE>l=JDU$t_~KVv_f!W~gURn{tL%D9f!t`o-;U2G;FVKd_T4kBAB4Uk@&y z1&-heeykfzKzrJYg=c6?lA{kKR^~FtVnPa!)X4u5&3pJDu|;uoEREQ;D@}YzSxdkQ z<_)&N4%@&uSX+VrA+3dYfIKWhe22~$)jon)(n+j0_)J?bP*4>&`bHW zE32CAIRgYYP}&aLfjBaO*QF3i{IY60Zt;jjvx~=Bq_g6`VdE`Z`HMVxb@Pt+quzd- z)>1wK(q(8#71b(Fj;LC^a(T@o>y?d2#>VGu+cjYA7U~RO0HFm?6<~QRzr^7gs~G-i z$T07x`59M7W@$ziqQ`nE`e@7nBoaMXqH@Zp|F*%E9-3Jsj@M$jYdo7;PRjgEeq{XPI4dwaFx92S+HlGJZE?c>JCu(z_UijXf-|LZh%5q9< zb-keccBHEN!*bt5e(zU4A?4hGAggJ+pX})9=n

XbxDfB^!hKMhoS<+&w&sT=?ay zQ#_QK9aVt>QN*sm%>3c!2iO+*q^vkCt2uN6Rvd+N0jX{V9?WTKL{`(GFY6ZL9Z(7y zwNlOOYCiXEx>G!cfCLAf7@dKRuT(uBcwY%xGJMOIj}CgTLQx$TjaM>9IjPT5iW`)U zE_C2a$Z8FPMXiv1_@!UfYI$X?hfZY$-vvE)B!z^2!ZgS!1;&!k@!;0Qxc3`L)h_ho zRc1@N&>PhPfAtfrf&&k|p0Icf0ne*0gM-KZxVL1sOU>DrsAz~HXKFTM%}yVy?tuh6 zIT_V)Hw01RKR*ys%tdaG4nbI_Tkd_>AgCxp={_n8SBZA*m|A1fz2ol)tl_G-g1(8E zS3xwoQfob!rj7J^`l8ySCCuN^-QV8}f2s4jYAzR#blPGn45>;?dKd9^PNA+pb+X~qitn-Cehfvl61je*QELvtW7G)Slq1Y?8#(MvfV^a=^2!i- zs5k%9Sm5dhN2L;ivOSki3!<08OCFb7hQ=zdz5c4bTGsjP`0d{?VUR9 z1Q}4o_UIslbvxsoKe_}pEr^E}hBHvCT_dC0igDe0(>Crc&lfMNaofLo4Mci9MF{QD zFM}&ch(?lT8eP|T4;LHC#<^@4BeEor+7^e=TrV%V9Zqqe#X8aS*gZz)y(;0z^!yGiDT%zS2(F5~9G@(&s0azB$w?(98Ozo( zLwUvj44gX02ycd196MQFE5xBtNk)ovA#GGR{xLk?n?SZwW0Z;;Ok^=US9d{?L3T2R z#=)cRhthxdl}KVQIKIv-(xgQYq5>Bv8UVv&zF3S1hrS>Ez9mX*H3)G`8oLL(J6dA( zHKJ0LB`(VD68c8}59L((y=gn~n$lFIpTfTPb+)Oz^2j!SPmClfY?3DFbP8XGpb&lv zOA^lOni+=5yEtO99T*v9FEm_MAD^C&!(6(o_a>6OyuIwcuE-u0!-mGi8C+G8-@p4T z{0N6OukrcfV}2|2g?)J*2HIUh)_7%$e6_Is&*x_ft{n^0X6GUIRzn886IA(V972p79h$@)qQ7LQpJbq$R-4%XMLu(KJ})M=z? zmSk>MHI_Gh6K1tuSAzYPk=`Uo=fpg})+&nLqRiew)vRiTeD}jP-hMPVKE`xExrZ%O zEwP^0#PAi|oh}In(LI$+6hDEG0V}DXr?vJ=ptr(_`1>E9pR?pWb30sYvtn!lHIu#H zcz!!(LQk?9T!f!_N9dAtX4Ln!qpPsAE(tx8hop>FyT}YCUo;RNmRo~wS}7~sLge~u zjNZY=)y*rIB4#v^cL@467P1ka^bZzn4NOtJ-qNa@S3QhxF{z|p707Vj zs-b|2ZehRxQKee5x&}ND`@?yjwehV)P8&irVs3DI|Na7H)YkL0W3}fOLFV|8E`+aY zNow$6L_S}duX1tI5q!a>mORwvsqi`yw4Q4g(}=AjCMGNvudJ%7qHz|(%(A&d`&}+s zwWyAUh6WX{z|E6O`UJ!xgRyawe%v3nvqzAd((w!Ql8(6YU)7@a8GStLbU&wfDu>+f z;nB%5jp7g0E4*>>m=XeI6IlssaDvjKDT9%gCr_vdW?GoxP>3To8MQh(76hzwPe@st zdM*$65Tt0OKz#*{L^w`JaxxK3BuD-ls8JF*dBw27nw`T;?Z*c&ur z{?9>7h@-`-d?;i*k_&qQ$JrDwZr<{o=%Z>2q`et{e4#s(ipeVqs8X1wlo|Uyqp7Yj z^YcJKXf`;xZ1HN`2MrG5alVZSP#6&)A(suh`~YztsWF{ye|?Ka${!#Ki(DAOw;*-4F%&t+RZ8b}SkWRM6mO=U z!2+aX{jE`;r2No2Kc)yQKOBkW_Ii9#x=?E|FWMo#_YFH=-tuYz@>)I)Y2)o<0Koeg z^c*f&>WJI408y#Hb*1UfpyMr_FOc<$a%mL@P>xlz+#&n()v!8zZtaInUaI`hMXtv% z{O@sLVD45|CsNDVU5kS)nZ5q%laGqqDG~q19OeCMj3cY0pq1a$T4lmk(*1{%`>3A7 zy5n6dz7I$Y#865g)z;QNg?_!?sF7{w5yJ6qsmrAvWVYw)*g!aL3r4M||9z&*%AWox zV%SW2uy@d>H4qs+D ziVV(cPt4~s*)(hRK68@rW*P)~_NFtFc~>25e=E?6(`2^_3b;9=YVDD$Q~KW8N)7`D zTX;onuqGtT$1K6^;Os0qEl*;9InOg$I)|a-buO6-hq|(H5bSEBSzvQtd61Y6bFxHRpdS^6={r47=a5(f}w8YGAFu4WeOC={2EZa(X;Io5fqYNfT9V@Xk z9xqZIUYE`kP!t5UWBXBbu`4va(lKsqVZwEV(YloP)zaKF$Rs(5CYdBVLXrhMn zgY!H>sB{{&kC7*J_h*0ifJDDQ7;_JEqNm~2&=SN{j{_zIj!tPv+Hc)gwJt*f4~fHv z7iD#M`FQxn?N+*9mS`{%opvn?P(1J32#o4gCa4=g!Tt7Z)mv>Hoh6MJz>qGu97yJ3 z1|oX?csrZtxz<#k2?f}JXK8el{U;2F*) zf0@X{MuTsQ{w27O+&}5K#WG+(-oG7rB`|Vaqc_7 zim#5S4t(YNXuCh>CmgA;__Ioc&FX-no8(;z_gAciO6hod6|C?|&g`lxm>n!%VclkL z3xIB~%cBnJT|K;;95r+?d=k??=)s6xMecmKWgF27>3&OsVzSjw5EtR7}pM z!E)C1A}-kH+UL4-JS1k6ylQ9*m>fznZRV;&z+tFh13+g83ss9uLGrV}04z0da^-~V zD^{a0DF)M}I+m%Sp`lOCh>*>pWFmGY^HD;C5WrY?rJN^QP`v`-I4P(egDGA)YWb^n z7b=Av5(41qKaYTz#$_(5Y4aCk@Wc*gt5BbkIeW5yn!IW<-A`6`z4@6fKnBkM=yJP! 
z(bE|sFE3JiJg$#3Z%ctPjdz%j=om;-iki%6@^ z*c$Mf1tzHj@ytX|44kL|qvKj}>{&RueIk&qon+lx9Zux~uRg@O^h8X#+M6CJ2b>PK z4Zw`4mZnOo_)NNuKhuUn(MZD1w*X#acWq~~3+q>jjyN_> z$qPT>PONgx1V;5Mmk|m3^ceC3-_awAMZ1+FbSwg#>qFnLZNW(cPw^|N3BjspjEnMv zf$ByKjwb~6RK9vl%L^x`x7_M;Z}2Uxke4&>7#VoxP?_W{Al2=NDD(})xU9}?74UCa zEEQl`5NPxL##emNIHL?6p%q672P zNNid(u$gCc4+ES%E=VDKFp@Msx!;J_`++;TxTq^Zr(J1|$@e}+(B~Fmp)9f6rwX2w z)oeIovB~SLjL7}=uW=W|$1k|A`?;Bc@u*#9zrS#V*_R&v)Z3AG$O$KE9gmgU}-`<&2hh!kxEG`XS)> zch+GY!XdWk5$Xf!>I2^6Be`0f+RyAO1G0;8Z?-^=pOTn%NInou$hZEBU>|S`&}5GGWvGAu`(D8FT=8(7zxr@^xPCR1 zMGt2fl${-=)yk&$8c>iI|J3OLdh6vD7_;KBZiz0~ls;N{dK3AJKI zGn`$yJ-)n}sOC5{4*E7YI7oc+O$}Y^*defE2Q-U>&7-nbp7I(l^2O1yFBX4_F78KTomg|)lSo#EmM z(p7KN@k<6Hf>K|<$1?FT#bBye`-ePZV=FZ_AP>`&;%s3iV6)h$p)tDCKV+i;=bOMx zD)zslAsj3SC|ks)m1&4yd!YSF*q_?CAKaN4B5a|Xl98KaSkGkSSVH5iQ;_;4RmOoh zDC*twp?h*f%pi_IgO%E|W8@_cB~(vka4R4ua)WVJ_4`NRw;FQqGE@=UUl* z9I5|-TaR>l3~S(|rZOfZv~;0z*wM>ZA(1r~EAh2%qkI21qUqO#beX})t8sIC?Xfo* z`g!531FfIODnF~tD}Q234TwKnwJRA(Sa5%q0N52HQEoWT^;QWuB4hX%5XDzGZU@tQ z^F?rU63Cb)u^3DmP$LB~o1InZRjl6YU&F(`4L|8|mE}x&Rwi7svi1WZ6@ZdJ@*&bH z$j+Z>&RQI>il!3)k>dDoAOvySryY$wZ2j{KGG8Pgb7^4InY71;duwb)3dwH#)0OC4tw?%fne( zv3qL*Ma#g?YjmH`UrHV}0|L`8?tdy}o)klISjw2}Pn7h825Kb@3F;nv()k5}l_yS= z8iVY;-^hgu%`C!~tWw&3yoryYvGrXeZLbk_+W{$7Ly)W{6Ce1$MyzW*U}(ckxurMz z7PhVCjM}ZnAK9l`W@LP0WUiuLN)tgv79(@bPi)rq%osB=@qhnL6Z{mc!y?`W@NBCJ*2~Y$Mq+!`d8)+qFXon=hxe# z{SnrGO5pdh!u?YQ^6!QHe`~ysKMY&Hgj!W>%)%5mu;a{?Dzr_R-9_yE@8>;wh1sjr zYLzaP;FLw3R2CJAc(-SG*ynq_c1|}aIELKe8ZUB(+E;OTif|2oM{xDu+ zGBY!e&(7GGd{Cwxi2l{FSGKxUt%^!pSdqS?O|^a&poW|Xt6JBxYFahuve|-!g*&)N z>dZ`v1fO3Vq;&#;fvO1vQ3!$05l}X_jazlG`+7);TayOFI@BJKwOf2iK*r3!p`ih? zA*NYr?hks>5X3i=dHeV{?9V`eP)(%r@2v|eTiG1QPH@d(s<-Wr$TbUH9bJ&)zPlzz zTl$MEGmJJi{#*YvbGG(m^|J%I2!`|1Rp6)D_FQbyE*lpy-Uo7DqC5-B`5DY=p-E1T zV0rgY6TezAVzj2F6fVMNp%~+ur2r&O^ZQGmllvPd6cSz>X1!+iy#KU(z5dkce2Al+ z#(q;3-lir|ENRrGnt|Bg>yKBUeT-WZ;iFrhOM`=g2*$fg>|Up>;Vy72kC4#=!YW3T zkouQT%PjboSOhPE#$~O8R%bp=6i_RxN{nFbDKhuabyh?dLF;asyZWT|Ca->WL!o$a zakJl#uz~8Lb zOjq~sV!61;HO`nkIXG;;T&Q>HAoII7kiBud4{SUhJwPgEI5#2J7_6Lq*@M!Sq^K6R zCY+&osyCWFW9fd~)LI5_wOX=dRfSF_?Y5Mn3SI2|1$aGY|Go9W00ZF_;m3ht?O8i=WU$YVU_d0lohKr0tZy;Nj;rezQh&r}$} z??Ow$0ltiIS{#MU(D|!wi!V6*Y@gT?pRf0QSNd%#HwfZW#ywz|RLFJP9u?7uY-(z% z0L`dkff3{oAY5(>8=Vo4fFvTHj^<@Q0h?@M@(&|!95gzUvcEdiK_TW=jC~Jg+GkHr zo7o^>bZPMt^egGrskVL<$E*ir6f*)YBjBy!20;X5?aFkV z%ub7;?%mh8)wN(KZ!X~rLMbdqK|wvi3*~YO3|rsBLeM%Q{Cl_T7mF#| zOsAC%gt0VE(pjf0?^#a*#am{WklW0qYW~{OeRgXewM6*Q$E{6;kE^|~1E;sy%q$?YSCeaG!%f3#3GR3-5gTlAmCKBZ z6X^K`9)Y)WEq;DLH-PthxQN%3YSYSjhe@s76Y(m$^AyMull7mM#T<{9+Y0f^eeN!I ziTO!a>Jt4nQwP(9$bj>;9herw47#tIB9mU^~&<@zGjP%!5sfYaz_ zhz9TzLV)@KTw0haxxesIjgN=BMwFzu07?*=jgwB&$0DQyIJSe_65iqiSzjn;$wELfsuqsD^F}v;0;s-2b*=4iARkDD`wXN3Nfql z(7#sm9xhlC%ADeX+8s*lCK6A)zQyNuCwu;H+}95ayM4Bg{Y@++xwd;3&WDGGr3sXm zPpRKK6g0SvwaYkYWe7iCA5PWsadUK3M*jiqft0{oRv7V$&GdsI4}hv!l9W{AWP`{< z44l$pxa@RIhD_E%?URA)>CK+wY^BB6PbDf)B_UD52Rye}EnDGxur2um?+4OZR97NM>695KG{T zM|!y(s2#u(iN<73)_)e9@{Uub3H})A43fPY5|bSpD$U5Fg3#$7L2<$!d{V17Lp07E zsMoD^mFZi}>e%gi@Ms_)r~5>CnSU-b@dED^_HEM<&FolSeE%q z{q;-$)skB{F%3>?uB1{MG!enJth&P!GjoQ+Ejbg^G7}C}-};4#dkAm3N9s0uDw64V zeg|_-SMo!>n&VZEf%(s}oXIIb{UT}ZffdEOWrppL_rxrUAKHnd0;QDC)uH=>XtfZ~ zebL;pja>_22b1Xfgw*HwV|kVn5(rt$y!4*JULRTwiE#klvX)nXC$ufvk#}Zhrqqfq z$07>c4bh-m z=;4+Td`mU3j?hn&NHvh%puS{)E#msX7(TQO=IFiRZD5MP<}zuWZaSg!f0+c)RA>|5 z%6@sgt|DY)3pg^ss7ioH$`6ehpeP==Cp{lmVbrYQ6-YoYWrW9*4=2ro>w?Rt-9GKc zg$bT5_|dTi4FWR?-*PJM8<{`QW=cccon7L$D=T=}8RolsOd;z-7FG`Lu3mm+Ab$E= zI!4x-Isu(kv&nHbaJ6j?iFv&m9aoERlA;nizuMS81w4J95EQ;v_j=NTN^Xb6ECW@o_U_bE^Jd~L*LVFZI*Uhp)7DOVOV%G 
zhqN^axU-ghK=B}IDQZ1^S%9^&-E?&s^PAxa9(qsPwez%|>Rx`dMHZpT_?nUd*W2F~ zCFHiCAfD(B=W#YL^70`wNIBgPG^pMG(DS0H@cK z+$wTyO>+*K%-a!y>w5&Wq#ZvgiFdMYhg^eNDD)M9l*7d$<8DczZDDnx-iM|_%=Hm^ zVRw*S=h@|j+t=i?ZzAN4guV9{rR(tpCQ-$;x0-u=w0tR~L-DM-7^P`6#;c*Sr`)+d z?@Avf;){FEaSD*Rng%o>5huM-={b0B5&IdS-|w~?FV*;=)Pw|R~6 zGZ(qr#X12N7$gVNN{v#xE4*{d}psB{EH}HhAk}S-t zMxdl^e5V#%UJe=3D)E>XpeBb=)bhzF3DMin$Hh|K7LKgT#>G-}t=1jN+tL<^6n$>M z=lbfHTV0&!{N3^Uh=xj&i(KqOzOU( z#Bcj88~OnMTGVf}K|DH-y3H4^lOAE8z9yt%on8xTaVcNYWy6~y)y<0-T}o1C)~GqV01bA}tyd>FX;@(UwTtB-HITPnTuNx0=) z+!;;f006_PjR6r(E~a)+E?22Wo(#BilBgK z4YKw7iMTQBC+xt)E-t}wQMmh4+$6w6{lRDd#on)Q@ubh#@Yks_XJoHbJSO|cFz|)< zjWeV%=-FW+2Wb85!6daLO`&a@mlWo|TkCAHtqObxlesePr_bl$kqawKi{=^*&eRQM zOUEr(RZ*K>dWtgFAC1{pc1NhL52i`m6ZMj6vewTeikbB_)_s{9Ho^>`&UGX2=h zMUpem6)#BcEo zTCVs@n7G91o$vRhn$ciuo-gKGB~LPs<7;)DB`lVHou(r3+0`oyI4Tx1_hM)DbEFj> zlAd@na7GcvmWM%#CT^S#3BSN!`5ey2Z5z815Vo(G94nJVr(b`2v|!~gCA{KQqncvl zU-&}#czTy}WVl%Nh0-HC^cG8uVCOBXY`?(vKbZ1@bL_A`{0Bq}-@Sai*MsQ0V59Ee z@G+u@l0&kVm${~7PMG}$E6OUL<581P#jtGjK_|ij4Kf-iT1fQc$QRFe%>wA=13XL{ z^i?uSTYtTU57--B65c9YKnZoc6Gmqd!p!1!zS8Os>I$M42_5SWA3YBmT#^#^7V$*R zSXJB|I~o*bi?9wQpKGN*SrW|~5hiVG-9KRrkMZk{;ROWR8um|k%wVQ(C2Q0ebI}bzbV|Irt_Ryks?VYX$i%nja&Xf@| zF;7p%l5)lBw>M;&UP-fA|AT00>Wn#W^8SWIq${ueZ-*N zmiV|=X(>j|v6?dSP+@be$6`zuE1tR(!~(+OjRSyjQ|x+_#+8BEB^XU+GZ#MmRq2tuQ3p?^R8A%w6+~j1Tf@3iofFXVxtO z-kHQZ?z=U~$)S@k`V`6TH4Vn&ALlMjUAFaIr8#vT@cE!F8%BDgh(3I?no45vFk5aS zMn(I_J?USPeHgg(%GU=$-EIT724XJzLt#ZFbP^oV1n@Zpvi8XCjst*s%ZM>e&pB_G z(a?R2w!JAHI%PTN9k$rv@$Om;+|HD9;RLvQ{9KxF;ECXPgoD;kZRF(_bVMEZF96=o zJg-RF$`j>FU9hd*+2!_oi6J;UBY@IXwIBXJG2{GIFU-*5ISiQSS%lP0ZjnAFGOo*RQZ#7 za(wbbbX)<_!TL*$liDDoT-IziI%P=xZv4gBhLe(IUri7oy!qv$`OAMkM zJeqN}6x+s~^Q-maP&iv_#I(M`-EUXPXN~P~BL)@H7`ACePthZ=Yu{~aqSHJ%r}`J$ z{PBgS3u!wIjt9rj=Q^}sE%<`n*(I)Xx*oAVz6w2E?DcSclKB2~gnx;IM0JM2kqtZ$ zmy1L2#Dv#vjX&^4_sblK+Gee_J9^d%B)~0Y-^R*t|LKT$TMYwUqSM;%{{eDaZSYJp!+C$oJPWB*VWxd3&Evv!OQ27+Hf#HbD)h9a{)3N#V%rl~+Y z0O8TpE{<|ziZY4UkgDv+k;+V55&6WPJHn0!FA_9t#}|?j{8%g8HMf;pi z&&LmyEQ|8v%ChSwy*wX1U(re`;w11xP@{D+w{13$qfP=nmcPHcvH+c$XPB(v^m9fx z-+Xj`5!dThiLV9se|=qDc~1qg*pQiRx=57p_9lWOMcD&l86|11B@%ed4C9L4xk*Wu z6GaPIc4@#b~~Jef-?2DDVfHdcJ)GY+!mup@WdcME`Wa^3vn% z^?&-VDdvK-C|L{ecRj2wWPqu?A0DABK+r#pmXJ*lcJ|gY)O)CFujC={Avyz3)BD}o089Ac< z%h()x*Tu!9Ayew5f5Sot)-bX%1xDt-nsaD>*V*GJ5=a8*{8|ijz`@7}sEtiTliMnl zW~j^Zr=>tyFkf1p;VJ2qDNRr5Q$yEIF9DM)sVC~sUbDv0Ca^2`|9oyh8>aDnvfW0| z`L7S&5!so5!!dwaTlZq>m<3!g7@dHCz$sm>SuD}&(xZeokK>Tt5~B8?Y%nBJ9Dm*f zeJR&W&+yLr`Yc|4r9~Z+2rA}8yU5IJLE!J@lDEzAvuemazxiDMADwT(@R!c7jd^m5 z9xRK(yK2kq*xZ8I8Ez&rduKBFb)il<*=)osr-t8&S-%glPI_B4U)HRzsIM;m_xt-# zufXhEA)^TFl|w3TBQLAM<7!4_PECobc3$jay2?i%-_X>{U4v;K1HXPL{hgXdCuwhl z^!)#O-C}{Z^Vfw(HZ?bW${<23b#g^*nI89nr}7)EnmUN?POiGy+QpLtoFjhcL%-J6 zR^+f9KowDGGQmN&TRS)DRdjAH1JM5rZ*FKfyMb8or)qU3br=Y`*A{_D2mID}F?Gs+ z-!M`Y2yw9GE;OO!9mD1WOPu+eYp4Y!NDSI)KbRfX9fx}BcyH>u3;3r@M^Xs$C6QjV z&#BDAyn31@HuxSr(zO^CIryVg-O!+&^9mO9HN&j3?79)=w)$GNa34>p=QN!#TR&kJcS;J<5eLJJQC+ z>hRxB_20aq=Yp$#sA_QV~qiYzT}4AdJu`Uv3Jt*m`F4@0oZ9`3F*^1^j#YuC0%v~ERerUo1_1O0P&jPwr$3~A%MyRI`|DS%&2<83&XgT%M%K|t zkcElC^#$ljnkz86j@7cnwBHdYistzi*$6P_LfqzCyd2g72kjJ_2*TK<9Y2g}e6~W6A=_=Boi5%-^w+o8E5mTj8s9_ja^j6ZHPLlhzA5HT!y!C{ph zPQ+v1*l7?3NKw6eu-vUUczg-edj%}3Ndj?VG9Hr{q^-_NLh5+XLN$$F9?N^Z5_|>Q zsq<2RfGkta9LchpD7Mb)A4(G_o&`Rh>E;4ak8!lx44j4pCVYOBcC@)c6S z>00zNAb=!;c0_ULM2Z>I8jX$mDN7t>+hxraGccNvNd-AmXj$_p=t_ip9%4h+!fy2f zp(48#-T8Vu>hbXExJse&3 zGlI3qqS9`naPH!x@#pu1MGa}m)=^GTuhwOKHmpt9TfCyhpbIsy`iBBv^uaW-7>C@x zAmm{aM-?pB_qjIdCQJV|hb3b4UrXJeQe1Dp>SX$!vkgfu21huj)c@Ae3s5Eujeq0K 
z6%f?~yqBOmM5-uvbFq>040tg>1EsnyCg{DJyL~W+W10tTj@Lp2Bx1X3aR3WqzVW4T zN3;6(SXK4+dIw)O|? zNajOLO|7k9;AhuhZK8aPaB9YrULM_HYeisxvMNT|u>!5cf&9u1rGJYlV0o<07=-pb z=-cIpuDL2VyT0|GU9z|LJKtLY!1p(#^Q#Cv;FOoY*V}0d4S9kVExjHAb97i3DK=}> zJrA5Hq00M0ZSsk*lUoyI2|ZG3p7_q|@iwg%x_+gI)p%OROmzl7E{lPUyy}809#>8PC9#pxCq^7TS`xw*!#M-(ffpK6Ny zj?Dq7IuCdFmz%7kl zPWU%nX9Qz|(>4?V=hxD=HjtzVU-NZ}tDFquJ}!*MvD9zQ*>XG23eMnOI^oY%pIUB` z*&99u%SyuwN_Fs`OY{e?`2MfYLKx_d<*(~LMjXtZkq$N##Z|JjF*N*+8Rp3Jkc{dS z4}9{+$?stD3?TtQTee{Xw`dC-jO)4sT;4)OFxnYnoyXfEl*?9vbhS(tvv=9(Z^IgN z!^>?amV5c!&d`9;7Sx0fi2?d*aLi~jBpeQg6ZUP41wYqa1~&%;;Q}svl6GcS+}zI9 z3qgKYT`*aS5BSI%xVS!nC$#QfFOB%%OFP=z_vNDTPk=)qq${Dbf<1W4Wblho(nDv5kBturRSWS7&PQdOC{`B z688;u%(L_2-wOMqQo@K@7bgTy+DQ3f_Dw_-+I`n9Cl@dDRr9W`YzUs~#f9j#cYP^9 z;0YsVz+(uHMCk8X`(}PK@o_;gV%U3i>PBdpc(cHNZIe&M;=!-Pdb*4*Pid9m!kBk= zWfk{jjNq1iLOBsA3Hh~&%{Y922B(dS5@QwduSmL5@&n{6?@7cJckp zLb5K-D_USD;l-jKO>)NRb0yK87`R|14lSm#UqX#)%q?mcoR-uoE>E%PyMK$~nJ&+rbxn1e^_}ia#RsB@ zmWgg$l3VB4;^>}jls!_Fbf}NO%X#j&cfct3$zJUhgUc8SsQm7}6|q_W2)5KNHGuK# zW1#!~#Kp;Md!3aROi1gyc62(OjJkf&0(?!=Nb|8~J6eJeq;uEW_foR5Y7w{gh9q(@3+kB?BI+wSkNv z3eC`iNKlY?25~NwEj-*&#P`i(Ur)j3;R5Gz3Sb36En zm3O2FlKjdi1y7(C-61Ecum7OHHE#!=>M*FZ(e zzk>O^6Fyy@hj)l>>HDj=v7i0Sq@zu-EfI-AJB zi>c|(OAs?U6G{etG@YNnh{Ae2l;ceKPi3Md1V-9NWCVvKDGSHy$3koB+`vKv6`Wh1 zxD_%kKp8djty;v3n|Kg@V}t@uhzkrdY z?|Y+;Z-+s|Sn5MeX;zn;>@H=8nQ?a-gfN{ynPL3wOa$XILu>UJ)N~O#b$e>_+0SN& zX?xQ?8)|y?D|%a+zgG9IBz7MYJtk7(bHujj3?+x-(jfwT09oGmzWSUyQ|A2{WTA$# zLOTLphzTOjm;i24xYvt3CR6ZA&|9=`o}DLiK~b{nYI!LtAejwo$X+bVlXKz!*sBlp zDQpuh3LZJlen91={^&UJcDz`PL)ShFoH-$#gFg~GZZA2?bgMqD!#9-C=i>#KAAokg zlK?;xgpTjP@ihB??@Xz^6seN3p*5q5hwGEGH;R(g0v&!^f@c3)_}SqqT)W$dyu-gB zi#y%X_V_%rZ5hBXWw@8#oEX%zH&Hw{Bx8=M zxWsPZ<$VM9`Ucq}L?vn}N7zs(F1tlS%lF*UqMRUfjzAq#*nyUU{ zu*jO|$vG!UWj!HC)58Ry%M&E7pUdJnza49vkFmg5kAGNzu?3_wRwObSN7HsE(w7y*}I6VPd@vL19QPy%Jh2>VZ3ffUXH zD@5dJ98IYa4P|{qtY%ku#ch^r*~6DMZ(IUU?@1+A`@(#q~%G3)aQ<+lXDCuO}2Jh_dn%039wu;)C#wpZl!ymfSKxsc%(;LyE^~S=+Q2eKgK{ptz)*7!O zo`WgRz0ZfeiHdcdcF6p1@@rR5tE=41bNNCg?TN}Rw?U`MuXvyx4z9z|R*Q#wZPhjY( zBVeu)f*~i>LEpj!inH_X6dAHsf<%}q9eAZb8Rj>`bi(UvG->zz@?pEYKo;;gsQ!+9#`Yt&n*tMFT{qC%Mu~Z%* zWc=xZ#vFrkm=S#GfAyJhE+(ca7UT^;$b>=d=lE=lG)-qXlJh|hGq4xgn?{Mp?X2=a z2xr)Tx`Qdzv4*#2n>rQ>IqOX_3U7;qfTVQNpD9{5z!tK#xdX7I4;5lGA%o$(Xq=VG zl>CET-B@qmB1a4)TkkoyqD=H=po%=RL3_MIqf-Gga!dB*~3dg z>NRl0)W8F*lh<&I#a#HLQRyhcnP#pBCv!Xxru03#Rni`dv+<_0-+Afn zZ&qVOS<^y4(TtPMJ-XAs`xehQx*|BXFvRT}{J;cC$my-Pbr87rQXy{u@EWB19xYnq zznXy?U|XtSEw!75;%D^7Nzcx8fe<%r+oW}WZ%4PLZjk5W26!H!2f@Qn;v^@BzpC;{ z=>peFWpbJ-&s(tJmS0JTbqzhmr%c2TE(xM-)UUM+`g=?U20R@evcFfg9Y5-^fw-+` z-ZSe0mfB4+5^RYUwsEF`a2B*jy>##ew={Dtpd92J;QL1N1t6B^-+4ULXh>pBQ0aAx z{N1!H@ij1LR)Fof$q8p8`XP-b)zqzsx~pbnJeANu>iwG^G{?KP=dZl8ZsAJ;zuiDx)J! zYw>7sb}+wO8L#t)(GId_@Gn;!>HZ`X=U}sRhs5PWs0);5z1h>tDEV2~@iBJjeTK~X z7$^j73Bkb=s!SAG@_#3otiP)nFZlFvE|GQK3*ZL)7OKEiG`OzA2%@5 zci(sKhK%}S489@db9Q&Y+v@uI`y*v>aAVbg0D{%A;fV5b4uE3b_3`m(YHo%CQxGys z_T!BUg>;|MmHvByH?I>=E|e-0*!HV>Vs)pMZHXHlc|&i6!d65c6OT9@smOm~LNvs_rkMR5 zu~*rhHN_TV_>#`U!()1Sx}*cZlu4wVTteEn@64glyABQxrzym(z)g%66cp6gurvff z*`$LT5&@ebIk3whZFLQ_RiohC#Ru&|1*aD@kmuL~Skf57R~6)P1@4y_6UjfeMmOYj2-rq5S8=ADEi`K3Y@i1_UFLc(oWi0}8DVuiufS za=IWEh+XLWu0~9;5})>TXLV%Zn|*~_2`el_kWu-@-ioL(wwA`h&dcLOzQCx%KKPsj zWLV<-Vc@ZP;r!l#aCljQuZBko3patF!(J-K+FWo+pNsYY7i#}o?vp{@`^pKemf;~fKCQ8*iibu z??(eHRJ#wn2{SmFAYE5Je*g_%x)Z}U+3jXeO(Tg=%HX43})j^6EC@gX)WBu^ZBHE2#n&td

MNSTy3{{QGqSNO{SF-)!3r;ED zgF65)?*vxBAG_Cws7D~-lMQ$qwH7=~*MlyAdzuTF=fi-o%kjwGz~(Ec1V-y@)T|)R zBe{2xL%u7r9UmWI^urE|tR*dnGWDB{>*BCU*mLJ!BmJM-p;tcv089JsWOMW$5Me+j zP36ED0D)`V*A#`^V8HnN-G4lM3p_X6j($GJ1u!d@Cd1NaA2nlQ%@(hnaKP5)13d|u z+~R+>S2>29H~-NN1Uh^_0@{Dlk0J*FaeH6M^NgQDma9o~wSjtYU})6NLLpfo*#t3; zT5z+~l>F?S3qq?!Evmao;qLInYS}$9r4{&YY;RZ6LKyCk=#rYKe;K~Z z4h501TK^Ln>xkUeaH)j{M?l0uh<5>n;GsFYW>L&C_~QDw@X%ETDFulN+lN#6y46cH zSVCCsCu&(^aDj5jZ;SFh_vRcg-B!Q|Sk5bVM)E?Q5dYW?klUQI#}n!XJ+tq8O~U|> zk1?dco$Up7_o>`B=CVM}!%I)q-U8=7L0de5Q%Q;0_&)Upd+lxzbo#wEBO@aZ^bF8d zU}aXxPl(&LuYgp7%Vki{Ya1)pAYu09jx6XDX`6sL!3Ei4>0DSat{e|-fC8PbEG{yT zrn1Ja>E%^p9w^PxD?9B*sG`EbBB2&!EF1W}(i`ow1v<(CnIdj7zv#2U5{Dg7o7USV zLN`b9`U=l%%|Y_3^6AY;p6oV_9Pt5Y$kBpc*mpv3MhpyTRRFeH%^eW+V7oa{o^sjl zcWf>D_Q7*pus^9I($62+=IgxNXs}i*U)?G9mFp+)oa~+UG=MgkK z3aIk3jc1nsGGjsC3E8v71NM79u+7U#c3)(QBMta}Q^&;|-*xz(4m3T0ae(K5*6D?8 z%oA$@F|Mq7szAuPHCc6It;TBHmd4HZ9q!-r>9IXSlp_rX?%#)Jh=uC#IxWu1Q{Ahj zu7g9MHsmzF&>%)&zD?FU9 zAP#uLl$0*_y2RK({oM_=3iUEA2r$u@CPj~|DB*Bcv2d*oeqv3S5VgIqTWD#RjoYfQ zxMcz0$4m0}$_}{9;Ch!WVj-qM+KnSn9k@I?kX^5%8fOqbf?HWrF*z!k)j97gJ>o76 z!gi8W_pw`Q1Z>Hdg`l*??XU}D|fmtVeKce-keXiZ!*QT582k1`2Rc|)n)x~}M_Ru|xqOC&9 zp)|p}WTKC>qKKkIvDL8U7PwN(!-7-IRpH+ol9oVk+r|hxWaVvF{N5MOh(Zc0<9f%L z`{0TA!aR<<&hw)1ys)1MY^q(L8$^|m*TdpkL5yEN*9ksU(EJr2uTpK zVF#X{OUnO0t`@HVu%&;bRRcR)u;6h&p@({HTuk_2w&mbZnsop-`06 z)gWa4C92p$C695-hk>q>zd;LKvY!+JnS#Cg;~|$9DtSuGJ7LbNF@)b*^3Mx#M9_p6 z*Z^~$H{aJxd)&9V3al8~YhX?VC02@`q&|75SFx!}OM67eB&W1IcizYp9GjSX{H4LL zz;RdqoM+)igEjmv;udtvAwG_i)KwVF-kO#kw8!1oJ91iqe@hBrNZW%=p`=Z6MM+@~ z9mq8Xxj0zG?_wd!F(-O7#yKX_RrO!mp~Nkk2I(%I@vYk4Yr4AX;zqgm0M54YOyg_Q ziJu5}lFlU9OW2`>YQ?f{a&C_4F4#2K zvKQ;o7Vow`5C565{ zjuolXMC51;d{0-@)7|C#(NNHvoowJOb&y^|LsC^iY!G1AupvB1jTnj}G?B(uZ=<<( zXi+|K7*<7JV5*cotTJ40jxU|w2po*C+rWlW0_wG>6<2QqSflQ)VpT8tN{Ym#uf~Z= zrNenE>9L(}b(kIW$&t!0Sk#+<{7wKgP}#iP%rpT3_3t8?V-FzIP(Qa|0RxzI7+VYs z9a#R{_#T*{Cdx{M6R`!vMlR!y4 zTSzlJ{)~C0A^fY^TQ~$th|iY&?1-#iCr9R%tUN77N_zPNuPq0sz+U}+PN>wNq0C*y z+O-6jDNFAdWiqF)S+!HLXp_&ARJC}$^DEf2e#~dk3pv@;P!3Y$5RrH92)xm{1cvVS z>}({FnYWSV($_8_GTAzYw8=poJW)p8!kh;6&s1`H%nV4zkyfu4A0g>>ZPucCRiVy5 zJ=O12&VhS&`sjpZgKS|_i)n$wxMRa;G{+zpH;Td z70!O8H*2naqNuiF5zi}+)a1uf^}Fsf-$i4nbg!R(k;mu20a=aG%38q!6O=^7B&H54*Ta=nYO z9Xzb`>|P_04h?=kX}Q;Jp((O6xpq+9P8q8oKkyWX74f)Su!=m4>A3g$D3fyzEJ!U> z=j1S2RAr0yX1s+Szu1dqJ-hX+1MzW+taO=V?jQ5X2WMX}Yw8CD#6*VSmqjIoydR#e zH5PT?B`2CElIgsp5jpO8HUy|0O)uboi1FcG_Izb6n&vYpRNyq;o8f08R;*{x=$LlD zpd2h?|sC251wMTuw08x^_DXtPppsmOaQAoUy-Pq52hcvW#;*Tn) zGB1_L*N}zIZoPQ~x%gq=PrGd7olAc&Z5?7iTI?4ktk~H#kL`pr_+*2rqjN@>4u%r+ zSOpU!!Zv55nxL{T^GpO9LqehBT^zZZgE{1;yz^3SwX%TPlh%gB@gIf>KJ@|P=+Ju| zZezBXQdi8%5 z06JPy;rp*N(G`$iSY@}8O8G)kNm{ww8>gHK%|g@TUb4_hZ)#MrI;$~tPpz8yyvBc? 
ztKgDy%k6g}xzzu>%;f$b5wW4{p+uz}Cl-q5CkL;P`gKr0KtDT;9_k?fpLSYRc#q%k zDZvKU?HIv^+6IU+x~w^AUitfz2W?;MvO`OrO!ZoWt&ZPut`QHgUbY9)or`l#GVP5!j+R!RAz?^LHx1f!Beg;*+bwmDFmtZgRo+&UibEr=~@B s{@&&J9S-MvgzZ_Nre`v3hn>^22>n`>3Kw0t1O7ahSC#uHV;20s0C)Qc;Q#;t diff --git a/Health/media/container-insights-health/health-overview-unit-monitor.png b/Health/media/container-insights-health/health-overview-unit-monitor.png deleted file mode 100644 index 91873527432059ad4ee27b72b15484d950bdd425..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 151432 zcmd?RbyQnhyFN;b7l&fSHBhWTakt`5DehVdgy8P(5F}{v3a-JWXt0*HxI>_Lp}6(8 zy7xZk+vnW-@4e%WaTy^b8I!r@oa@!+ecr@sX(-}7dh!Sb1qDx8NnQs91)T&1Jt4OTLAM?oQD`SbfgS%>*2 z3d-*?WqBDrAG7^@^pKYdZc`)In zO)V|826kVK`!|@-x>}NFmQ4^8-W&!BRUD4}E^uen)zL|7@j14dYjjL!i!1*3BSNRv zLgu|mYrV~Uo99O%@T#=q=eMCY2&l67B#z7f7|Mp2!}YTSrb?idFskl6HyUYl{L1Qj z`Ew2ORb2>ONc*wEzaJt>EiOx&QB*D|WAEbs=SA7r*Pi!}o&Ww{pDHqZ-5SsBwEPtH zKDVKmjk53l?#6Z?jrhmc{>kzgJ+0fH&qTAh(n*DmWqj012+fV`NbwAe8Zm5;5$A(o~kw#V}JQU4?KRn zapJSo=9$K#N;baHfEJd#Jy~eZ-F*u&ZXx~qVq|)1;_AI0oOY(lbjnmnW8>l;RlkIi zjGiAX98}0gJ{sae$q`h%zx}1zx>w!4ziJHZ0SnbU=&H;l@a(%zsZ#3d{064wd-Luf zop)Lcj#H(|;3^1TOl)lGQTOlVM}q_L`QHaW?~0P6^< zgrIWyM>3WEDplsagDv+3;@A$`*vB3IummG8J-jUKNz8b;MI%9@Sh?0=*K((K+h&`)j1uR3Fl zgRo6>SDxx(j*aIngflBY555?(fKe=JiR`?Y%cpm}Z&H_}Yt`j`50jJZs;V6`BIUTa znB`2*djI3&zeFC=oX+4fA=2<-Sd)M}UG>S<lKH$TgWqa%Zm4NFJ4u1(({<4nw#BO=W@=0*BARA57gO>KcReP5rRuA zF04*x_0@T)O~B`|g2-LAkZZwnr|GdlXXXj3wZ6!M3yzu&9k2aaqD2)E8pC$4D#Aj) z&u?Y;oTe*R=fj^dJuhN@kwpFeAp38{gCNowtQc``Tnp7qAoOo(8QG$a^VHQ&HX~P> zN2**aQ}-n2=$%?VzRgbUXJuAyHxjW4t^{pMw5T8!>+}Y>7kNnI<(0oIrV^*Wvnxf! zMvoX~GZ#58G&^3KxDAl<1>Ih*YClGvT217M=(M<)UTqf!@5>u+=X)3AJH%thXWw-= zel6E`c%{KZNCZe;kXX1!uAuY&n<>xTZ*}R*sV!p<$C+tuM>8`3(N^7?uBuA8MR3&2 zD%bpD3y^xua{c|LH|d+O!j_T4#N#xc;L>k3)LivE+8u;4ynQC>`mElG2X?>&*;T!mKO-`@C2gkgArw%*QRSS-knjl5Jx56o0 z`h$sQ{ple)4$zUTJVRY`6Phwr+lq$#ox17`G>4Dk5&!pzY+fd~M+&d?;IM1=9hr%W)Ye44(;nDp z!IVWjU(CBXJwmV2fY-OLgox?I@H_32O3!uRddNhfWh|F`L7k_Q@7cF?%hghoC9m1w zrKRWvL@g^p?Ez=sUwO64#}ezjm40y33D`V+;WytuWCVsj9OKKE@b|j8JDV}j!lVoM z9Hfc&O|C%BW$W(rLl%^1SA;D6nawca)kd<^{>ABA)hzD%fUA?$w0|V4Err2q9VF*H zd~$U&rX^4SS0K`*t#Gojix{`TA|T=Fr^;sIZYltrU|=@J&i24te>WJSk;wD4ZGA zz^X5JpbQ<@+bDHjqCVLeWwAX+^l(;i2!e)h!o7yYP@6|zm3SeTE7f#EdWV36VVe~H zr3N0C0YnSLWz?8bBo{sZ0MZq371o_6Y-MG&`Qc-Anp1V_R;{5iIgeT1n}Y_ojTh~C z#EUaL-M5TcW`2$k-;YL3A3SSpWvMyiYuQBgh3|_M;`waK4|OvDH5eI#@gI2a3WV!t z0+PmV9(>nC8oU&ZJnFj9xWBtFf3IB1V?TMWx}uD(ny=Q;bO z>UGFl?=K4E%CfSu;KbpT;NQ*z=$ugMMgB|`=t5(y%?NRBP?1*kt7;QUKqA2HWmURK zg>wz|#uqb-t?r{g*FKfmjHG$hSG{}3wqOSAbH?T3*QC@!w_|9QUB@#y=qbPNiB0R( zR_>=vN-3!NVogTlFF;9+iIRdIO=6k}uJx%IL;*WO1pmIXUkEVE`8;7+Hq3uAV;-CV zI7Agdj}Dp@QI9D{Ulgc|?^So-`}r@PhIpmG7{)ctbW>A3$ zy2`CutxW?gl%Yr418qdd+i%eKMD5Wd%h;?iWk=D;O)U0AJ)8W_cF%c6D-}^RRP);} z*W;@~_A!QfLMYvkeK+7Ff?CXqzP^TGR?b)`u5gpY1IO%f}GnFZ*a_ zKkN0X&FA1pa#H6Y5=}mav@Cokw-$mG+C~myWgBQ>8k!>D0ibZ zDe#@Z;`=@X*D>UxCD&&DgyL#{I&7*{F6g};vx>^LkT1Vag~ws1LV0ye>f#*#sQ(N( z^dr?~nxH`aXvUfQh`XKfd0`h{$JygBJjdj-cZHIH*9NLoR!54G@<_!zXlU2_NLWd@>3zwJK6J+;{?*dV5n!_H;N|eIlyN<19r^a~gxF)Sz42 zXS;}1AaHu~gt7i6*-cbB$0`*yMz5bz?w!IV=zuUXmTAa5v(}}@;)wjrVTi4PI$1ESSL%TDfEGFHN`tM2^`$#F zn5sO~{aWU>bh)<1{L};f3lcb?LsUc-Z(4bvacV={h&O@G&!WV$EB|`;dL+btW{57B zlc)1EEif}u_Fi=FyEV}{8|qqrzZOySiBeZ&^8!9X;7a*M6~_F+(ZNuZ=yT>2H!b!_ zb{qIoR~tc zGz=@yQF=j%@~!$?T(}p3PeEOQrUTUvSJg%SWrFUng(@FkdKncJ5*^xB&=r(ao9h1^ zHsEgqW=QNDG;;2IoF?EO0e$N8!=l_HlyjlRkYk7we$D=%@hzS+|bBG<3BGZTW44&%=e9uH7TuoUCKY&LuD3pWxH3sP@+gc z)jiw_Q_mCDrVYMzYrl^qH>=Heu~?M`QDrHc@VgZSZKf(}D9?N^WZh3CzG6fM&yYcX 
zyi+^*bSZJNGA~T|(*t`)3)H?Jd$x(>I2jedgCN%N+#sV_RHa{ej5l*!sB9LCQFZDZ zpb0tTK|jNA$TKN|$hkOcfAljwf9SZ~!Z`3Gp>fcYG+x`pkg|s`X1R=K(f_FP> z%q;n?%XJ*8;HT8MDZ(HZ8v&>3#;~Ef{-OTPt9hrICdRL+VAN{+!ocY`L3~6%iKe(E z`WuYE0FW6olGsUJo&n<+*Mcoq>i)8X=J78}<8^zY$yB}s6l{YjiPO(=(}84`34DVW zK*YCPzT`eBk;NI5&%Nli;C!;|e>8Vw(2|B9LyD_E3T3y>qD;^sh?RO_RPI%KfN4qD zJ1t{mbvGbF$a2_qmWbU;3{cOU(QanrysZH8ZK9Ua8YE(N2+x7RCC>Q}ugJ zvGTInnVW$Fe)eKEkBQ_7ydxQ%o5AKQCGx~eje=bKC`qPed=1fRUIDEwJ1asQ5Fo79F!Mb>)cOimAp^;vsGNN4I$$-2c8dv4ZwPSuD zHXTXHt4$IP#!kTOPY;mYIu16ZHd9*0L8y%~H6DwL)!J0hmZMD?a>Bk|L)PTirzG;Q z2)+fIWk-DK>`+TY(V(rTv5okZI)u-;>(){(N$j3Zj?vSAb)3_&Dc2>sR_L|bFe%w^ z!061qUdRFRar>DD4Rqjl$P&h<-?y&PG#+sk$283yzWd2pqAq=Fufsr}2-zIAEd0Jn zhy2W=QGf*xs+|8ckd1yF)e+)xC-INF25Z2xDQIBIj19J#5onrL%`^f{>p z*Jm5iGUTYr7)Bn~6sLUeURV@atAn24(1}(AP0K zB4THSiS|@pL9`pL)eZR2X4EnI-_dkIwF`gzQnP!~&j%ixJ4#Gu3{Xq;D6O70-NfM- zH62dP9=_8rbS(&HASK!FRW_*4@u<<^RH=s{J2A(4_`hrxQkF=36RmkHaI-I&;&9$# z2xaVdUhgthzPMnu6V}wLxggYhSPkRQX=AI0Q9%eldA!CFRaLN+C zBV_-5(fxbw<`P){LG%HU3Q-Iplr0>~0IG^%e4B(7d&7CdM{n&PK@x#-E+Z|m^wZck z?a;+y#Nu%sI=Qc%f$edd@-sIWx7>_X`grPVP<9wIHsH$lOYu%n>*;e`|u%$6(u7} zUtB|ue7554r~24ERxXDMaN(vXJIOclsZ_6|6~Y`MZ1{_*kdv6%o;wBucNZZBG-QnO z2Vp5$5+1$0*?71#rF;ORX|_*XeauhgInX#;a+awVZ0TNJCP3yS23xHUJ1%Aqi!tV` zf~i!DrzxaEgQHOb$|bAv+;K`iN5IMM_p;w!mA-f}Cxe%^*|1_kE|h2v z5~+lm`$U|{53oJ+{FLst2GD(RzBxxYUdsZya{r~f2yX)&#reS`DVowe`wYJIozhZ; z__0_ZLuI3g)WL&EwrU!;G_TltnRafj-s4=y>&BqPdm!D2eM@f^QmC;nM z4JbB0AAeHEbYc*V?6|U{PyE?KPyQvU7n8=Z(!Ex&cmvTD^d-!*zf|@ec+}$^5e4Pg z7snSr`Vy0^naHLn6-#ZvNLJ#auVc(O{X7pvzj_>M4hZ|`M_})|VucfDF{8Vq#i5<3 zbto5HBTH-%^{5b+k;*iFrwH7F&dG`0#d1GUSn?c?)Q7928@t-3U@E6<z+OAM@17kL?y}zC^zHFM)Hgryym4)ce>Y_p`>T}pe@Ai|4mnl zI;(NZ+sc;jo!#pA=mU8&dA@mS1PhYT=|#kAj1Al6kh6&a#}b-PeldcRJU#SE z^d5||S`%NYSd04P*Y*d6-ez0++5Tv{1I}!$t*psj@9F3AILtK}C3$Kfb>*Z}R+NuCIxnT?O-#z1k4-+;#YAQbG`_ zKzCsL`Y^UF`8)r15gudB`>zH}I+KnqO8jO)^=ecy3opd_e`ShSO#BLq62#(go-z9g z3C((P*xGN72x$l_$92oE#szbqM6@XxZj{jtm5E<1@L$xA8|GV?CI|$+=v5VAZ7Vhf zXRbwLOY0#&HDuW*{2j>?7m#qXl1!CHgkg=sgZoSnw{hw; z0oBXVQq)9a#(ddIjoLW3d~JD3(RL35N;O^-yVXR2D03x`U?L_$6bg?Ad%ib^Xt-!7 z=NpMq`YvdS-Y#GAW(FQvev9h|6nR(kJ>TVofEL9c^+yGh37xyx+mpgo& zjnZp2F^_qO)Cjeeq%dCLh7dj{+^lVrHn5~R$3xe~JSK9B3%q>xL%#H6vcUg21(Y}B zq}`hquX+U!9(uIMaiMtCD;nW%1s>?35yo7A%TNuyz1871r7Tq=u_kzs205VHujMKh zIW&Z4dG61CF*+>J04E2%6T?lb)3L1`#d(M^dTR?i!wt{{@&#r>m|KgS50V;-2|tUV z5i=$b%jHe@YK-wSUnT!O3+mADBZCI|F0kn>tqZ+MrB;MCxoXNe4k0PgGTR2!+e}N# z&cqO`CA8>wBG#DTmLKy#anw608LLM!;9?`f@G##4?D_aoqqW4@M#|(lQF`8)1#Ex@}N$9UmaNO;AFD5UqSN)g~=A9)p9MqC2-qDZ~S^ zMT0%v3C9-4EVhqD$WO@}RQtI^Klx(X!i~AeTpGQ7$Uo!LHx|B5DKNNvFg*zj4%P`f zkC4RRf&Ea_@^;`)!nl$)i!zeZ(R`n*_M<<(0-r|I;G_4hV7DddK~RZQxHqPA0tY#M zu$cVKs`=I;R+7PtwM6o>AsjBVXBKz|^CSf&Z^$3*{XCKWJn6_e)7VZE7Cy=VKY0E!8^u}a&F(i9rbd|9W{r~@@wS_NKu z_~oaBq{L8!_9#ZDOmOh}Q0Q5$g^oUAq(=qK^qvtw@95`irBFGN_RhDR#bh54$|b`c z;18_0QqxdS*z>n-Ai*ES>)p-M7H+2tbUD9_rHki$u)qr%9iiLr9 zB(7)saZ(x;)pq9k6(bA7NV+liFp0Ph!lE;B;2~<)333!<{6)mXh+*@YYK>HtvMxqL zDLW&x%bS?jSh2WI>z)tv4H||<^-1H#V@BYc)Dec(3QEJT?k-ji1aaS)EdLZTk*vdy zr$_5yLbtg%QoVAkD@KXOL{qa*lxgjIL{NmvLCh(9Io|4mI%M&7pwl{Ib1G6euaZEB zO~I#soDzpwo_29W14O8jPsQcCkKqZ=w!V6?+)glx8^z%9GxtgDz08lT0IV8(1hXgX zbYMNhnxDRclb^zS+a_g_KCfN@Ci}UZt2iOj-OcM6xI7U;CecGr_5h!-tM-v>o=xrE z2~61Bz^XwCHK_W_%Jfef4w=ePLaaO-L9KJ5U%jouOnrRVXyFd>xILnHPG<9h5q;vm zCkC(dF+Jm+ugz0lrt^GLJ;BBS(N{h|5^AGeXdtX!guZ0iNfr~d%66BMT9UpjxkHtw z<+M)5M1UP=Eax)ZcZr=j$PBCV#2#eYmytZ4Bij=sN2RbVBp0Wo)Oc{Ahj`paUPr^; z-tk%NWp7#mIhpv74|W(BIWin+VfNgy@{)Tswa#w5t(rG~Qkb5j9qPl7gHiBb9ULER%9j`i1Y_|GE|AaBpD+FG3 zIs&Jg7u~iRuY;SfwklJp&0@{d{N}u&e1L@AY%37$B>zw~`>V)M%%;c+pwv$X8W3M; 
zPGW6FIm1DRsT`69Jp6rQ4{s;?P^Xq}&yN;S8Na{|_WQ2tD;D2dKkE;f_I&HRFtJJP zVX#yARmO($qXLFF?sO~xhKC(xZL19NkkuSbJU+iZT8F9Psij2{+W_xuG(H<|8Mta) zUHmXUZ^ba>$`aQ5`~8b9YNC^osz~JEpa@biou?(ZQ(N?6`RR96+l*w_r)+e~CI$r^ z&%4!(h?z*nw#U1hM`_HR)4Z>pvogQ;Y37kUddLut&Z$Jvc*&p+$hCM%!JFvFhuj6F zO*b@d7?zP5lEqZAlrFkhOd5;F;cQ_U#On{ksW1j{3&0@TXK^7zbtB$cLby=fda$+D zAU$Cnj$2MInJm5AI>l+~1(Z(6Vo2qYH_u@#-;hPZuR5BG=xlkI6@3(XT*JO)iXv>| z63UOD*Qbqw`2n_j;ZuTevmhkeY4JqCHAKF2b& z8~4$0x%Q~HV`n;o@E1ZonDg8iKe1vBwzVtRFtPmea7G1_KA9gz`$fD}x)np?2-*+} zf4xnd0N0dQ7uvBr|CPB`3CDG=^YcDEk090 zU7#$kpPSc_W>pg$o&IgEr?k7RBOia=zSb1YIlG;9E4ui3k=s$a+Qn{((Y(x*EWUoQ z(k%sNm9Egp7yruMxT21^%ui!{1i+!E1;O{e01j^J)w$E?Pat{uwgmvY+dHfV9!=tY z>jVP71=}G2T<^?vX&d23^*E|-KRg*j`Ck1SfVU6aas`~WU`g-D$;s_!azk4#z9`bo z9VK0?1h4FUti9JwHhE7Y?gRcwKDp3f zpR69t?}J-?+@UFX)$b2v8R4qYCjfLmcUu+^T@MtrkAT8av#%=#;WAJUu$@V!7MZ#M zW*zS=3vmBhuKsJRXCyNkfJpuAq_@TkBo-Fhy=&|-tObLqP20WBo;-J0mAKkWFQCl! zujxF`X>pq2b_I$_&1%O~Lpf5xL12PO<5o!7Ou0H$uGu1g0-e-y`+6M5n>O+5A0^A1 zBk2d%u_Pc`o#}U&K#}~vdCjcWd)sPVS0pEKeLA^}o2lz* zWxp~n21fQt77eY5_GNTd8Mn;4yQ>GuPLAF$CWi|qIWO|IR^w3#Ms!Ldo@~$7St|m? z7-814nB?C%BGtd2tZ!@_+`rKjbrXLTrRn)jtT6B#*12_B?z++y-Dv!NtuXlhxZZy9 zFoWaOL^Ju2D*S{jCwLX{;A$Izchf2=YkUQq^!yQ&Wp`lb;aTW;Ah6$FfgRMHZ#Cvp zH~Ugg-pGj!R9FusdC}O`cpc1pnZ^>cbiCKCm_Kt2b;5F{y9FO@ayToqRaZwf<9xF{ znMHOw8Nb`^Io~HY!hqAAyWjD(NPuzYgC$)$yAvNyP#JM+D)`|4zTT^eUM&18!bZ%Y zS_eEC9I|wiEZ9>dk^Ob$f!!-w*GiQ^5K~~>xl8Q_zw5L2dy>V*LZ(E zuB(IAp7(zLu6~`SJ!V46L_$WB;vQ#{At=zmV*HWKUeaK}5*gK$=l)$%UoF?#)UocQ z{{%sh&LVQ5(UVWPw^GvU)z8IJ@FbHbE{&|V_V%#-Xg>m%J3+y(R~}?=NHqGqNf?KF z-IFgiY9;Q>M%D*wx1>QsBEP4M9LJ6GX(d=;YoVdR!TD#@ z22ATY$&sQ5oX#-FZn)UDU(2)> zAgnTKFZ;w-{b0-MTgM?5WG?j_EN6t|pVd|5E43r!<*j86-le?z=i=Np@NBF|hyo&i z+mTXMFu|sD>Hy@uIz)V(*NQ={GmXMycp>JipyC;XiOa5fWe{i zLfY4zaV0j2(l%Dki?a}rSoGv#w)fHX?oYn}#UkVF7#_d2@!{%HTD#ZwTfzUVMr28g z_xO2`6ZY#ugZ#}ypbW6aY6Y?R2Wi%GArij;0QxeqHIg)4C zu4_+{v+@`8k#}Ho`DP-nIB&_qd7vK^sB32I&owyZ$stR_*a%5TnC@;aTI%-W$he*W z2BddA6&6%dQc?!cY$)xPK2ksO?XaJuEvot26?j9!m&jhaIp%+U@|5-IU(^o4BCFzJHW0de2C%?|-fa91V5(PS zbx;L8HBGj?r_2Qb&yzDwG2BEIlc)g>&>wOUv;pX8cnIn5CtjkatGIyxTX7MF}p{|h5G zbjxT0D}v(WI4%{xFqB9w_12OxAoy8%^wRfaGZo-X%t5l2>FIR_iAF>utte!-Vufd5 zDzdU+;~ZwfpIK9afI@WrIv~yeefE??%A_Pj?tnJ9Dt!9GEJ-f5VaA$WXm(Dg5%Zdb z7}VP+bLf>}#$+zExVFeLW_qCGkU^8;lEKj^WKq_M@DD-BQ@b*84p$$!71n6Rml0-%)TjLx6v#h1=^JAn1p7_OO6IYO5;&ugpjOAaT#4wvTc6bF&$N@JwR$IGpMtw_rGufEQRV%@_$Xt7lunh zLZk(Dp2!z#dCX;`w?vYmi-E8Lym)1;5+#eqN?on{wuY0h>J=-QwQwe*&@$(fVj zyp%43p!E+LZ5;pJx=nVrQYC+79ih=49BxfqVS@X@#7!REySkfzBWcChAX$NKb*XI^`-$Qa(iF(YK0`tpJ88S`-dh5Xi?3;!6Sko>B_~mDQM5FFoW0ZpUY_^4l`E^F}YTKW@r^{N} z`8};~FOTa10>f$!)UYd{wMutQn%cC@!x6lrb$@dtwKG#wSe5lnd$BpRemiOCDxX5R zQ&QWL>_N7NTJnhdf7=g$*Cq-wQIk)R&32PTq)Hq7A<6t($newdvPNj`s7Lthg4!SEWHv*M&pBk_}*3v>>xjg)bJ}4?ui;eDIgoAy_e_Y;|632Q@1xq-RC#4A zf3cecdN@wFzE9**Ug$e+75bMlYH$((by+`8KudKR?8a9=oTAI1>66?QljmYAmGoiBB z98R4!&GWFNItiN%!4fK^Is9Nk(1$-n3y>~5H4c+KIE(giZguDh)m^#}~Ynt&+$;Z{P9)s(AEHdK+j+*mzfx^5q1Xfg6<6c-6 za17l+CEdj^IVmVlC2ngncxNvf(kBVzJo#sy{!LUFXQgz5AxUzz!qH7&|f$Sumq%lR-KZUFQt`akFeX%C@R-&#L|yq;tp zfhBew2NyktpwqP4pIUPywY>0D>YnuH_b)$YcACp`2MyIHigTECz&(W&pPhc{v_Oxt znE&{S=L$%KN`d@QP+gV*uhkyy!-tn2=QrFnPC}gMw`dRNfNT^BaE;~<3@}Yli!&SS ztYIErwjtuI-GtS%bae+c-S_pjB>)Fpr`_w|v8iV=+RRw1aL{WD~r;iSa zFg&`FS_BQ*=0Yna?^4RKDi-+}>901Y;>4vnOM|qy0k7wTu8>3kML#c7Tkle9tI(Sm zCWtWVmqVn*rz^qexu_RWAMOp1o4+yFZ**Rm&h!QqKxlXIt%^S;Lm758057na=RQ%l zmxOWA)B11Yd2ADtjTMx-W+3w%yp9=$VHMtxgh&S^lXmC%Y_7k1ps$K$Yanh}HQ*Yf zRRm6?Cg2d2-j~7Z_rr}BmY0nnkhb_`>~8g7@Yr%RlWW>Sxc?)wGhq!}^68`yAST1t5x4-vvc8$7;imFGzqvO+t^lWfR< 
zH@*6>HK$JKNyxAoO!2wA_^EMYn&zgluu|~ulLc@f6!qtfrWEzccZIiy+H|dgySOu( zyG*`GAvOPrs}S#%0XK?8jm5H)Xs-8&6qZL3ZV*-*K^P;#N>^Ff*XmkdCDqL2(j*s! zTQ{v|K*4C#dV@u(KAYtkV}k`^H%wzVv4|CNo_~HL6C&s1!T632O|+Uko=@^kGCHNc z9XpYqDVE?LCa5P*wN_CWshbkjAu4YpfbC2dCy(fX*lMXCk2!*m0cgo;_y_KCO*glC>%lM;*T^zncJU5taVF zMr+hX$Q>oP^W97)nbIV{KAd}wq*x>$k~2`y(!hgabK=$6l;sgV40-N#p=lJoRWVAE zHGO}B6uR&i&TIFovl_r}Fu{xbb)7uWL@XG>mzeR?s{3Q|rl*l<#!?gAj^Dr1BxSlk z#c|<~;f)qG6&^zNJZa*m&=3;d+RaZDxyrKKZ9fh)%x zCD0}e#R={2^#(PH#6Me%YbBG6d9fy4>c)Gph{?eXs*e0*+D?*O`%{kIgDlCww4U}s z>U=yVcqwpyL)B%RRz=;RL%5;@UYP=Q_@zEP^5w=TXoc?@;4Cqjs{ z(xDk~V(a4ArU$ZVLfs_|64swi-hl(UsYGW~?@`xjkde+ba0W4sMQ{q8V{hT_O)(_? zE05}|w$+x+GuPZFY%9=Mrtdiaa*|WgxI`pG9AJA~kXjj9nCv?N(i<_N@T2>U)&rXR zmAbc|h#xs|u?9$^xpR*M2{-@F1_V)04Wswo;AxwCZPoAj+wCC%ig>xjxGU8Txqkd4 z({eRRjw!;G)a-Dz%`h+tzC3v~O==85W+Xsb!EtIGs3R(dPZLPpYApx<4s>#i@rhwm z;FPH(Sdsuss8#>rVvPV4Etjw8y`;Tqfx>h%E1mw{d)#}N4^k)+V*Kd>eugGI5kMHB zG7?7iwGuX%7}X{G7{m<4`4Ye3aWMZqh!9Q)Bh(=Rxg8H6etxX!swc}or08)YN?WXJ z{Y@+3rgMC7(ec&+d6*QaaPNy88if6*(tD4V1r-Q?dfF5XZys&SI zeaT(@X;Xycd&<$b2M9rA)ESh&(gJ1>&8I4IoaHLDzfl9i555D!YH5^hlYYZ~6^aXo zld12Dk{Um7t6sei4Q$0c{I};KQ7!)iSC5tZ|7jw6RVEdq9YbP(T0R{~^yLA$>F>%> zxZ;V7&AN9#6`D{4TE7h1Qxl#CS(?-B6mD4Z6AzQDqvGX;j)l2plf*#B1VrtGo5F&9 zc-`&+{Wz6B=udAK`L{QMTFvaVU8YK7J6WZrE&VxHpXygSR4pW7R^#kF2ldnM(m!H+ z=2nH32``06c3IPm2)G9?a%?L~d;Fy0Zdhb0ehuGa(i9OKlHc@j+``<`NE*1B#jhi@ zjLr7vJ}}s>+*IP|PKnZrpcrj7k@5{KW54PR)S`t;R;PE^J|v$CaT3Xd<%!Vl4D&9X zm0?uJyQeL$O)&_7@8EU17)xq%??WN-W+~;>0V1`#o8D9SghV2eo?i^j+bHrjg@$A} z?fj?uGVG-`Nq1 z+x|A|PZ2Ug5)rb#hPDG*8jxRwP2IoxXjHCO`};`_w{Q*cEMW=^^9E^k1LsvDZJkM)#GLn|w47W8GCSUy))@1R6b6Uj*9Lw6N>`hAi zA7jO0Jxs_;1kShloJ>lT8E4ODV9xjck=qX-6tJ`3Ju`m#w1PG!25z&F>Cc%N88Zyu zV+1p>(s5wH;PxDf(YJlX*|q5ZiuG-9DOp*q9FZn@u`E8U#%xxGFZQM68LTGjD)ZV1 zsXn)^rP~iZ`IT}v;>rn*-SFlli~B|tysv@;$22GfSsuq5i%jSJ@A)29m#77V!J{a8 zm{}b2TVHBV;Z$P5LNL}Qu|tP&^b@ht3`8M|8~JO7B9Euio5Qn+O5I zZClA^o$bvSbBJ>8>WOPom6BT$Emkg1z#y0{dLk#;&dCRzltsU)P@U$R!o z;PwT{W19!5p{6u@O5X+a+1Ur2Dun+BA<3iGp#0zXl`a*C^+R{re+K>W`~UAkSpT2H z)c*gpAWCjlzXFi4*N~E0fE73q^0dJ}IB$QJirhp;gA70x(t+$$#boizFRP8g#Cibe zUY(j7slGd|4>W%RRu(br2QiOq<@2UNH|LPdU+-O)Nv&r%5OWXjXc2)XVdwsT$Kg=^ z@EDDJ(ayFvbA0a&8|=W}Q?DHN9~rhe7V2@q#Nb=>E6Il3RLyiLaRU(a8MN6B4*j-d zBi2ZWV(vsE=zAeGXBm!7R*vlnMvIaZS=9>Dr|mb;4=fPXtua`bQwCJ}8_n4Y>yDUh zRRXLrl9Akhs9#pIAqzKX>*>ntjwaivOeIe+57W;LXjHT zAQ8}}h#M;*NfsNEy8rc1X%pXcW5utT!?biEH$UyI z!^Wl({mZuBAq8FUdCwdd!?^fd4_t~Ww8Vp(dtQJ-JQprTMFuUUR6HR<>QXZY!- z_?2zn>)3?hH=}mH9i7xyY!b&SfZe3RIJ{LDz9s>C44RyNd@O&hX2gUueUOP5a97?^ z+gT~R^<7k%qe`bQdt^sY{kP>3IfIFMe$cBilQ_tX?F3b)D*CGtaMPeUeY%kD?br|_6d+l&_6t-iGeqGkKu&YIa>TP3Fim&Qb^qw&hV#mRd zKPoFU=G1ryHH=f!6YhRB_ z`rb@l@!2CMR&yFKuQ4?PC0@4OHfeP|n|!N@$&+cX7T9(<&I`FyB(dF?K<(xFn#EbZ zNIp)*HTSh>#r>)5%~J5Z=&yszf*q-3W7|EAbBi14-_`CHlqUn9%gQHf4Mcy7E9P|a z>G;8ZYYktsozcqG2P?)4v!zXfxjnJuDtrEIG<vm0ZRp?_|{}bkfYbn3C&5b)pVJ7h=5JI2A(iLI7aL5m@Gkb9*bdx0^ThchVG4{iQy z{)#!*(URUx#J2&wcB{<{$=@TpRsP{lI*KI-blZ~F=Di#!ezk~yPg~`iodCY=4OQE5 zp;yD#t~F586>sSMq#1oWnQO*YO+u#Uf@Z>7pBzC2Mcb87AK!yyUp{{%euKxpkaG8N zNoYK>%;|$F>}{EuUM5!STJfX$>yM+h`6D0iq|CWy1i85`f(6v4-(YwA6xY_$$MEXX zx|Z-SvJseZWAy0=v}K1$wFd}#xPcS@3IY&2ayg_WIps_ROiy)Kxh20nvAW#Hn`$(& z97*NUv8#HkOKH)vnh{ej6t^W3OS8MRVJgzd_BSztNSArmTj$-wtBEe@DjBK6bQlPWvP` zac@mH+M?RGM#PM5(cB8dSUW9~AV|!#S}1&^kk-+LOX1E$ry~3DDAh7D{zlZ|aFXDv zClKD@nKHfLHH4tm#h^BjIv%7`ExU8Ww)_=z@gV(Gzay@4rcXK#Hy#N|XYCG5yObH9 zTKRcQa&n8yF#CXn4UpnpAPOWDTX0pCl%DMb)+P+~_s@320{i@$--L>9`sCf%5K|S} zm>P?jnyJ(7@tW#K5=rZ*v`JtRzoDv?613gJLo93P+Bvw=KQ(+OdHNO`;2>8*5%@!E z-n)SquSRVgq|A@_UrBZ~B#HYgyc{74)Ur|V=!ENQkH8MJ)-os}7S1DFRh3@_)0Dbs 
zo8Q&`Hf!MapXMwN0?i&MbI@1i`NXQ4nJ9J0H4{{A4?R3KZ26iQRodR!Y~deADtb^~ z*c0Ri;=(_gKfv6)ulb!ZwD1yoNFWG(D5-k*;~m!50FbnN6eRTtInNeefNXt7MX%hO>eR4?)9sw zbBEdO(mBxc@%Q%C<|Hcg&Zt+rlb8!b*dwk<2fLcW51kb&$J^zw#jQ=v+yaYcJ_Wig zP`MUvziFCj{C*>cSi?^*FWvjHUV2_oYXr&lI!K#W2zO%pSl~N-bur33X<|0em6y}O zNl2c((dGm)ms`M`O1%*y-0S9^T-Jm;S>>zhB3wuME8AJ;}pk8wVgT`t{iZn=;U5PcUsh)b>T zpMXBmB*^@bOpx}gfQCD~9FsFg@E{5JMu9wfb9`e)oav)f2*GLA$7L#e%vUtK87a%j zLikv57@~DNch`mAd|t=o{TfDj|9kP--c|-N;PJF2>6-QKpXIj$Rw7O3;v(Hil7ffI zYoEY{C?qLhyU;$@iHb9WmXd8I=(%>LD{hQ*w}t@=mpKiK@i@u?Z_pRA_b0Q`u=>r_ zehT=FpwI%>Lbl*&+D6&$Z=TKk3s(rJ^-)>BI%{A7mfOqx7Q3u(ets!xrp*%Y7n_sp zPRPD2MU^HHhB9w@celEcGp2NwHzi2o1!tC{-NFgMJ(rM2K3iQcz6ff8eQ+Mjwxs3B zV(E9=qkH$542~VY?>8j-R}(_W^6L^*+p}r8Uo0g(4|#!l>?T%_7!=@#neKl72m(6V zGuu4(5I??@gm~g2q1NUS}yR_AW)N_D`Y)TZeFdybhEPjwKa~c8=`Gk0lLXu zIH>UfM%EmJ_|-Ph$T$3_yB(;sX5nzdrT-+%!D5ucmYZcxlL9LW%h%Rjrjo?|YQ8>q zExzTPxO`{cDc|@!FPFNvKy;&ZgV0d@n#8n6$@nRLleihx;bgE-qhfiR2^&m!l5%{D z^61RaET7@tPSJG!#ANVCFO?hb!5nwI%(RP(tcvbJ*sT=<)0NBEqxaf^HH$EWa2Xs! z&~N!$GzddQAYET5;JJ+80dCA_$lm`U?JvWk{JuACSOGz$L8QAxO1eS1K>_J*q?;i{ zL_nltXhZ=;>F$srhZ2w)8ipZ;8k!-V3%~#0bHBguw>lmk*UY-sUVHEJ{G1y_?r~`a z0wRVDUCp^4)&2zCBu|IsgC#RUj}ky$@E}rK#G>Iux>n-H+$d;m5o~CAKIK=7Mxr$7 zbU*r9pJrwI}qaP4k|_9p{ezqcb9II-%5%Ze#%00#deH{ zDU3m%&2xTC^p8s)f8gEkIy~5aU_>3^jPW_MMK!BcbbkKm9*q8CK@v(MahByU*Rlpb zrZG1V^#el98aaxOzKE98lY*IbqRP)+5cK=F;FSpP9IcK-z1a7@Z$U5%RquAH~q z1^Hyzr)m@A%7gv4!gGiv#frEj;te!z$57H>v5d~y79*+x)Xrq>+4{Cv>yPrK+YoA* zn1Fx&cF;r71GI zOQFTOC605EB1Shem_IVOMh)E)Ir{# zCS=tAt*y0Whb7Z>ZXdgNx7=|g@&K2nCT4*2(%e@Q77p`zr{UGzM+#p_Oq7Z#ww#xL z7DI183;!OP!;u@B6Vfl>@TEjO&Vt5j;xhu@ZLCv0&Yq3iDdF1Gc6&-!3bW7K)cLX^ z9}mxvxxUm~#d-{f1eLURbkNWskqn4MNyII3X*?0(jN7P)|b^p@55F!Hio}MMs&e# ztBJ8o6!o=g9S;e(gy_+Va$bXFW5)!z%w!>7Miy*-?=co`%Wf=x7b$u;$FG)>hd^ zCHnjgcrQAVdX0ty<7ZFHsy60?)f*&s2I#d#>d>{JzIt9XoV|IRBKvg_#l>$P4|q@O zfm_A+{g@ule~wW<2cRT_y4q59GJpuz7Ig;PcB)R5Mj10o9YRR&t{Z0tbOCcBH|NuO zmsy?m+nXS?#Mc$)iZ(W~=X!~NoCJzVD9OOA^f^)3b*?|27N;z@%Sh_ZIz8U(>x{l0 zSLdxmzbP~RYs|%e2nFCqfRURn2~gpfRCx}ja(K>#Y&MGnuw#!gu8%~4T zv58x+Y1x4pdW2~i*}yPvy55CNtL{uIXL{+ry4m&#Z}m87($@&8uy{FXT5VNfikhM* zdBp2CAiOFHX@*VP_%2X_gp;XhbQPDWZWTux~lB(iX*kJF*`jaUrRCUXFuEBlC z*T!?=%h3y?R>lLb2+)ryHwlJ{3|Ie*rO=So;EUt(W|Z3g?RuFE=2_TM57|2AMDfPp zQx5uheUre0o?Us+(DJ`N1t5%Sb8(Be!hO5u&$y=({I6ETF;yH zGrivi)+i$(RCOrD8w^Ty1!LMKFKjF>34jE=;RIW6^IVr?Ci_2iNn#e%4Me!_SMVwx z+22i($sH0Fj`?i|XSFBQ(}lMV{!=2m`;p^617B~QcVQXX4BlfHvFzI5YUj_yT51$f zGQR@FBVX)v1pNs2X=%v|7I%!1Jh|>7SH>)^0JHJN*N(=AZ!}sg_~%JZJIDJ7Utm1+ zefSAD0AiHM^g8H}*cm*DQ-}WcZ7GU#g8fSE=e{2HJq@S~Pvyywg6JS8m#=^gR4hk| za|4dK4&!Gt)Tzvdk-k2*%#vwdTWQw(&8*&*u;^GkN^-`RK?_*h!^zH?vmB;h9+`g2 zpE&MWsrCpN)Zg&WbXY%XvmY=k8%?^;W)>(t|MFvoq}$=PBl>;4Y!8NxWSF0XYE*u^ z?z9nO{Y8^v85z^`pgK9*;rZGYTRncu@i3o$<*GP#Mr(78FQ{oEHMdKE;L~9Ahlj1? zf1f{(Fn8ohFf%ry+vhWDr)ro!q8B>?|gPIg9rD?MiL%U>%L zrk|(F5@otn`}uP~S=#(uX(`#|TxOtV&=J%o4*p%4S{1sjNU$RzS~+OaUHuU z`m=g!ahhCyF#NmiOP%=iKZ^wT=|LaL z(&#Q==s?K$GSz6%WmKkln^r`G`U*$jS^i>62m%L;!NQuk;`CTh`0VvrrhLN1S_YYU zfyC{xzWag$xCP&Q{Y{v4B6)s>BRG5qetDbII>#ddvT8hgyMK#3b8WF{qt+0w4e%}W zU+W{?6>r>(d%Y+Tf_MPt|HQK2oX{IY7%U*v7;H;-{x-|s7aVahJviUZy+APBGRs*X z1aiL~&aQNfH4EL2qBM_ih??7938?MsFb9?mY%oT!luL<0ccF#c?=Hc}!32VXP>P!Q z&|A>_V{IPv0NQz*swJ480?^2kQ&PrQC#`-5Cf>y?p>XFMsi1mtV&auH)F7wd?CS!# z2NkcsDb;iJLLdQv$dT}dWEp6~Ref16tlpc20+2Bz>fL=zB>(Z|pYC$tV8p^B4zat? 
zG7Sau=7Yt&?e_r!7H;`TYs-E+%nkNkJAXOrGyKNA7zAz?5*7>NkUzWScbX0H)xq) zs6hXCnJGh3W8ij%TDplkQ?4BLgk+*Q&@W{wj53d)qHz_-LEeHf;NBDsBA7<&QeWoDhieIZ9Jf%fVG{bLo_L z`yn$B{Tb>3NMEG817H8a%( zox-+EY+AkPsU44I|G->y$5Mao2Ccn!#;6QYI@*MXG;QwJnbh4NnQ7ku>BUzJbj@v*h(&+Ur^s^Ta~bx`6X+jIki9XY*1!$iCR?Z z6)zjl+%~-F#NYjgSbl!dMr)*|#oDhGbtDRX{WS0m9rdy+?rH3ozdPSr*)@V5#ZK*? zdd*Mv?T9-Tn0;iW_RGF8qTlzk2pu1q5(+6I4}4G_6>8|K`?Q0$YKTXoouV3Y8h%O_ zeRP`XS2MSU+4^gVp8QJaB{1W;^E(@0%Q*t(u+^eje&U~LgUoPTL~4Po|CiUduC-Hy zuPTB=owZ1eIQxjYxxbb*M~9UAo@LVcbW4q|a~WdRmFY?X1?e>~A3pO^ zwgJxPEdmqyOMlbW-;C34C%u1a!mv}n3%+p1+yjB9IM}{zxL0qJxEJ{EswQgh{osU( z>{hvDwpR5T|8Q-SpICvUsCp?nB<%+M?z0wuD(Ai2%VuZyw?1S_hZI^cHe(F2_SXMw z*_%3rPa{o3q%t8Amd|c>W6l79cBj#Z~`!9t|kDL!_Kx4^y(*cW>vAPl$j)Wi5@w;K) zn>oE#JL9c@LoA3Ussl+UN>us7Ba9YeTMEeJ096@aa4gLh@Muqo`BiS%gcP(pxUkrm zv3tLOrWE&1G4(U|FhU}o_E5rhcqai?)Q+EG2rM!SZBNT{u) zT1F-1C2sj)(h{6f;;(oDZEAhv;#9iE)4c7VR94%t0)#|^EuAx5Ev+Xvu%M~kx467f zT%kX_OW=TEhMl^BzS1{i#se(EWwpKAn$hz$!N(vLyepqL&JkO3~JIEMjHK-cFKuHO);P6C@i=As)}=sDZD zQ%X+L#Z0sS+;1>>x0nZ+VOQylv9ON%WVaS)-o&gmdDYbziwUVe^pP=&8FEt54W%M5^u)J4dx~`+i>WA^@C_9162=pOtLss~; zu#@9nO#!8f{L!|eT^nV9>nxd#csgu7XSHc)nK8llBCjD3%w^u8jLM4lPP1-8o>HpX zP5jBPvX1gK(&8+Xbu5s*IzTuqpe}4TV~2~Z4|BYZ*mVngIGJ~^>E83=_7kFVm*E;y z5hfQ{Ddpd8VO*NOo~9~wp}L#A9bzSIPxn553>P6vAtFvR%Jl|(Fb`f%F_JX9-b*5G zNAF2&O_rqiDu*8bQMLor?>c}~E$DMs1dckOokiA<%G|c%QSw8Yl}J+rg;t|)gFE@l zM{Z8EJRpi_y?e4~xbrVuPhHOfXzukZ^4Dx>{VCVW3QutcSY~UKX5J>Wutm&Bwm;&f z@x%CFIle-&3Cgr>FY*7tn^EbQ{aGl&7pvDKJFTJX$~$YPYxlSpSIs2+=xT)7rM2uc zo4PHVM9cSB2Y(hvlk1e4bF1Xi2y1*_;R!3dN1l0s2&g_}d~P9tKxJ@+Va+mKlOgth%XFnH1WRZXK z4XZ)yc={fsumurYQokF#yz0H)f&1&!&5>VQB88=6r-pRjQ^*l-$^a~C}D{``YY z2ggy9W>zGr-t`_T5~ZNN;B`^BpIYaa*_e*Vkd}Wl0rq%BbH)2+;5(MLSpm?SwzS9{ zLceoeJIp~2H`*OZV|?3!EL!JhYHQnsQH!U4<;0z1>P@lCB@4082dLwdonKJiH3QWN zu+ktb1hY!NyNlBCNIu`LQN6+Km7Kk=Gp}xQeF}%Wg-Z`)%ogf&`5;wF`F#mj;UiDZ zN7PpP?tAtUTLbu-P__EfaQ;Bx^vkw**8Zs6(5Tt7f=q?i-Q%F>L8ER@ws3+VHVD-*%hj38b}wo#31z7Kufka3LQkAPPiE^lfox#lVjLMd8)_wJYI@cSQt?;VqO z`zmZPkkChoboxxTw<09B9L0mk^*Qf^9t{|I9UY2&BRvZ0F{Q;3{*hZWBf+;M)QFP4 zO*_=zF}p5{?(Q$oXv#6O*_BfTMcMPOPKF|d*_M~dvj*OcZ0yGlr*PwMr$U({y&dX2 zS2jxLSjjU53nI**rQb2dJqMnOb?C?rkci8T+$$IFD0G>pADb>Sz*zrPNP+OmK z?8Ao86n$%Fr`JOO&N#_|4JmM$rDe{HILQ!w=W@1jBDAaiXi)!4un?Hgy~1YDfx++D z6r;x1D&Tm|YgCbJJ>4y37dPqihJ6Z8go8`seF1{r#mCBr!GG%UPqph;GwK4r*R>v! 
zW~D!Q6jt?uwfBZzRv>kZ{6*nkq>uz5^_4*Oj+ekQUJF5AUR9!kx2c29Yf?H(CFrXn z*S6MB%UAAD{;=KTd=0NR-Q)ZhVdMk-2F8OGYx)BcVYJeu`!O4jy>qku=o5KA(u&b> zj7DG{=GDE#v%A?|NM4dOxT^7qVkwKv@#J=3(3XUSif1gxJ~9stElo0=XsoS^=gi~H zM$Y7Wtg{!1O|BrIwxi0I4x>~~4kTeWgKz5znu3|==sm@c5QA@fs0p6z*_p*$&BD{1 zo%_@G3B%RkqT<=Ycj>!0Q}>+iMv_-ai$~SQKo*1d)5V21zK}la zPQ#@lnD#q(PjO$sXY~n|YH1YM$C=lC;njpzvw-zTK?+PX;AVe@)WPBzdW9;3woQLF z_%zaYMRH|LQg`DDoC#Yu@tNX6&6@cROFh^dURZ*OOjSQ!c05!%pkSn1KJZs9wz;LuyZ-2E5K>Dug8#>A-xBJ?cDbMH_zPYMy`}`HVcC z(@Lh7h{g{+TND4@S|&q8&KoO&`pE?e+bd9nAk>6`6Q9)&ZjjYJA#o4Ki{ z2f{PF!Z~$>lS~s48avC*+W5iacWl_OP@LL;iok)Tk|s|1`w08e1_25=zlqoG#&wCo z`~g+IMx_Rf4E6OtU+V#*_;Y52OMUpLHg@9=8E+#pP|36l=ms(CCr4>I{%9lK1eUKm z&7vKM5zc7$P8^Oh5pf?U%jkJxkbjM~PEmYl+L#NoFrT>J@x}Q5Y#Xoa>WebICxRFC zemkK%do&aQ_SNNzelHxxC*jQL08wx0No)Gc#hD6rd;{TlwvnbF$3I^qpFDf@G7q#t zDIo?0k^;(1jsc2;-S%Y3droh_ZdvMloWW;Px^oO5%k8--S5y$*@7DnTS{bzvJr`ku zpDTMrYI(HmOfr5;S&)RjNxX!yUSdpaL;rWbuMsbe_xS9N>ZHBPJpTMt>Wr)~Ya>A& zb}Cl8$ai9-b|EU(37>=M1CSU8UJ@QV(+9ntA?LeUYRp?S8(o zv(<5EGbQgg5hUXU=AIHMZ!0`Gcd_mn%Ihk7a?qad2f~W$eh_=mhZOetNLXRM+RM1N zoDa%|pQgj#?+o4QaFO7Iy`1SL+593ppl8&@EqB^c;6!QE0)N+fh&FZ3#GJI7b7FI8 z;-~0kns2}9A@Gfb&Az?O@p>iNqnTL$M3w>rMjuF2Z)82kFpf3Codz>@zHk^DeKsOu z_v?u4?|K*SZ5=#?h{Tk9m)vP~dY<~J_M4#-Gt-)AN%RC4Oi8>Fg5(Q~oDW!$JwteG z*&qJhHT`3lZ=ar)O*rD1;7CWhZv?irWTIhvV&XaiW;H^tP%z>U`tx9Z|CXx+aWZZC z(t5QMHsChZ8G?6cM0uG{MzBqGf=i^HQ3ZYko%a@FUb%c>ss3UYix+?3n`^NqL+yP5 z$EB&SrTE16{%VHZ=i9t6_qNr3@#Nle=qRr2GMsI9m%JN1GrLTik7vwMqmNA8p=vp} z7cM-sVmf2N2alYsrOnSgjUV_@x$5&`))44RNCgp%=YOSH1l2O;@oIh(pBH#l&Fett zmySAywA>`_O}mlvGr(?M-sQNux)Pggd~8#1K{S6Fi@H7p*g+m|MGzb>{U zFcHqdZH+)72xCYH%6kmBxf(YOc=FW6vr*fGHT=$SKh8#ptv?^=jsTGzgUQNDxSu@` zVL(WG&dlW#5R{`AW?gA|auXo2<6PJ@0yDwFo?WqI*t941h4iow5O}*@Mf%x)xz&q$ z?&Uo_pmVM2jpSJ?()!Mbs-~nkH=FQqzwqA{=WxYlQ9QyDPr!fvBoS^;)H;Cod;879 zA*BHRfSaFh$4~Z-3ei8{I@>p)I;Y_`UL*2h>a#d#D9-g$BXX55%`nw_@w$n6Et`97YpN84M-;*mFRvi|_|3!v+sFsoFPl_Ru z!u^!=Q8^STg!LuQro$vvM}(XLi&Q7FwDIdbv(}Y&H!+X3oxyf z?@*&*^1)bGDhGYaF7u65sQ?3g1k3EJ!lO(9hj`zbjVq*bsS?gkCoS(c*%^nA{-rV3 zIX2k(xfS9DQ$sK=li(?-#s=adZEvq=r^WjH&6QK_ z=xt=tTuMaYEGIL3RYb{ThpAMZg2hHr9d^7@R_6+PebdzeU!7J~|7D=8e(ex7@ZL8( zSYhl=b>eh`de(G4{HNYUCfJF!M3-l6=NYD&eQSG*-Giq6UtvX%kZX+)D$5;5d|md? 
z{OUP^$|HFHb18aB3pU<4`-^hG zFS(T*;f-Cp{m(U9!}ln0dANS(zNw0sjc!>qGLWWfH2f(k-9I_)sk0j+D=he+hR`Mk zyYt}cYsE2h|R(ZrSz|+g?Q}759v+PL7{FhPJUK`*!BtP*O1!ioxuo)$M<&M{%CBF z>8)+p25{Lj60G8+ zKT8~LPDU3<@-ZJL%}_gUM>dIc7~FGGkWk1$RuhhqU-V5z5*N3}B@4k$V=a!`OG6)J ziaMkq1rU$p2nPhZd+~)_pQZoI&k%MW9Cy{(2-T(2$TG7WkM(K`jZ&?#T&f5xkrXC; z$~F63Q-gl&b&;W4rnp_%Bw@RuA5qpla_8Uc{~0iIv}~<~5db)5KvQJliD_COu$N)% zc5qG3Hc3rlYPxnemb!50($8$NZkhkndc~M)A+7~zvA8uk$*ZfvXdsi+V*cNDQ=4~h zvzqmbGe<0Ut3mKArJ^!cOTfHV=+_iMyQ^p~Ghf0z9QY`w^B;A+4VTd<+Mfy00tDg`;&h^i);1>`w43d zc5A~&e}PB1r$xjZlLSI8{HT|b?{n~xyEiXe=BTvFO4lXm5UCo@N>VLw4i?!vKM$JAYPdIcS#$5WP6dvCr;7~9u*?-rtV`aVDB#d%j_Xp zZ}Xml&0)Y%X=K*1CzQ?nF=8;%j>r?&9o2F`SqVV{Vna?mYgO+$C!F6%9zcgp>A}gB zsqE%rtklarZ>`8RxY$*_nz^fQqOQUf#VgrRNxO_8camsCU1moxC8I&L;*P2H6p(wQ zye>9VAR2r-_Z0i<@Q9RX6$ZoOYx}ugMQ4B5WiLd?4i|xU^A;P%cUWv@VpC>&b>dN6 zqAsAnSL?KA-M%|R?!0~aT%w8Wr1Vv};|1tSt;7nuzme)=HE8-iEbp-TyT%HQSB+D| z_2HT@Qq*#-)yx@%4^^InEU}kW?JAYL$62U!D4SvIws)`HCFV^j7j*q5G4j5y1^EHn z5I;#IzNuJ>p`2CUCQW_4x(Y3~)?z2v2s%UimvJ|ks%oa#yAHK|)Y^Ophp+d*SWNWn zw>->M(JrW?6pm31>lLoKaGUXR>#hu53hgIQUI_JSn4gR3oUv_J1*!hvRb5c2^Rsjp zuIcC57C*CU7u--86v5HMtiT}xZ+!c$LzAgtU83G|zZWczT#wHqv*GPiWQ3l$djg(m zLuNHFV-g#yCOAR5e2>FwTH8{k^l>KTH+Q+9Zf$a*R| zEyV3wc1x4GW6@iYp8gltgxmqg4lIq6?h>iF&{p#X6cqoem}~a!gN{EHb;-uk1xdmL z+V7#7l+Q_!*$i^G;#P5!6&V(9vxFzYV_oxHj0&6Ft)G671;~U7^8vhkc7GpbMnl6g znvJ2y9hKugrTo)sx$Rx^-(=Fa^Ha7eTx$xPo;3te5#5SPGrs0|;p)0WrF;t66tiy% z7O-x@3n~G5&G*Z&Riyujj$VJozLMcIMO9yHMndz&Y+t~tuI|c4hlZ_y$a6MX`dLp&EW75*}{Ka&g-XqMm?bHyg7&b7owJ0LkxjzRK%xh zfeoaPiYkGqseEtO0OD1lOAEt1RideTzc%lbDLYv?jkS_#ox?ZDB7OaT5#aPF9n|m|BzFK#9R+z_P4B`6|+Ok=W8XVMvyl! zQq2hGQ6K)ij*q~EF53NM->Bb>(*fc`j8p)Hv1 z?^ezY@}kLsX#JmWQ0u|}{>lIS!IS^DG%Ke61qA-L02MuRzyS`clQS|XB%F&$@(K%C z?wl6@^@+Pt>3{#%9W*TIe`Kk58KOR$uQ=IydwL#(>8A>S0bJ4`0IGxou*tjYxt|#U zSdybDK(^Ju-PZU=Kz%)NDzS9*uYX@#j%fg!vXT;;O#W7dsNp}aB^=@ropIN*x5f%0 zTwwK@^qqs0nt)#iATRdYBuKG!M-hTIH#e7g)zpx%2JO4uI0%rVm9l>C=nfU`3UL_8 zxhQS>M}rf4^F;;>1Q7yRRANX)`cbEli|=Whw~cI+ z0!%Zi$xmIWkA=;qb#KA2kj;;~$cLDn15Fz!{A? zoIO1V@19$%rIV>jIe`uZNHu{77~MkPS-qys{09K~qPry9DjdrjHMg`2lKOQFK7%B4GJr1UE z6Z;*EQj9HKYB7?|_NYO|%u>y-LTUhhEA`cli!I;}ohoyiE;s8Ex~s+k3{HhPyH!>u z7NFux%|UQ75D+R8Ve+rxCma7OATj*eMu(DM$vyjl990%|1rNK28eJs$&yrb)lUey= z>kM=1?W>Qk&Z;`GOKg|gLr~p>GX208|K1rw&|A4FN;Q;s0c0y-f%)?w5QUi7SX>@j zdVnxjcmK+NZ=TXLg&%k+Ys-BAFo`NTUlJPd`w;RJ(bJ^Z0542V339Ow)+~7BzuXyK z4X|`MS=vvgOuTNd&;Ivga{u>ZK28JxdROV>wpW7|>o2eJmi3h#Zf~ydhEBFv#|Rw4bby>`0dTu*gf#V4k{;H<1%^r(VMMvL(x596LSDDr{2zi(L8hDBXvh3(RF3k5J3CZbVM1~Gky+Sl=T$=LNrf-z`LMs<7rw^kItKozTJd(QJZiF zfCp?G!*jx7Me2MlB9cP?oLe(Lh|9rO;m7my>OXE|ic-@734p(P(! 
zQ-YSR@O#XnUofmEkee@IFsz@ z1qmmq2OC2>B-qZKt5h?Nv2pQZl?hU1>XzI^<4`qvJ-$iSpVSo#j%9K;TR=yJQCSVm3Q|+-Lx5K{xfOwm(P*C_O%v}bu z8wh}A{mlB}X^Vie_FDkbf7gM`ti=4#OI5elLnWhnt``?WV+HbXAbl?#2(}&R74&)0 z0!JT4ox>LwTc58Fudx9qWlnt>s02u)7H6f6EbOFovIqv~bD}4Z;!%81= zoaD1LuK1MO*!HL^CNMfd`clCSMnkVT0HFYhLyG%E%zl-PP)5;WDbWt&$^Uda`N&%qtNc(RHt~fke-0Um6A%F;8N~T_D7kninJgo1r7chNw?W!bq3sGBO?S%D^WyW~4TlT05FXNhDHe-|s_ zq#eqrR?ThAeFpKIor?K<*Wb7BN4@5J@)G(mcuDTwC?g=u)?;|tJoCfx0~R+jB90s5PFMnlZkLXkc$Hhx1JhVFaOvNV`99d?5y7!hA> z%2bfxsds01Ik}tw|5i=Sl5Yw~frh1HxK)*~>3&d9#BS%;Jz6ch9c+ZlsIFB{4*-_< z0arvDqrr4AVEo_Z-H&TGwbqOpbS2E2Wf$jn@Zj)eh+bj($XY~SPFL~F>bq=c=d zBJt^-5diSqdHd1M42Za_BPr~TI!Fh~firMLcvxKgouM4C)J#3Ld5J|?4KB8tK zk029|!>1BV1Q3cduEaY;TkpKRxbH}$eRh_R6OT4i6F}Bc_J9SkMyZ5@cV>&#`f4U> z=^R>fe@Ej(9bk(XY(nHYFMJ`^4;Ng{$e~_I^KsFsN& zn`9*F>GRCfpL{oF$HNVtD26veD2$q2e||Q;jKecN zPr8yuyrHaJmZxkhjpAAmpOuVApmPu@J6L@b>v5KxKsZWTdOd+_-e?AlVfv|}PXmnn z0XxSm-|E)G5R7seW_xt8j92N}8`>Z`tP0{mE?$c*;K>8*4t}S%6NX%(_gY3z-;|`Z z1{7T#Fr6yEiFqL)ryHQ!=iqnP`gWlZtIL$FN4(Wd7KpoCytA1giS+87X~Ru)ird3) zgIhg(F)_eISGT9vTJqeu_D5ZE=hqdz*i40@9vw&%s3>ivI|?SEQxX|PfKVUS$Ojhp zwlVz|_e7^+c$owgRSR-@${twTPkh=>kYLEyZZ@$my*8l&H))msJp5WZD`X)}=ihe0 zeM-cb^dnI4O40jGCA1_QClmmFAv-Y|^gAT@<#c}B>#5!mWK_B+;{kPbi-#&_K;#lf zo<=ZP^D&8|Om40&cKgB&dps|8mPLI4(vB6VpU*?2mn?HY``yxlbWRXZ8K;siP#n*s zqZhC7c>5~dT$b9eUoVU_DV365+(XFbc*{|Bv>c&w1rNSs>L=dGfpS%!Kq0_7Gv|Qw zm1XB{Clf?#D2~y$DoW6v!cI^gaqMx?$-mWOp3IT@V9PGW9Ng27rAB6mp(SgX?k_y=pBoHVRpRDS7)N)l7lv7OcvN4)5B zN5)p$p2y&r-AxnVGk-tE6u3j6w)NiEWyg@e+S7gOMM5hMM0(fPn`@CCa;=RWKsA*; zRmhI0JYVo%GLn_3W1wJaUwyQ`Y$xbSiT!qPOEJrtk}_e3GW|)TNP&i~;e~mDoTZ0a zILhEDHs?(P8(w|R`{CsgeES@^VT;=v7bxsO#|~L#t^}K$y(3Z3s?+O(3-04AInVp+ z3&PRgfB~!SJ;(HpjL1^A86lw`wt*q|!_5Z9Zp+S_CvYUz^ddkB zL3|5KtMW;oFL;xi&Q%fIC+=6zGrK{Mj|zdoH$m^Hgx=1O$Iiz>Wl$4gxp!M7svzrz z0e@ZkB%GUPu6Zr~gG0V+EL||0xjvR1?!+F!@T85`i5#S)hP!EEJH1e9W;Y=iIL%xj zFQp7Ug_YogY(zX5?_%b(AYd;tFsK>mn70sox3-m^|wLew^75 zBaLV!x${s@?s+lh-~3$)A{2E5&mzV>`^KD<`#e*EkZ3OftB-P(!KJSQY&w1KyPss# zrG7cb)1kD)*u!kS__peyHG=sh7<;CWnDD{K7Pc>5?T^VjJdH0tAsH2!qL>75r4Y;0 z{0ziIB;zEsD^oFPmnq1omm@@^^ zd|RHXfcd~scW>!5Gn{($8zj^0@VED1pWw0l`Mxr!edwq|J&eA?B5*i(`Xl{pE#e@w zv+89F@=I8F;==-7F3N~MBP|q-D__d|F&-XZ@X+Lt#&Xpk$&kvefcpI;Tfz~nYx3j~ zNe;}{1*yFbp^c#kr`}M>8u`)Ts-~M)o;_Wl0Hr20F3zZLE1M|xQ}Y{gM^lmMov@gP zk``h-qe$j(gU4r`z8=BxYkRcBpLY7Hhr=vrQi9-XlJxH!B&Jm@Fz#m_dONzPf%9oZ ziUd^&Rmk_CZCnHo_KC}dOfMOpi4EVAr`wxIfbK9i?Rc4wXlD@#vmZwmLv?YsQpXP) zLqnPKxAtit)#92LF8WTd@FL7IqXyGBp+Nq&QwQ3jT*9>JaUBnwV$#Tf?njFY!hDhb z(O#0Ym7~#v!2)Moxue*shaHhq#N|oVAJX`~e*Hy0N&I^Q!0?+gJMEDJqGnzc<|cW{ zSvjvdx(DihnRf>nfhau3XR9>~FT{%EKVJEL?~~jvw$76m*kXDwvBe;fU+;GfW#6aY zbHWpLvaEVv^~+Dy1hzN>Bg!}uJ-$!v>fmfk_!q^tej6cKvO z589lozI*n{T7xu4#6+v9{nVsOs3c;9^3Wm&_dykY*!c?1jY2WH+ojr(Wb{djE)X~=&iyNgW4-C08%^BzcZv>Gh29omI6!6UXqQ? 
zvPdn>1*VNuFt_(KqoLkhACtARCeNd-U21gl)^t(-{>g!8dbsAEfrB~0p`T!_H{^OP zPYaPN`_yUGyp)f~lK%<9d{`jD`+i>+Kc=(BGF?Dd&j~}ZmqvMljh7@?UlNvK`hZBI zDXctKmMjJOq1RX835yI-5b-6^lLZr}Js@c)`LtCMJnu5^^L`=QelNVCXI%soGp5o= znD_l1OvsawcAXXRQjhIcMMix1ZV6y&u@N4?jmYl*Tt~`cpkKwPK%l0n{Bd_iS~<1< zN>c!S!5C)?YhO!nc={Z>(IJ`I+v-nt?P#lI_b)ENdZy6_c_xz1PDj8d^N;PH zQ7M07V!j{C{&_7|JrcvYoKG$>T$JhWQ{bYIt40I3QmywhDB>KXLOwBffkdD3E(M%c z()Y4-s`F1VDtKHQhINlt9qcq~ZzU(4ahJ>km*74$+C;z!lhen*LVKJF%+ zI>Hzx?0rTv-G;=CjNxl^D6MG3DUWdA*$RI{9m3EXX<+IZ*@lB>(@6K(2v|{cgFPPj zt~3PfC8tNL)8tEQ1EICM?#sOsg*bf>xTMsY6GjN@Al{00Bky1al(e(*;|->=Feizv<2^w*?x3wb@%H2ID1LEB4TcOL_PifSM4w6y?K`~!Kwu5 zA5iZ>-fIhXhD)2oWdBsnJ_gRY{^Q-Xr9)EL!^H-{Ze(c4l)v zS|<}dJ>}wka+cY82n{3H`%g0pbbJpMlh?JX(2;eCY*~JGB3=W1Fyf zx1-iJh_pJW_+!vi@u_#&h3S-8>-0yn_1&1+r;_q>o3Mt8w%bu@G*=_r3$3Qjud&EW z;6yA4koX*bdct81UIMK6?wex;i@>dz8~B%AiFMzb&J1qgRMQ+#5w-#&!1$@ZTk{qh zFA$F62a}D!p53LGs2nnM23YHhz)jkA$oWc?ACf#CxbdFdl{sI=Y-t5<8G;spFpvGa zF!MFZ5ipRA#cNP*x;WqDz3aEK94I;kd{yp!VSvs5fPhAHc{qcwd(Yy#7Olh$HIOL- zd{m4VxI7tH8@W3LX#q^9u9*NX?K3UZi>ucq9#A5Ul;0(5wT5Hj1k#6HRT;GS{r{-? z%BU#Yc5MZuYXIqPq(f$}G1efQqq zTJu9)INWj7c^>7^R0bN^o>YURL*T`OyyO>OUJQH%rh6a+fgI<_>+$wwx*2I&q;c*Q zhz@80fRYo4O*loyZXvyRC4EeQ&a2uR$!`)@paCYQOWDnE)F=*7|^0Je{>j`R3`RkdpiCI`{5%v=**54 za>N<$E_`{(c>ucU6;{Q-N5PW@(VeV1$hk8rejwo!4OIo#yY~&?Mn4>7ucSZx0LSm! zQ4eG&nj%*s$R(?u*|PxxTw~C}Z2@=u^A{SNX>-Ncv0*g~E>;iP>E)nRYz(}i9WI@v zV)`Dt;C^(hR+|IzktPSq=bj+n%W>!$a7On$^B*p>fL@_z`jz8sMQ*8GCI)C0AP^%N zGeaL9e?0@Qc}`5DC7%}6HULbL1)t69vY3xQMc7qK)@A+jctV35yAGfSzgN3jKpSC- z$L)a6ZA0x~yC|n_$Fb!^cR?XeBL?{K&n?rvjd1;i>{ZVSR9g*^@~WFNhQi*5#-$K( zL`0APn^}-XOWAyPa}aRa6pS7hmWfUxf+0hf$3#$WlkMH>pY3}z01rd2jK$tkQd9f0 z?0c+kL}+%lzqBlQwrGLls51UWYtxd=m(g!#s6WNcPVDyeaWg5^U8pqY z6h5T_lhoOqah)(E0~irW8k95wulBtssuYu2-DC(6p$Ju;#%{_y5%LVzOwd+MN!?0Ij4>@)94>Pasp$XMt zJ$yews*|T%YFMXnP9g4D!mUcpDIhR;i;&o2^B#1%17P;}Dx%W|yv+iG&)U>@bi22h3k!`6M&2n?PQzEiYd zxJ=bPjX?@Nvlq5mG?Lu+g6QVmCJ$cFxN+RSHaGvUnCT+f%Pju5uqcg8oKN^vb{KcD z`?_M3BpdE9;$b+$X?@Y1pDkjPmb!i!`ZYCpkJ|T)w+J1&0tB^^`VC*6bV7*$y@0C>J9t}MPs8>x{Fo-p(G2?KKwo2gd zWevHdzfQnuTy4*aKP1i7dv}OemZFBgNgr13mSGiz4su*#m+;9{pk!HJ3j&6{gvhB@ z{UffvbVWy;QZsCr>hMV<1z5k1d2BOEO(hli<~D@ZY^@4FSj)E>yTa`+lMk8qb2F)g zokkJ+kqv`fZkiz)E-PYD3q=d~N~q@Nt7|w7ptT-34%)oSX*i|;-0~bAC>h!RM%}O@ z5XKBo4Hm^DKc4>ba>!WF8j)JKk=EHkNG6;HMB}TlqK}}zCrb~?$+D7+McUYru%#2_siv0pm zivdnAf??%*0CTD)#*Ht3jdCl|^@HNfQ1}VPAQoaNiz>}IhETWx4b}0eduo5VrOBg- z6^>_1@x=_dc|LEu*3#gJT@=r?$$tBbIw5*lAWX>vR&hX2I-MH@H z5>QSvm;TL9(&8O{gvR(}`{Q-(wM6r&Pxv5R%yyO70>AK7&qNJD*xXq@le z{Ggv~eLRhzI+LSWHHJ2NAu`=DdwNLNGGkre#DeAU^LrUKBD()|`*rXrQ)(a|$&UB|o;LIUU~PXmaP4+rc5( zR279T`D^r;5x>GUux4UEV-1dV8m;1LF0!-*oEDb-jy()(NTeIiZ9CjZ7y~oIa9qT?J!rp4XoXoiGHRRi2}omY2byV#5Jo_9 zASlXDZwrCe%Pr26Bf9(|%NL7Vb7gWC@ibt->&~yiRXIwdcX^+-foJU*qZXr*OOE64 zUfM2}$99 zmR5k9sGydoE(y zv{nicSthK&dw3t`DW%=A=i$cSXM+v^OC*P;p23$f%514O zpK?*0_#=a_v&5+lH0&hh?i^D;>h7A6Nrp7BgYUHGQPQ!%6AfD^i0(qO24!Ehu3r=l zqLw%uowH19YoEA%xRs zvRqUq>Om?5duBc3mD^raxvUL=4GrJ@ufak`MdB7NgO3+NmQmMT<;2il_3mup`k!Yx zbYa=tx`lK)y-^Z~cr;yxB*dWb5T6vFEih$BXp z_B(3IsOVqig3%;N{=r8Z`XoIk6Z;I>hmxwvo`my@Pq!X3X9_a-Q09xzz`Fvw&lM`( zEIH$^_{uuBqQnOHMF`Hi!!MOy$-F_+TfXszABFvaLw|L6Um{np*B|)?SQ3lxy?BBq zueQEqL_ujs`y>tfI1E!dT4lYxMr4YMoY)i^cH@j|{j88dMh@<)kFMh!Xwi`VC@?JZ zr$}osItByLN#^^_@ImQSL@809;_fHLM!T-0>~QovhKJN{j?-q7L#Tnm?$FquMDg{{ z0k@*AlkH(V2KUB*{h0qEjuZ9&Fq4v=ry>@LhAv;>u&9!M%AQ4w4{2{W=OgE(gON3U zo*Z%Q^Ugt=dEd>vTi4M`4HhiV(bVJx{D-(Ys-IoL=%4|C23L?GLPfrR+E0l1`$MC@ z{rd8C*Cd1$O|Cl*X*ja@`*>%TB^(eE$oTmX*ML5bfZaZ_)?E@xuS_{~6miCox9s*v zbCOJ=iGfZIom^vCTH~LEU^VC+M}b4Lt(*n#x;6fmjkHN=LRn>XMCnJBNh_NZ(EMsa#)5#e` 
zheE@%{7Esk4lx}4V?dLQN0bn&vQuu6rBsylto>&7GWtqzjeP$SE^2tcuTKV_%T*e1B6Ji%W9gC({qHI&b> z4V!pqh&3bqRtQ8|8T*~QBv0%eMKl}{_@ z0i1U;FSEgJQzFUs4=X~bLny2``yF5vAnYyYI3(^zB3GPD2g@lmu64cZwO4bp%`rde z#Bv9%te2PqbpJTsv800PKm4}0Zm%iwY$WL_Nb#__Aw>URXW+~`)t__uajOMC1c$ir zruzD7C8ao6S)d`n+FFuG?a8EwNLT3PI9TnA7)dXr8Pq-VSej^1;Ssr4Cn8X!)+uER^NpMmNtL*kQYoJomW z-vCfO9o0TROFIua0jptkB10v&)%#{sB6`+0?_**#mUCRaK-Tbrau{_IueIxFJd@&) zXFg;Pr|*8*LD&jh!?*S3)Tk-jwFAym8eCn(S;TXTr7yt1YW}z{g9bUcoZsM#EeHhR zA&HppU0plYF(bun3}hF@OX4doJMFNBvi_J(P3|8ZHZ-w_9evDjlgU%c-eNpb8+r<4 zuvSMMvkPcUd7okL z^`ETP!J3#-tlrp&I+Zz_J_zq2r)M!gh;<$HEgpmSHy2H%r_P0%sK#pU5c+`j5WBJ!w^qt` zD>obt1Zx%E?KcG>6kZEgU2D$g-qH;w)!q?P#~Z3iZbp+q)Go2pyV>>S*~IF^I1 zk1$gYSESQT!7tgE#i(%4FUc29f{X?}piX(K|M{X6rLwI1eX@JIYJ3#}V@R3n^tbb$ zCU0@*-9n>Tc;VyC9{E&()0XdD|Gt`aws97+@` zg_tcDCgusb+&E&R@V4g3_f;=ogyylk-k~Ir4o8=TB=&f z*rO8nhe1=6$5uJJTFv|T;2_*}uu{q7NJ5BvfcF>vjI@+lvM$*DiGwnm7E6-vYl z^!u0{1gqh$Td_BVnXgtlV-_UJPLjg^bVo!q5UrE?6$VEN<%!6&5x+=&PTZE3wvqO~ znaVa|@sJ6k!1pj&<2V53LsVN+AB%6!BRqd(C-_kM0Vj&cuZ#3&Pp${CBO;80eEAPO zFDyoj5+olr$HA-^b))0cp9>3qbvGKUBhp%5b)3_8B! zw1sY_srpEg zRnz#qbn#VN#;Y*8J3kq8?sMb3N+pR-SfsLk6)ueIfA>A*Ga7$c zf^KDiTF_BDIEQcc5cYJ-+C+nQzRtl;UL~YXkj6Z;kc9W8Xs!1D5VJ{GLIy)ARac=1 z7AgLQPQ~ok@yQEhsxyc61EZ)r!FKZBG<}r9crK+~ttG4E8JRz&UqgOdDNF6hwadgSzqBsT zVRyoBdr?QH&k07=i_4Kcv-L-`>Pp=nVm!+5>q(kY`A>hhRgM1>m`xOhGq}VhT5X*u z=bIq=%+j)OlVxE;X){pWay-bfxJn5PpNxiMGJo+T@F5$;*s&#G4ky4GEy+*!7HrrU z{xSjoy)xx^IbP!wryPdr8N8HjJBj2NT%5mx2 z$_LpC04u;jvk#9a%U3BF#5tm;BB_W`tRv=Yr?Y!if;@apaXGWy`g0^r1GkdK_{_9% zMK%)L1h-coca@dA(Lar)__OZy)+Ju}Y+%eFdCuD%*=ZvJrneZA-p`1)B|ZmAh*y+e zxW{c`)F*ACXY(G!ZnGZ?N2f6JMiqzseS*?Z&wW81Aw7mUIy!JOTo7y1&;z$eFm*zX z1i^Bs_WH%jhnfjs%-9vu0>XF(fL|jgur|XiQ>Fs_@&wR4zanE*|G*6LW~+YQAL@A? z0WwdRi#q0O?W_PQ&tdoyscG5P{LtOKbl6m|wBEAo~ znR<>Un)fk{5Gneo!FgaFes90;tSbiSdX1NcK?|pcWZbP-d`_h zT|})U0=lrfmZM1qu5yStsDbO;!SBU@G3C%U&i=fX$Xx-;SW8RJ1QxfHzoHAoo7uX6 zTJX*SxfUh5u*2s*4d3e#>hkn8lhS`hs4o?6kOS`-82kP-p6=EU%8ZIfy0*5q+y2)K zN8mje{*TH{j>3}$UnzL@p`mruw4XLSAsc)NHFh|zulek(_dB&FIN9_?k~iU1|A)sr z4CeHv9p|9?KHs+ANCC3ak<4*G0)qnkdsZAWo|>J&8_|70Z?*#Xh=Bbp-}X{deROL7 zVdeD>5Hx}oJDUJVx>+#}?>tnDB4+?Su*7UhpZPH`qpSefaGD8@j>imTv($-hzK1NnVSqOSVSEcMGPv&NNs#%PvZ?7?ZDAyS{< zxdhJI96o3JwL79YO|1Y4GEU(&ahyROeQcQ=%2S7ru~vBm5-F6!2;(^z`NnZY;brHq z1f0$!3~GGhyIE;|p|tRt<#5ec_usZ|w0~Hcr*949a2#{<8m-l9n#XmYp}~}3CKR6I z(X^y(!t0wOtnRq&Z6w__q_;aOuN}fKv$-w1m>$#tYlNthuXzdBV5H_AR-3q5T4_P9 z8PFuMDu!2_vUeH%Ha(MFIQ?;NKZtD&JIz+C5^58Yaj!|zL<4Y28>9fxLdD$Lv6+R? 
zN$r=uy<}BOqX)Q80zmgR359w5kBwrGvzRUBvY;%X$mmb#A*3I1D@b6WXS-M6`$cl$J?6SdovTQSad|z6ht}1=(zIsYvPp=-Y53 zv+rgGZ#n=IGc-dEf4DN>|K16YH+9M!Ld|d<6D0uew9JY3}N?ffJ&{pQnU*2 zIX9cnKAiM-_lY=LW9hsmcAMYlY*I~@-VkYfnJREIsFd^AujKWty`^BWWQk|hIi}a0 zPyN&{6V-Jtu%dw`(M}j%!6Yk1XE9RVBjVl_Lsx6DX|ZoBB9*Ja zp!3z5F2Rx@CL}egv$rj!k3kQ{MMr1DL6_54&9}K`N9zQToCyf|N&$;v^VzbGE9eOs z;k-vGIDm$01Ab+5k!lk2v>680>jbeuL^%_kdm0n3$#uO@`FMwfpqoLYY<4t%#D>M~ z5zS%T$%H|b@>gs<<;|29_?{}8YH}wTlHo=V8%Y=JGn_rlvjtJxr;Qbn zj6p>xkCf?fiX08gyRUmk0$tkk?7rrHnGkqm7=6Suy_(nB$ncAYBuk>aEq`=47m*kv zcIYarVlqe(n;6Xs#ms>CAH0>o9faF z>t^?>y+%@~*p}G!QX34dp-*Jdp%TwLyZ7Rn3YV>7tKAQD8mou11akM#D88FT39&Ce zqrExPrXu5iR#_W;+e@(zph zt#T&CJ$_;qF@sDBl8Y08R*U#I#sj32jX&rc3-)~y1XVS@^OG4gS`-ZbBwt2wP`htX zJmcHyVEKh)!V{H$@aW_^Pcz9?@=lzHAgc=5vBFZ zRpyNQR{Gi^U-8k^_0y2+3xy)9pj5}V-`m!Hh?6Iu;H?U;;CU*-O5dwem9vP*loDO{ z5Nm_NLoJ(aoaL{C472~aZ4!FqBkO#yj?r-dBph|T2wMMhGjC+wHK9mXJN8metc1Yj z3>@^n`U2c^I+Q#)`dPZyd6V&x+sVHM~*6;8` zkAG9S&;+h;08i+PbIfdsK7$R)B~Sda0fR**(#tXBoQ_wCccHX9Z#eqIN@#$Gv{UT* z$FYv;?*?~3_OoQ1h^&Bj^VQXaRt$HoPMH}PrPl=dl5a z5v}F_v;3v%wB2xL63Xu-Pf=|a#WmL?xBtj1_2Io^s{yvdh(jGQMcX;LdXwqCl6t&W z)SW)_x%bKg_ACZ*tPegYl}gm2v>g)iqb_*awQ>rlHX7{n2DAxrOvl17MqS1&f46!6^NZ^PJn2RldBnIEaPU-B_1GY6aoO-pi4a8=_A zmnszSf;IozYW@uMXm6^|%4u;Kp7y?47#EM0+k=n&qwF1c@`gC~(Pi78y=zdyz^9Jt zhMThmuXEZ^hT%ss0uqw9_gbJw~0Kv1uy)&`1U4a;$+o7Wo+-lLaOzya)$t3(ODN_b z-0sZC+`MCTv6-GlJJIw{KEZao7e=+4tK?!iRni`iwG|T3O@?1FX(R_rQ_dWqzo9+= zdY}qNcUVVz%bKggHOHjPQaRwceILl=kQpQs(G+{(V@PX%{Yk=QmF~#*>*~=|Xu#l2 z^xJuZk7FED_cosEQ*sw=-J1Ao9Hfl%e{z`?e=-cQ<3p!iQ8QZ?{>*+bzT=!wwb&gZMUA z$Zuz3z;v?_!;AzJc@|YU-DBa%S5n&Jq~Y)FC2}fAEF~S=YL06rYI-p3)Ra&#qiZg% zM^#0Q`3^H(uA1toh9r&i8&eIaX96^gna!-ovHMmp&v7KWM0o-N*N>xC^cz&0#u=KE zx>RyD@y29VL>!;4`0hU-ZEHMOB<4M<_y#w2PGxOn;_cUe`g)xHiik+>pVPyiMaYH9 z9xPO7g0H)iMM$^^ZY)QVezYD^OxTBTIn_f+jjOZ@>$7=oKK!Yk4kH+O(~lkUo%aKe z5%0pIF+IO!L)Och?yi+p=QR0TZw=Ah_;prh7JZ07M11LL6;bLyo?oP8_Xb43Z^7MG zuOn8vVo-OrD@s!{7%Z5}^c82i|Z^$+fQ!taD_$Ug4HYB*g9 z3ll1O2+v|DWRj414t+<))Ku1&d9iaA9kW$KjUy&_w*MV!T0w%ob~h(nU*CfejaD^Q z{Gj?|nvf(OT5*yPO_cdfAg{bF)ynN; zNC}4uqqZ{0hB1#>Q}EQ~(V45nYry<5KX+%!=)*;zIA@U$S%O6t>y(YqEga@+;*rH# zS;HE(_&duAEX=g-!Okxa$m|xk2_aeM?=d5KFfLU5r9hvYatcJSA<(o-^!cxbf6HDW zd3AOkiWwT>BZ3f4*ZF_gnh%@7uecZ*;-B~9x)0fV(`z<|q0f6{z5}+n+Oc3@nPsFZ zgE5A1=B*P60E=#7DWyy7hWD@RZ79_c%v)l|8>`2XyTRgEqGh`cuvz zrL9p(Df6ys9I^orv0+li)mVaa45vvf#W@PQp~_cTex$?g^;jI$E$5{l^CN*l?7)=q z=V@I&mDwdy6=DsHZ)g}6Do1Znp*c8;WGRZ)5{yP`_5FKRPW3y;^LDVDbxgwboSVKh zziLSTqWayy_W-@b7Jef{sBRZLT9s9)YzyUR_oJAA%Z2lRbuZ<<_B-@}vujZm4BdQ` z5){nknA%mKtFn1emGtq|S(A?0t9SCb!T0fGNJ*p7J}F|#UCWYKRD|?UPvhcBU{W8L znufBN#xu(A3P{gVz@4efFlH=56l1zfhZ}h<=C&4x2zLp$oNKMcp@S7+?P5XC4u=`N zS3}7Rb0@Lf{K=dBzzU`7>t0!&R!XF2fng4isQZY@#rE1+I@w3m4XT3gn$FiEOYcRT zusIGvIZ4Byn>1dFW;R`Nf4XHtI;a9K0^4v!D$UU*gVUePk#eeV$%~?ItG!L6rADv#Ze|dSfnz2Belur_t5wD)SA} z{^1=4W_WYl+so~uy~;F^P%>xyY^QDq+y{pQtD`MiYb+#V!r4ph)$yC*#8?mv6Ob$i z-+>oUI!iY1%C5MBzTK#?;O~UtlLxaYZl2qdJ+ySrs~c&}@b!z`5JfqS(Q@8qFu7x5 zg(4!bjDvPza|{a>bs!|P+mLmiY%YtCoNGyI1Krl}Te0u$&rO{c6OZD@6)7(+n%?6s z@x6EiFQNCqnM%G@y;S|-|FU53ysnDDF}S;@*Ff011lx;dmc_`tZdO_UqnZC&Gz=)aWKf(tTvjx_j5784P zeNJoodPerZR#mw@NFUijmxB~R8)lkz1U8Y|u50E1<*kvPh&f9FnZ8E?2JSUaBb+Fs zmqyIJ;*hM-aX;A3?c!e#8l*5z8MMXhA#?D7FPek0$*9stH$v>=&-RPmVuc=!_wxu= z4$hKV6c{BsPFBgO+G^zoHox5D_O2X^OnA)r^?L4H)4OiDda^;^;-ca-eDdmg`RH}6 zVu5eW(MF!Y(5IKQ$u?i4nC2L(=hh{Sw_Ua;YQh(_4$*D(8*=%Ej(@O}r!4$hWPMnr zSFvCk{l;$22ysXmtGfUB9fiMTp^~jZLjlc&vEGr`Ko;VoH6KJEJviNl=7(jF*>MuG$^D&b1(H?C388O zd50%6kU9gul==m8$(gTu<(3dUoTtz_!aKsvb38A0t~J8;Gznzj;xspzo!Fum@Y%%(dIPdoS`pEmHsxqjxTc>oD{6B?zBGzABY4 
zRa~!#_njRBI`h0kqZgX8oER6s2rob0{fc*b#S{N@)4wu$)riKhahgZ<)< zm*Z*i-YPBRITXm}l1`p9m)+-|*gZ2pAw$&uDp!tyy;Js;E5vO4v`aJDBJH*PqA`)C zVNvT6eS7X>QLPBtX;WM0&5?FiY)RTSd6hp*PC;?ds)rZnZy2^rvB{-MvHs_>U7X(M zidw_*@|LH)SOV6{yaSg3r=zIN@AGiFFk~oO^tNesVM@H%`h+Kngbzuyw58D?ge9|7 zN@Nf7u^xSPFTmExKYK)zAHMwkpzQ#T&oz?~XWEW7`t^*#YiDG zLe%-SR-?u;M?|oXNGHi13cid?1!jgkb=RDt##3lu$OciF-O{^|*9?>i1n=Y6GQC1v z@2`;tK>>6)tdtCUGu;CV9^Y%0&8#ugY(IsZ5_S{L6W+&SVOmV}4X-P~^b3($H6 z&7iElP?=JDv53o>oM6^BmKg>u-X}t%c#d*16V5({Wgf*LYo zFC~}hAVM@y9^xEcO|Z#`H3PX#)BG%8PWKZF!s;z?3^`tmgVKgm56w~g@5UbJ?#k52 zy{Yew$aqp%1!;WZ(TfApxP-0VpAwCOmgy$e2`y1*B%x7yU%`?-I` z{lcddiZqdt(-F{n=>2Km(i>h_m%zdtDc237mgIEZis~P5<`*Sa-Do!WHg#!ixHR|F zkp&cb|9%hvjCn{rkfYC6^O0ms3ySi+H}oTN07Nk9g)vT6nFm~)R`Ok}EN>t6E(IfM z=Wh_D-#!)Sb^SOBJucRPyC`Rzu1&;)JNx$wlk76=bk$yfzuZa&+6Kcnd_4+uBW&`G zi|cd>l!!T!_N)(}|NUVS#9LXU1I~Z0q)fet7%WS!gg>=6`=>8P;UskBz@k+)&%ekkC-?Ld_f= zq_Kln0=R%RYalMD@;c@s*2yY>?KfExdE6DorYQvC4& zF072~U(%5O?HTY0xzNknNhKJYf;PGifXV>r#xeO(8ujmk;Q7xJN^n)`Pfr6mVjMuE z|L;Rv1x4!o?I!RKOW=7WhJ}xRs-aDUGru!e z%@|G)S@Mcg;GZc{;53x3)8=nMgCzdQAC5)*=1XrmG1`_auWY_ucr}TbX@cLR*IJ!d z;a9D$P#@_0XL-qE=OBhgJYr^z{_PxZ`(4i1TOPjOB>zSCq3xo+<#1X;f|OsBfx_Rn zgzq{?($ zM41}QeDwQ2OL7_KzDJgbR;axlcHjal+n=)HzvB%#I`ge%WHO(a=pLlL`4>7B9pkl= ziW{I(P*x~^w?!7KRB}8(&g{x6p!4>6LdlcXXge$2^#T9~R=~Y!*=a!b#fzkiRJl8V zwy~O!H8v##gROr6(3XdP4)Djle?eWZKJuQo)UY$i) zV!G4fV@XIb=E=*T2rBoka1oKvS8bmAQ?&fzK3Gf|sxa$<16b>i|J51&pB7LQDo9*| z{t+AqToT!`-^fr3J92>|$r+EwMHD%|3=&+ry*(P@MF7&fP=E=3war)xC`c7(eRlb6 zr#{CIXNk{<_dSKB0-GFh-Y6hk8u$I5`TIic&nk$Ht^$%8O9}yB05Vqrh0Q@drzY_H z03M*yF?3J&v%ul+WH?tgyg7g+##_8Km3Y;LIm_##wME!5e|J_SDHa}Xik@9e%H|`y zq>t~OVPO*)H1Ft%zV$5kDo#=csInT$2K=7aPb6#t)K1B9afD1zlYv2VMw_73p!=Lv4p5`{0^-pTP`D2O>M|TOb5h2~wGN2hs8GRzh)vD><7%(e zT{KMj+z=R1B}ztVY9~!$oaK-izv;Nzo9ymY7)odhNTJWhww24X$c$BF9&BVWkUWGY zDUFUpC-?RTTj08nynets9G;QLwpYER$% z{^*5m6_pHFW@(!}fR#V?v*;q#3TV}ATbn8up8A}YKsM|fD)3GuJ$@ynA1q*X`L8dq z0wAHc1SvZ?rjh0c(y{~qB_OTZim!kYF2lS1Rv0+dUG*F9hOQ%8M_gPaT4iXgYCS+& zy&w`ZyI835yYl>QCeKDr_?shw{WHz|>RSy7m%i41^RY&;eq~-d!4%!X zTs7OiZzs^D4Id%{4`GufdS7+9(?zr~7XLFVlXkM0BO`!em{4>xJV=3I)pTdN^jPD< zsiQPr&_)9b^S7bf?E}Chdor5PT@i5SPa)>Mb)m3|0ExJU1KE;I_&#OqGor_vW2u0m zHUKP1TAuT$zp!rz$(mnZypl6Fmlp`NjPG;weqL2D#;`i!BEy(F#;ZrgAdF~IWOdr& z2u^X2@84oq-jq-t-@?z<_+W=u8C|h}l+}f-81myO(tRg{8`jx-OHHL-F_nD--fhxI zW3^*08Rx6tT?8YCRoq_ZJY&h8d-DUpqmH+{_3{g3E~r9VjJ~4fVk1M~&2Lk0fczE! 
zPcDUVv=GUZ0N^(h4YLUq6}E2zMiR?T=GhdB0o?X?py?HAl)14&du2C*VtSC?=eNz`1Y~33>vi z!sD@r)8$&ZK?c3tmWG5cMRk9{s^%_t0D_IQ=3|Y@@RMzHMbuRPwdMn}Hz_|dLPXr& zVwesV%%++BXJ@y|ED#5cEerwXPnG+&&ia#F?e($rzPC>XB=5d0ajrC}3){!~RrsCN zU1Xd%M>gb|{gcJ%=NkT3s}u_~!9RpVZ=xi+#y@6E{v@ zuh{lvL84IVU_vqb;NuYh51TEzVmbkF;~$L{p+({`v3i4jdRPRDt8N@?eKYUDR2GxB z;6~zbM>=BZ?RP{9%Guy-?ZJ>9j+pXmF;O}qJR3&h9P|6D_>iwoKU)=`8A7%RE^Po> zRY%%5rAzsJ6a5$42Mo0R_`twG1$|~{I>Ke%V}H@$JsB?#4k;Jk8gpl=hY&I?s@GN{ z?Er)=+lY19D^5fZvqgQUg~a;?0mI10C^XH$fXTWER)rh1cF_7h4ryz2P~WggC_^tp zdq>Kl(;b42dew^u(-HRmR!`S5}m zVq7ddI9s4fcC1zmZLBI5mI9iyMCoAkbOgiz^5_6Gf1hu{(%JGdTee9 z2&G6HPz7IKkkS|Jd<8)541|MX#sc@3|LjmRf;Biuwuad8g>WTZlZ5xc&9*5Aa@rkY z8NPe|Nw3_v@P2qk;I$tqH3Ug~_63%Ky#`f4xVk?5aaD#Xs{2TU0kiZdw3i7VYubCe zAXRN0|1}h%pw<=J`@zZFPo}2-Le5*iSBwX9VJ&3UI+sf*kH{P}01M4}j)YKb^B1a` zhE!qXc8yCYT9boi9@=MkR=4+Ue7 zST@^Hzz!?bkQ!fq;7Y3?GAo0U<tZS=`9q_&A$n*wEOx^dhoZAHmru8u04E z!$K46;tp7NL|D#}()zrzrYQ})5%(tIe1nCT+!HS}()dQ5L)w7xO}g!~{@>j7i{@fk zgwtE&GVG}PjlP)N*`jWRkQu@sZ*=J=D;?w{>Cq@$#yl|@)Q%*i>4Sg$_?mN&^hi4| zq%qY^Q~vj{N7^xUe(Rz+cg$~}{YFLFw_6NM8jS8JW|Npj)Kp_UF~L_NovfP?>tA0r z)ftfHmRSv@Z00CCLK1H(2?n2f{CdHJZRjkg69Dk>=U{a)J8pIse-5FBP(V;zj2H`# zpbE`g&kLg{wkgdLwnLmz#{|xfaVM&u1)>z9s{VH!#knM+x!nRO)vSM{yQGg-O{Oif zc4XAlFrg2Da!?RYnwpR~uV_`K3nw?O*9bUktDzFP!uuJ3ptiv!9T_RkiWY^Q!;5JL zx6QvV-?bA)<;aIGMX4(C5SikrP}DsVc#bZ#Oo%d9Qj)<$CXT8Py4cclqsr6U_-fE$DM6O#s!*NZ@dI>IVZ4x9~o{bWBy zS>zom1}9h4D$@B)3E|pmO|Dp5YW<2vXWyxCs#CNe!@$~cz80avT5oz`?gvUBNy0Bk zx$yc*6LrvctE9ZS!E|WEBDzJ^)oDNCpntZOPN^80T%Z!)XEj=vdvvm<(c8L3N@O89 z+SQ$|%heYu;hP#XN-QTXEWt@w?R(IL`BoPVE;V`sNmB?P1#AN++I|VQhWcfokYpXp z!GoM)T)7^EU)8mE32G^W_D7W9L2*&`w=F3r7x9hzHX{377_@KN-G%ruaVpf&qESj%~_(ST#Eu{p%khOln!@NK zqJE~WWNxErOgA9~2tyz`^FsFz6!_P*?zpF4K$cE#uKC%;N-zQC8OBxDVFFOACo&D; zMRT1-yi=Yv1l7xkD*zz}5_*It*;AXedk!x(++>Y;VggT!=_svXu_fUP!G5HVUi3Gp zr6~r-eKV)^kZXhx*j^v9cPUo7v_=F4n9fc;Af&0hX^`k+k$dDe-WVbkC(s``2V>4_MlVL~% z&b7En@d7O#=YFhJiBj(kzBJL<6LUs$asroZPPI)b+Xtf41xy23g>!(f%a84pH*_fguF7|k>4oCjVkLsy+rqUei( z5=_2B-Idy2Ar7cn+Pc&%`SW;V0(CvruuVn478*;9mTo2#_l~J(y-JUY>pzyLY<%~h zf*A2OP{VII5fx?f=2MmnZ4q~m_>+S_p;?|N4vBA4)mND_(v)PwojIVsAqJ~&pZ&>W z2*ZgZ6j+R4{@^8U^SVOIyKZ8gASKsDNs1dsg$IQ`T7b$>UH@^kDy8yn@U`}Bdu5P# zh-(7Q``*m*;}8Yf_X5$}UdAOup$d=o6=hnmtO~Q8r}S-b?sN!rG4rE*ovSM(ta_5k zy;J{vI=XYKC;LGOC}M#EMn4MM7^ix@P##9WY?b|!0v zw0R7kvQtoZj5U1s-k@iWp%Cm9X>GhahISbA-=tgz<|kF0>C5rg_xB3Eo1#$bI~kEo zB{+LQ?brHS&6R*Utm1W7eqc-qaLY`$auyZP!tPV1aQT5cMv;wL=L@%G?k4vVn?0Is zE7gx$$hOJ$dlE(WbQv=(3aUPj0}TuDTR<4ZOfiKR#|0;B@_CaK2Xt0}D%sy{I0^5J z*`p&8FOPBIlN-MC`tyLPe;rIgeM(m0kQ9bTmqgSPIT(!?Qn_%o;<;SZFDrqM<&1fs z-6>4wt6#+`4MMT^tJttcJo(61$6R}Ac?pEwoa!3un5yojevzp zgon7nV!)LQ6X_~32CL`@xf#i3{!;nDVYL%=wh)i#6DevoJf-P%5u*8=JoXt3cRuHX zg5d6(4%;y2Bob_^76aOr3y%mGB;r(R#+j{9nV7SvGuA|C_B|rW4Tm$Or9r}T2Lsu- zyb?n67DR7F1QTqEo%Mq=&xiHS#%TTi8zREWH!-t(Htl$0@7UlN-RdzoSsJ9>d?EZH z5^{XqPh_y{*@(?q%~naFzhDWN_6;Y5ln`Euk0CKsCSQP+#^O8DBVOPM)ODqfOwQRz z@Amt!NrX`NZPoN-lc`KK#55ag>i(#Vju9{->>KgFw<$WtMIV*%hgyQ|4Jo6iPJm~? 
z6v4vk1-hrC>kZ0U!9ykGlaA_8@1*ce2D7PmL-CjOp9~UZCsY2%&=hC9Rr)7`rcaSZ zx$@O%xf0m*crR<5erg*Zoc3NQ(N8PaB6~dwG1S}2Tu4_{8({pp>_kQm+%<~8v1r+e zb_|pr5*q7$f?oU3gCrc_3_Sn6#mDQo{yX832p;PgU=5CV1t6(}*9)TpdO#9u z1J;vlaKP}+`}C>h`5G)Dg8$ARoT{=xDfFRjiOZ;N_&Xp5Xo3!p*ZaP8AyP?=^}*{s zp%e+^p?zVl#izaqRJ7T^^gPYmF*7Wu&sM@%DAob^RP?S34Gy1ys`)JdZ7slcd_KXY z6zadm!yusjQv-IF>VJMbI`$j+@HiU~(tI*|GcWdq%q2BYr>cR~?eP-XuVmM6E|kNx zMTeUPO`fG*V3ZGZfM$cl^Fp;l|EAN6x7v1Ne!<am637NxxEDI8 zEABp2TJ%lgI4vi7E0mkGEms4BkTKvQA_)w3Pk8<||S!xTJJbtw*4w*A@4F8Fxo zt4_?1)PlKvAqA_mX`s?D_?eP;R7qnhr;Q$OCYsX6FMFfULMek3~0$ z_kBmj>T7FkW%5^}HT%g3%2Eh3@WlMTXBg(1L;mWg(?hl&|%|!OI9hK5c^=F_}+2r;yN+6T`GkLe@t$ep^ zKV!T^t_92)76Ery6Hk_zCC#J$PO+EnK}dwfTa7~7NquZDih1B6drrp4q`W=A|Da?D zmZ-b(Ndo?aIX~KJmh~9IfmO`fA!%kB;>lnvaKzE)vfxmkPWTUH2<~nW-(UnwAJc-F zcW34tzB#)LBjOnvOUF}`&*~wX=VS~v_K6qp$8Zz|W<37*Uh*r%|KsW{fTHf+H*OJx zMFe(1X#|#58l<~R6i@-_2BlM4V1cDmLb~;lMuC;?1_`CRq`T`q`#it@yzjg-JHrg1 zY<$1x+~>Zp>vMg8%D`^MnWf>K5qn|DiV*%|$(qLt@4&UneV4(+6bw#O2{?P?3CckT z0HuH21q$pi&s!57QKc*kKmN{BnBg+ZZ~y<-iB#8mmc{(1%Z4a`WiStik$oupIxw(~|MQ-E3D) zZ9yxq`4i))JC;f1kqm8I4$%bG#g~r84SOaGW&Yo7Yhc{keY!G_)_TZEc(b&XqPaFh#rGghF@;4%D zsVVBQpMJ}A-#O=VirZ6Tbi)+&7R@kO&U;(qki9P1xzJzBDKx^3Z)EWj-6;6h4CKtu zmsv1Pd6JhuVqC8@-C)W$W?!2rEnS6J5R;SBMf2O4^qCfgTjv9($L+7I8i?JYk@r6rvBv#3stoN0;Uw&JnoBs$^ssNT^>LF?&Dt7q zF_2Ig@o$s=Ys&rCKu+q;&ynH&G7W$vO!!7Xf1eMiwM+m4Il%8~@b3T9<^Q)miMq=r z4?zu+k0(jtlWQkoxQMuJFn}2>zo}aUwaA}Zmkr7PcP{YnzfiIu%Of4y1I6A1Z;H%ceG4EF={^Z z)0IgfN5X+KkyTS(IgyPO9c1CY;TJ;Cgns+TKw}JU^6wK=khoBi%e0DRSvzN(gcYDW z+78k(GWCA?5uHo1pz#AfxQRN=8zm#7EFGpNPkKZcSN?A6*&JzSHfZphO9{n|zPND>jfKhUl!ZfAkfbM zj@=5-B1DH*q>1}Xiaa~l20I*@0p)bIXT1AxMVXkInjLJE3FF#}iX7Of+e5hugWt9R zaH8Qt8C?ckySZj*H5=+X_Q&Z)r%GyMN1Z>XmhhbxSuq)>5hG8!(2sfAH3wq9Y%(jD z@A)p_rhA^g>PQ0oEix-%ru%J~v5tu^(1baZ{tOUB}`Nplm2VL~h zAJ+1`_-6|Hi#rv)-&bRHR zs`^EkjcX>*>_|Vnztn<_$q&Tb;q}uUd{`hV;GkF!6 z!T}g=@)5ELs_YC(s2Q|GG+Sj-XnwJS+y!W9|UkDg@o`RyVQS@@F za#bM%OwM+xxO11AtX*cnjS|^#b)5! 
z@qwE-9^_2)i)i(IQ705X%R))fnPB7Kc&#CUce@{ggk<%hd28n)4HXLvveV1e*I;Cm zdT%&?{|1B8ssOr~&9yG2SQ(vaU-1I}31bJ-O`%u$z)nqOy10XAPCSLJv!B19S z;bhda2RRQlGFT|^<(5v&T2+M5}u=1PC5zNsqvMRqG^AzHB9aY>$cCg7J`d#%(RV_|erjvkz( z@Nno%7z3X zO6A5TlR|{K(#lwE(taNl|69|j5r}|b+12lI&QZ=D56}x)sT>qPRErEe^Bbt`$26j| zaRrty(oiKToDzY#J@=?k9v!JTzpunt3@5hDpL$&H)-a~nFVv|^=uz6$WRRUh9FFL^ zP}SB8o{JaR88a{6^k5?TV=hol0h6ud1weK;}~Nq zdx7xY2&h=XTGrm$bw!UDN_Of`4K(?yI-Ra|hyt7y3Dv+b6YY_k{p#p|Zvpy4g48IT z%NsYLIb=l*g)L{Sllj8I0*{p9w2w`3gu!{yQM#tSufcK7Rn-k~N>zEG>f6i22=P#@d#SlrT-ko(O+# zilLLKd!MHl$G~(UJ;1G}0(OQIH53`*9>wwU4;hEzwN}`if(VX74w!o# z5?)j1V6ATW=0gNw%L`PKIG{sT`n0^C33!s7KT6s{dMS5a@OUxsr_9IP5>{t#Y_uU7i}$>v~TKK2*9mllt1K?-WZo(aigy+_EM z1bGqW171_hai{Azz%3HTskCTH`=d_S(wwL5L06$lDY$2j|CDwOuQ@z65fB^RpKKEj zWIw7c`eloYi@Sg0icz2w%S(5~9=J3hOaptQ^67~=RWixe@RhiYBG<7r3-=~%0cxS{ zED92IMsE0pw?w{#yhJ9dS)eWD@grXaND#7R=h~6lQH+NM`ex>(Jyq!=T1V20#A5Ao zliR;s4z1#_@~@a_5zE@HhL^}pNCvWGG`jSIDA+lj3BGEcl zU~Bf2a7+5r)R@%>DR@fY5~3q{yb?^epPbuuSMrz{W8y{=BxQZf3pHWe+>L*zozW2Q zGVmf^Qt(=-rbx0mE12brSUw_qTPrzIV^!7r&fxN$I>JLip z^ktzr(Qea|6@9uDV@kM2VM5jW@)YU3Z%gdpuriQ71qox@C0UxLedzQU`b20Igq%>y zQ5z=4wu3A3XE68V=$H%A<}N%*}M{ZHb0FBf*(?gwC7qH22Q=$WHvoP0GMFJD;s ztd^?RC#Y3*icS+}!ONVpvZNTiHT~J%-5I9bMl|x|3O1E)t%6`|D!dxSN z^9y9mCGQH3Koz@ENob1Rk0~T)QQ2kRgGDS!*3n`xsfo6c$dY)my_qiyE0=)EK{KJx z%D%#1(q>F7NxhPJDSL&{RuAl}X@sqEEVUq2EAopqT_M%o)W(-D+PFE`4qj=5G&MMC zEjBDA-*NmTtne3WgQ_zLe=U+wmIA8`9OyC$-Q}nA32f_WHQ;|?7Wa|-5O=Dlu3hI+ z$v?7pkqv1wEg#M)VHD~(3nX;7?#wOlyYRJ$B|7`ZoiR_fIF`6%9>Ni1f*dWL#mQY& zU+}OwrRr9Na|d9yLXB79is23j$Y@SvcY{X+jGqF@av^F{a~E$ z{%+Aoe}J9fHxZ+9vNdF2hYHzrE;)M2{P-dOFJ3tN#^H-o@jSL_6x7h~xC!_rb~^6w z2x_p83N4DALVJQg4*p1jA|v%M{|tqGu$5_Hzf$``IsF+@dGdB)a;due*?<^b!)0Qe zDD9Kj%RRF*^>cGq-O#8=V!WkRAzLSo9uGUgO>-BH^Y&W#-WF<11_pmuwyp4y_5HS< zHbW+r-N!z8oy>+^<20Sh7ZHwXVu`4SY;pV_y%wy`kG6LzjE~>p{ybZAhq*Ogm3t34 z2|4}!>$eKwGk5omJ9_PNEN4$HaiM_wAS~5g$l*bDHasQ0Q%kzjbd%QkjWCo`^*?bL zI4iE3^?!a1jwIIKnfPK8l;d*uUoK17T^ zq(Le}JV_9DW$#pf>(UxEYDn)m*b4MxhuA}u1!tgesF0ws;5-ZGj!26Ic}r?D%ObHY zWKix(staz-6lhP&wD`AWJ2>WTmL$gRu;YAQ#r#7vGDkxw#&u9O{G{B6O5d)_zjJ@w z-vOHG@KHyQ(1Deib{~B-1~Vb2YeVXeQnq}1Waz(VP-VYgZy;#*lu!4IDVB8GbF5d13m?Xr*V1;E z;YK2m+L~!1dN>o-7^#}RUc8_!Y%0D538Rlt3?C1Xv{(?mSp8^b2#yZV#y0Y^I3U|A zX-?^jUG;zmNp#iE$SlYQ!*#@pv-#t*4ZM|u5v!^`u+M7L4tjbjH%@5TvlQ^VL#QkQF{nuUR1qC|fpFAnawfucz~J-%W6_&N08pQ#v^Lrk_5i znl|6g9@TihSACjjSvI#MZk={1&S93p>3F!LOSm6vetCMO`lnRab~?0)XUFx*xY}Hz zL;tV$mS?%!&-XfhQ{NB!I$7;)R1=)%qCfG?npXAxOgap*-m!J2QXwsl*~O0Jx%*YW zV@bb*L!rK-O}012cdo~fl3$;}=)&$_87aDBpb}UTaDM+#N13tPBO|Yg`8m2 zVCJ3A+8DSnYz4~BNS2VSUVc{2?3sq#c_&q>Z}^?<)*VfBKu4kK`1I~^z~KWeIL!ui zHEp0nneA#N%_(Zh-mi19M63Gc14!a1Kz{Vn@mJ6wj8BxrpHpR>P%(y-$^R9@?05iL z1T|docg>}}SKi9fItRwQOLrx~CR@C42ro&xyC3NtTx0Ec{MK^)6XH-hL)laFi=Q3x zrb(#xeEy<2o!oE&624X^4vyUgnt=`;QQ-sFzNI7s-|B<^_GedOhQR5T#@qcjRZ#(g zzg5Pa@|-Ddgt^x1-bZD=t2uObyEQu(Y4Koq#qmeu^X!oBlZQ5RbQt9Qt?SDu&5HA1 zM>Rv~OC&F#C8$*UMj2rjABCR{f8=J@hjedd8=kw_&6%{7ACb%2bunp-pD()vNgg)8 zPRjI1Negj*V|k15CG&yga#7NLi|0hzan1F(3QLDjhNR_+eK3CFyJ!EXe0T2CNi&yD zpEAa+Kh~TTw&hsI>U;?hPa2ci26M-12W{JPo8+4Q$(p?W4@VV@eZhJLMJw zT%Gpct535qvg;tx$k#RL4u<8_vJ%sFNE^FYOk?~cvPCM|94?CD{#%IK==)UOLHtFs z%R_2abI^6~$d^a5p8X*|OSRZ9)@*VNbB<6)OmIdXfqcUhs56zB9e409ME*oL%U1`7 zF7c-^b<=uiOi*oKz@8<}uRX1(vjQDKyNyvXWdS&M6rXGX=&Q^K(si3!El@NIy;BM+ zTh=9^ZA;gSKnC1MFT$#>^d*{pw7Uj+4P=TCX)y~i3t>xMF_@7x{~1g9TUs(+t(*2E zd*#jibBN#6VY|EdSlx9oHv~GMJpH-BgU0`T4?7n9$-xxzMFit59fACTC(qvpG}0}o z0qBVk#~g<3*>l=1BKhZ?A;n8=RH+BMS)Q%AgIy)2eiT}83Equ5*@4OJzx)hN`v_OGuHrRyZpQt+^f`evr)A#pBK#dR0bLCA9}kIKc@NvF~>c6h}u 
zuFU6>(ez7KSpK(s*G;#B@ue3kjhqdMF8)IuI>;KgCKO@)aCWNbpDl87;SVtrZ-C5* z!KbXNCm%$`{NYSJ4kn~9|0A`Gc@ydCYf9Yl{1a&Tshmwf$OSy(Y-Y=zPu87O`N#U#<4*(nubKwQR19Q02x9J?psHXPsB@xpNJ$TYWOM|#afAW&Qjkro?{0R->5f)*KVDX2b zm#=c=@p&H#E)*ujs(V3I$(lQ91hKm>gXzN$7BU!i>FJ;77E)yYvfJ6f}p?~vhjMtxMI)=+?iOO}YYw1~L;HTtte0jFV- zrg}-xrd@p&L`)Qt8?+f<{>2gi(sQjfI}x1mnDM{fw%yrCt#IVRLcv*pxp5clVE+?=q!9D$9P--CN%!Uxu z)eb%cX$*_9xhz7P&vN2<%dMg)hO0Ure0`ns?U2&R_r<_qjpu&i@bbwE^Qi@V9vdZk zA&;FJ@~rkSHB0*LLW=50mF8z;52>DbIGM)>46u96&Ec5!7T{|Xxv=VdcZhs&wk7jb zr}nex$yx5fSjUgy)A&KeQ8v(G#t;5%WS0vkjq6N(7E6yq*nKrusMMiKZho3!dT<@% zud%!Hd3fHBP|4_JnXH@vBwoP8#OAwdI^P?Dttm$9G+ePczgCE2^fN!zC)4Uu7v2XA zRFi(AkRXzq+3&{n-ZN{O-SGm2qcw#;5h9E-)nU@Wn`*k9mbFU z@CjM&OE(m*vR={aJvQt5IO}vZj(Z5*-4<Ik=OoeHLeeO) zG?!%-nP1Y{3`bnz4it0|I+jFtan4JKL%#&n64QZOC~`cwHtxzFSp-qWU75Ip~(MOK{#EiX>CeUo?|>Y34VIvqqyu?HbXw zviVQz@2-ZeCKi%`d&jNM5XioAc*b3G+3zc!<(n`0H#)b^ayiIl1K)=o_FU*m)JO_d zBU^Vs@g^!Hk%oiCC2d$-)+R2bR>EB6#kzIeeO^hRnfB$)yu1y=78Q5+ble}MFqXf& z0v-EwR~4e2EkC{2ewxrEgqqUJCLdIawMq4C;Z4E)n8wP*NP2>y3U)7j-ADWLv4zUZ z#C%PcwU4v}PNv7-N1H=Eg|xN;e=Z8Ij0zVos)kaIuAImr#|W0c+|TR)$=L+ zJ(Hw7;q9Bx61Q=q-wc$(=JeD|axl|2Z(Dp#O~oqaqvuvuMRpRlgq*rRuc+jkPif`J z{}5R&2y^dABd=tTDPYscdF4o-<3uHR@u&E$t!z`0;pXzypM&lzAw2W;IJx0(W1Sie z$qH(Nk1P9*^0O|}Xu;Fenge2vdW#8)6mHj? z@0$m~ufBZA6jO4{M2&&D~)z^;(u!CEhedAiqnf?;8A0eh-h>(6Hz zUzOQW+XK33!g{RkbEjF;I_o4to^0~Lelvy{9yda$Qa5~5<=fXfbyh)|ooi{HoKJ_; zaWm0Wd;w7z9+6YkQ`a7sxzl4wCE2ciILhFc9L)YXxa3iW8n_@Tqn5m`n@*8f&^HOQ3+n z(z^%iTymuF!vOP)g@d7lIwz(L1dMc04h*4URNiW(aI`zkJc%7rOBuLgzh0YWRT!X3 zfF`N+@xx7IAG6$+@Uh%hISO_#OO~@u-Ym2#9a{ z`SZIiBWCa|8#!;jK(B(FTo2BS!6zI_3A%4dY;n`~?sw(B;EP2%P<5>>w+a}<%Rf(5 z`Z4L=6XV~y0)mm_y$5)x=e%{p9zNxWHRxoVbTczcs5@5HwVvD%VpXq5JGx%hw;rkr ze8`qOxAo1dP%E!R|9778h&<8yvq)a$BTjn182S)G<KU`zK6pz9+)c^4`3^YRGoK zyn1cmSzP(BhOYFA-|jP;uHXv;uUkxd>05F0pAk+y?l06Y`bUt?@q>^~=G)7;TUBv$?9^Nt67JYj4&jsHY)SMck&p*`VvyzvW8+y|j*iI9 z-M;sqk1(_o+Wg;#>$wZUFchfhYHCs)6rPOXAV&U_8s5S!i*xh{+uFhqSO~?TG*^a} z)JJ#9_r@n=cucH`C)~YL)4~MV=t(552l#-+6L#bJFC^A6rjv zkz4Oi^E&mWKU!(C*5E`acc`W?UTrBYwc#-x1~9*gq7rAAs8~s0o2cwHUQ;69O2E+M z%UNL^b`%-t^nNRUEBr<|=_w~o{p@e+qF_qcK;%?CMwOFEu57|u98fom0bLy_7pIXoU{IuMa zGBx4${m`CcZBCoMVA1V?Q;+LOA33grn;Mr}{>Ja+zI9EW+vxUzD@j(p?=`YD*huDY zvIVNm!}~u7cs#GPyVIVf9{s&3f%#3?eXKMtyjWfibvQRZ6Bw@Oe>&VS6MpdOn?;|vvGMSzTSzlxlZL(IdcuhnKfH=mPpefW4pKCZ>2n!@s9w|)xwWA@4as_KV~ zpK?qVA0c3#j``es5DyC6%PaJZk)_1RN3upBaAG{hN)Gk73{HU}++~^8kj5PO-25sz z%Btq}jAUJ+I{3j&Wr>YM#Q}{pJWKsk-)r@-<(l6Jk=pM+KX7LsGr#ECCuUwb(}{H+ zz>(s4AH_DDHAH-{^Pbwg^`GBFRqs>1M@pj~vcJ)=D}AZ4Q}kA*-~|OUZ{a}963!{; z68a{P$9F0#Dwb`{_^1}B-RQ&B(qlya8I1{$3s-b;DSF`{%0wl|Bs`fw2fs>#jn!4E zekkof^n7gT`N}L$ha}nOQs2FMwz0Z**EvbaXp+zAtW9U4?EE8^;Bk_;kNm_B)6^~T z4=)>b1hfm(F4_@MS~cJPvC*#Az&@p6Sm-?IDYAAp8?DBSyrcB4^D4+LM)&Ef1b2~o zwjUc}wAt%Q>`vL@N;q{+-JUvJe&w2Z+Nk`N;y4bjE=cBt(`pR0E`M(oerme)sg(&4&?}_DLFs$t4j!!dwiENuN= zj_AcFC1P+|oEAU79Djm)OwYihjb~!=dy%?MlOwWIM_k{iEa8c>soucr7t<~XUZ*3m z>6+|UrTT`mTcfWHyie{~zWx5)uRb+R{HexI8+DH=edhIB4^Hb-Z{7IJ%Gaq;9oD48 zl=PThest^^R*duw2a0l`?ilXMJbXw@PP?*@IfOgl$sryo zC*y?k{37o+G8V0Y>2n4{{{RbJ1Pdvfp5*NI#ES;Xs~_LJEAqW(uf;5z$#C(X(+}2- zO0C|C%VhXm2;t&B=VUkBnzxZiH|u@OqCb1x^uX;vwbIgj=JWAzgRXmb8soP%!Q(;2 zN%uF!tE109Ca`EuZgmz1E|!TppClYz{3W*jU6iky&ZRR6oMs4--kG-xYLWy1duktNT5tsEHGFL*}x>zGoMExt1<_NKluvC=aA=l0}l9`gaV zVqH;sPKnb1o43{S6&8bLQM&)Tvoz=<6u3Yx5pT@Kr{HoNvdfRPh>UF z){Ds5tgjcrG>|{=QkweH2%dFo!doSp+9bJuyDS4w`uJjGKkjQwM z`4y=|wW~T}E~y|ip?`9{6N0wQA}A|2uIeC`!mcshti4^?x#pLDo)70=PIVNoV>?D4 zPw-zdz^VN|`4qTr(0|i(E68-|>2kDiO1$SztsZP->1k2;U>PO4GWyiKYrZI)IrSgm zHC2gCRe`k@T^z8#Bn5Z{um0ej>gnH5VLjtdu1_K{UIN#50 
zJXEc!0`Ck$!K?Za&6_lnDds3cHik(*M~)0(R;R9%!A`cdM?|chDVp$*bZc)lkzQ)| z8#xA3T~*U``hgJdz&Fs!DTNpj54fF2Pm{0BE8a`<3|&R68+iv=MY9Tag}h3i+Q!_0 zkX=>gu`G6)sp=KXswMl`uf)t|TZE=&9i_zi3%SpqPmCv0VoBH9o_O?~(=yyuQ|@VU zbQYzmor$iE`Y82U`!-KAdJ7YlENns1tJzpJ3-0-n*C)Gk_VS-sWoBLEliq;uXh?uO zg9M<>&<80Ma7qEY1RxIvsSBDBO&|XLqSFE1+jFlQK(~m5S^<&rw%j2Q>#%~-)pfR* z9!+L-ns36WRDHm?Nuavq1=3sld}%(+WAIXspMt2WzMsJCjLoZtxiwWiD9F4p-yDcG zvlXT<^2wJ#|C5?&VLgpWcg{#f@g~MtK7A3xyi3Id-v7i0%{rULa#Mc(zO7M5#P294p5`L4idsN@|7R)u42zi zFUz=49Rhr$Z9(*7vA1@0`|@AGzy{4NbuoGd@;^LJcRus&TsE%_g< zK zU~yzcs(#4ssX5q*XvKUhc*cN*7Bj5zH~Cd$-%|wcPS_emWDtg>F8$bWC#F|Ara*L zksHT&4zv72TBEb&6ZUqeSMfxscPh?XO6V@JMPh5f;?$@GU<$&QTwfEEB` z``tR0)UUQ!#y=TQ% zo>IK3$E@G2Pl~8V#RNfTha=IVDuL2XfqLHsT8?9V#-qHhYhQ2P101_?8r?B;fw(jo zT*d)cXG_tm28tmN;{jyw1FSzFXM>(>Q5IOBf*-QHY?@|9of5@~8a3T<-epQ7-;che zGs#{l?|jTl(25Rcgv85*J>;h5WW%}Yts?U-0_m?E=23#7$w>NrfT-rl9z#eTPJaY{ z`S+Dq{(tBS)IWQw5xf7YWWL_R5~5-igf7w3(l#OfSzs%lH|vh2?>x5_B)3M)I`y)5 ztRqtqws6VBr&Pkz9~1+1B=b`10Ra&?Wt;k>_387utSHk zm=}0!3>-axm!JGgNPsWK?|g-HgoSC0B=YSvH=hXrM6&{#GM}+Ckc!3wW^PI_2fj_u z2neFBe9@*zQAdRbf7+=gGr77CZzPMN0BiLy$V)Wvy*alW`3p7|rLRzvX&_T2_61i= zGZaVol!w+V#fy=e5g~?oYJw#uKD~w_beda!++9Dx&Qu{zE#ty~hSph7oXRD(;;Q+| z?M&GOUOpj^-8s!yN#f^lrkaO@it;)HEamk9H1ov3f9RNmO|0J=U^9Ejc%G%Mc*C|y zrNoAO$G=Q!1%WFAAj6oT-WK0YhX=UMpc@WnkrdB*1fd~>Gmx0=@fb3lFAm1oyfY}w z?5MZ+E-zaIa5WE`X9BJMSr`DSZL`1g*-Myf?qIo$I81#=2=YyN57%iIGdk+YVXzvU zc&C7N}z1!JC8lCiEi?{CDrtN>fYI;mfxvq;VkXW<1u?Eyxq|n}M6b8!ASOxjAx&F>jE7 z!PJ@l?fwXRz+xX-bYu>|tn$#j^8TA5ZEHErT7q*bN7Lc6)xclfcd#G4lGFzf@;sLe zfLdLe_63@0)sXO1z|f?`$=ya^P|XASB{!za0vAqNq+lD2cIKByTSZNu|2dmhm5t6J zi0S6d!b^Waq8GN61ePEBn~wYs`AWnjB&&4B9B$9i)mdiD%QUVW;Ef&=VLWc0XDUp8 z@%9HZI?UlK7Q+`8Yq<*H?Pt~J(J(gH5DAB#Rok=Fyo{JKHl2#(UzOi50t;f8V>)Q) z-rYz_(odfOL@z(SBJC}$9dX?5l5pF4z@0I`qUWXQ>XlQI2C17L7{+LoE{flSlmIiw zE&dRCIQ+cU;m5S!?KM-v=h#BrUiwTeX)4hVzY_2;D&LQl02lp z16@14cu0((15|_@>my77MrA!8cN(i_$QJ+7ss>D959TV&!v#;huy+sC9JuZD{45PL z|Dy@D|Da_;zp|bm(#kMmf2u$rmwjepTyD}Hp&8@^9SHw`3@-z_s>Y9ZujB3Uh8%$9 zz=FX-Rz8h+S=LP01D739$^E2KSJliALA2z6S_r1>b3F8I*@ypuoye1Wx67VH-a&>F zDd25ZQqgtQFt7`j~CjizO)2idl>)y0#2w+1A@iM99d=~D%z$>cqgWdNC3st$Ky|AcCDRfO!=LFs!zJ%vwUWu5 zDbla~6S1c>&{n(ivsdI1b2kaGn+*<@QR;)JfTzErHk9-i9=COO(cZDMgmXdfugF5p zURhJc*o_M;(p>hV4XVc$eAb9oP&1^SQzcQee&*T*YaQT;V86%MumZ#Y`7^VL+eL3Q z0&G9m7%VR=-5>vhO99?g=)W%}NAIasa^=6HeemP|9-#pbC}&lv_H+LI4X58|TF;hH zgTak>mGVjEBz$40u-+NHX#V?%BOI2WH|Xpx$wWwSb9)acEwsRhpwP|YX1|<&|7Mb8 zYg&Btk4wKKAaPHM#JPg^bj|HK#&NZuuGb{Kh69j>_-e+wnv=`nvm2ZF~ntw zUX;1HppQ_x)1V6?Hl=%PP{Re4<;8-3uZzy9%B+JIzaC1EkEy(W0+H zXvi!Fs4H$WWD4DTvKY6`C+3J(USZwMIgC+$I)JI)M*P%?<1NDz`8=aP-Up4T!o8U0 zhWr1aph*9<#Dv9>ks-sXr~;+AtsX7r#0lQT6eD^l>$9))v{21F&iA@Q#s6-|ia*X{ zb#87UA>XT-t+_@+v^@uqP!82z18+#K$ztPLB(@30TZZL8Nz=mlnvOX0z(h+%MO$Xipr0%#aBpbST2 zretLip~j6=IXO8DJj2#LOhgIR27G4%sEIP;K=1RFB&Un-t2cmQ=bt>zvMy8|%69FQ zZAPFZspw+YJ;V)fUY8xlMtTc>i!GYI{N`@ggATfg1N)U{fVKJzBor zXyU3HJPW5!c>^_ANO$l$}mVgaY^NfT*9H)ToiHXd044F91OH1#TC4R28|Ij0>h- zcg-PV(8Xp>-qcK2)i~+x&xs%JlR!4f?_JTJK9?vuZU-v1U9Td9pe9MrCSlc}C&H{| zVca%l+`@Ykb92hm%&O4c39(} zaP=<}A?4oAQ}%?UZ;`-ov5p4 z!G^q*p1_4pmjP%3MR2tM&Jpmtyd~#0LBs?)f};vk2ySl+h@LS6mRTN;12Z&X3=NFy zXa;cs61;?#IL0l@g5BWe*XazPJpn)M1;@w!pGb!-zrFcp0PA`ZU+6HDBNqq2OO^W5 z@qMl3HK6`S$5EsxOyG`)dmeq!SsneVsR33d%_#JGQ8Adz-D(qmfh_e$Iw)vU_EtuN z7(mc^m^PMuz7gkBo(YJ)?Nno+F6IE<<@GA>PI4t`N48~G)|XiyQ#yV9F1H1i9MB!x z{*beqDZAS3t1mJM)XZxjow)x1l62+MiZ76C+g$KGbr~q>y#o(NX21*GD53pE;2+NQW0<#|fg~?AC$Xa>3wZ=7+T9Y&gpiqMiQ02Z!xV 
zR@>XJ5(3{jBm|wA1B$LxAGG;AAu#+tcZ|b2IMQ{iaMqj23ib3>U2y{d5>i-?2?xwsX&zYoPMKm0Lfz?;stT(**-8ZrthLT2F0Dn^X~NW)YC>^|fpn)&-&c&X%Uwk0E=z?KE2AX43B7AZZBi)v z6Q6^oE1YGUqT4P;tj@JWab|Zl)CgNCdSlRvGb1LpHX0Tg2U!K(X+o8-kvllpaI(AF zz!;YgQrP6NY5{<~&pQoSBLW?FaNnz@l=g7E1TjdnH4uvJW zM?xz=+BsaTE1*fb1>)us3kYm5X<~`hfB%sPVC+no4qOF>;p>l*3JQ4I3vB8iOc_5RLl)Jo+R~1Vlg7R-OUv%61MJ7bh4b=bItl1O1 zm6z|#+-WA&3qc~hp*Wy7qTpQG$>+Q63mprQ(5@=--;}X2E8_#;lD6Bqtl|y11YR^Wyur1D-}h_*hc`2dUigDUBt8FP z9(pm?5MYMC=n7XcND$C!Tu*oMH$1t4>)&oY!HZ@)QbTz=kjocFCc0p(GZZtIobwyc zVOdv{g;GHzj2bAJ;gxsOZbz(c+`zuD(i3L`(C&{H3~J`lzS=ZWh&|E@=?#`$fsxJ# za_3ljOo7Mv?0Q}|S=k|3!Sf+9h>px4*R9R~GKIgGmxWSiU@sw~8(Af7%U+zN$qx61 zG+KC2G4nxT=gsIUM6Ob2BJHm>f)s9ht!I z4yPLwBoLCvTj)*rB+qs^uMOc|BLf{ncJO)o>&$88y+%_;A@7hzNiv_mV-QiWSbY2> zxn^DIwCjET&RC_JDo#`Q_h;JeZ;=UgDJos_?VLRk{c`|ai6;%;bHB){Il>S1|FsC* zP4n;&kyFQEeRQ1fEUSrd>EANCnR};dJGlr}%#yZr5%CcpyvqrC*(fku ziedca;cE`V2M(sCaeP|owO^KqJ z^J?6!+OEX@&@djwprO0>fB_%J&P^Bv{&vDB;h?{ZHHXWQuJm2$IzN3{Hc{1C?Ze<{ zu}wRa`$;{IOXtg7s#to^+rGx%RjJoG?z;aX!eDh!1H=PbMScVnpp-Y zuB?Ar`TeO>S=L5YPbjT(bTvLTzyE+=KwM~Z!R{O9we7>8;MI#xQ3b*B^ zij}g19mH;%r!)$gdFtt$?t<(1H2fP^g@oHmu_>wxyawf7&QV-%6+5-PQZ388QX`whBFSg7E_S$ zyeeAJ&z6iOis&bF97w-sHL9NCXV%as%BN`&+@Ekyfwar8@W$!!-Pxcz(W|xAVQ0-3 zO2n!1Zs`+@#UwxCV72ne9&Cf~c_ig>*;KYE$4n^Ps4?US$W1-UwL z3%n)ov|!d^KGDWsSQ~_*_IwI>=xN_@UeH~trrQ?axBxP+i0atSHCacDL=^fD zv|F-h05(+M923?C-GSV@T#A-Zl^cS}smgwDWmlMG`ICzgA~F;erc56Z!!7^TLYRKL zMk=3iNDTB9PGg2lp}&E(v@4{YmeFP^F;Jeu1KUw$(S)J7;a)XEueu#V5FzIz7^f2c z@||=jt_M@;r&3dB*K5Tac|E^`PzS-PAk&}_!Q&;nm&0GoMXpi-Xw7*cri{ioR-Q3? z{*CPS-k)QI7(IOAe0E=HT9R;4x$3?1Y>0p~heXKvCu{&B8z9&6MdV8ZgekZh34Y4P878I5$< z7>xnvZ{{~iKjSx)XS!YUHxU2uShdp9GZh^3S2v7b1+hKQWf^&=7N2t3m+<G!#CLzC3deh19-7}W2j{qv>Z4*O4^*A=KC;o zT+Cv1+}*})*3PLoY*nhCCpy@mx7xekc|ix89_`^*`iclfe2ro;z*Emler#nAC0Du6 zcu))Gc>fPmXB`z~*Y<7cL23q+&Y`6wq@}yV06|hv6zP1DtuFIrpPq8JQ&{%u0K}Cd5|2 z78VVCpw@*X{0c}&7|?<&86!YOAfEw8tVsT6Ng^1=<@Wbxql$P|WQGPaAK#5fP*&Jy zD5=BLIGUZG>^hC0V@a)CC;VKsxmzIApqFP1rGnz!iu6Z_ZGpp(Jworma*@*74?O)7 zFdiI50?u=2Mvm5nKBBJIFqzIVoND-^IsUiPK5v!#N$1H^Ec+?8ihbS1o%^Fz zMO*xld;Rr}KMc|#Av%Q?wAplQG;CD)scvppE>kX~lFy;{$|*Y6p;m7Jy=%kVFl8`o zo--R-Plb6zq)0Xv%`Bp#daj5pl=wUsM5X-9%D$g6N%?9P$jom{NWHvurVoX=>YrSX z;&(qlX?zO(TI?InOi7th*F$!EeswKyAaLqj0bLe3p;fjdw;;6} zbxbm|=iApzJ($@HY6KI`6SkL))=oS7&b+SOE~##2G7o=BzQjJWDmN18cPGr{RBjFX zkm**IL3Gs9dL;S@X7A2+ou;3^GRiUj6X7nrRhb7@=BtaZ%G4~6w`AGzFi}eS8gUDZB37H%h_6GE#6QhQj*NTsjG@=Nxg5-H=|Q?p zZu_sfO#3>OUYM+?YqBKFLE@K>pwNST6jass_6`$FO z%;X)h09~E47u>$E}yTwvRW$Sn%}J8O1Qa+$=H9`meC10HBlXVSc8nnljh;W}<;^Ou7ap^>dcX>Qlza)l7(hQib%qQ`$*U+Z3Em`g4%^nzpL}IRdj|XSN+*cBn z;x!s|R-UL$k(zpWWDdQ(tN;2bzo1=dTB?p>=99-iJbS`VMsq3pyMIvG{pmXY%*v0r zTN^J$wAvRENuiOGAZuT@zES&@LD<#yGFA=2f2TO5|D9yRk$I!TnXa36PLmxbo4utN z^rjk*SkG>3Bgn7b5_3rmIjQLX-kqP<=?acWKm)k4t+_%MeCY-o49Y0%VKv+IXr!j7 z-74v)wo3PzkCGr+oRs$zLukfL%8=b5qO=dV%zs4Uxhp3bczn>@eRADRYy=Ja22ZbAW^T8=cs5db(=^1+11Fp zcW31u@8#9Qu{jpbuU4JW0mztG?R122aPwbiK$EfvoRnPz z964t;`jyDu-{L*puVh)-K5*8~A64oxSP=g4@=rHL$HRU)#G;~!d3JNE%{Kc_o^5|z z&3?DqdfSIKMx-R`2u;A84mQl6MDvSwoFA|WDm_`Hqi*=A_z7$50FsE0g@(#*hiCjX z&3iAjxWRbi!MtJyXWCS}+*x1uH&IDka&1B)K(NwWPw6w&l>S01usbKC%G|UKAB6Tus)=TC<=^x^VpzZ?{|CGH&$>&ot1#QA}{kq(<@&8$SHHy?)l?@lsvqZ;{>ut?W0i z-%IeOY39pL^G>Q2e#+_oeTXt+RVjsP(4M#t&gP&LQwAv(O;6*J5KD9~9qFG4&rQ<_ z$sF>)KAPu4p7mMrp~;C2xgJVO!TLYxIeAK`BptYRoiHOPuz+4v7|dZZ!zS<+u6&_&ZucY!yF#oH?3uKv*Eut8$>_8f(+RayUt@-%XI&RH&3{P79-5PyKv`f@flL2zLPg-*URdwRgs zLxlGpi)T~20UEl6e|cORUwE-heBHB;C#>(`cxYCh5{~~-w=_odQ>W-Y$CR^e4}3`e ztC9&bHSx+X8GPX!?7VBEQue?W(okh z)}CNMh)~;w5p&RmYkw+VI-LYN=YM!}*q1+WM<-+ycYL{?dHnon&WOu+;CTYruNHZ^ 
zP&ffi1(;`KcW(KeM(iu+dr;(E*$1|QLZ$k-Y2Zwq8S;R?qoZTIP@RJP2AuKx*hzn> zhBO7fh7qL*dXP$A)_S%0#{c|D*xNW|5YZfiM@X6gU~gF4;|i#9&q=|9E`O)r5P0-O zk~V;i$QslsutayRW1$3+6QJx6D^v#mVKNCAUPSM5x|-T70V?ZIFg~!t|JKtyy#Qn@ zt{z!w>F+YCAplA>-Qb+7sr721J>=ef&{Pv%?UjD{U2HeyEnb@4jICAvq%~H@0a9N< zL)xTYF2O7LtlPyhd#_3@*_e%O>vJ!STC~mim+9iX3r3>e&Wh82TNBKU8drix5uULJ8dZVPZZnLkN$Bnwb z17d7Shp~ciy64THpNb8#8LRgb$WtSV_=pQnga>xME|um6CP~csEW;N`^S7{-+BTAk zK8%cu*&C|RGiyJSbO3iXw0*2*;-O4IP$(mu>rV_iDTjbzwl(JOPbcM8G%aXXn9jkb z=myLk*!rWx_quY^oC~sxG|JHJ0N?@;dC~_33@EHP1S`WZ_Bsl{GUZSRY=yb`*SL1i z-1ea8h^*~~wA{WMa5i8S=2?a{;8Xw4*NRrCVzlD@oyIlh_tx!wUjQn$w-8-6tSFaenXmKBoX~rvX?pvF_t!QY=sr z`HMYecTob$&layiQI+nYwSq!0HpB;biD(HLfkzk#MFE%3iSOZZ=~$ucf4(?q>Q?-I zZ`i5XTU6VG3%_VA2gH4VdwYZhofQ_sI%A?BWH{`khahCM{#ZH=+x-@o;-Z6xjjj>9KPV~2;*z;X)V~lx$`0oW_+Q@b7$sL z_==YiTf_psa^Hfe-;pC#ypy$QNwwv-6&0m7Vh%As!xTcM4goFif_*FLCA$%rL09hV z>})o^4EyW>P@jLG&ipBfduDu9tO+L{;k?J7w??&HQP{kn8h`tN3|Yh6Y@>Uu#!VK- zFzZ3sYb;5-5sw0}BINrOjL; z*H+E5pc+jz`f|eY$o45oRw}!0M#5$}aNplUsdPqu-y)W1;VIN_PGd1+RW@(3N7_(w{%H z@57(D-Nlm-_RHQkvt?F-$7y=Ez<(O69YsP1gjsJL_~qqL;Vve=s0NnnRa9fXo;#+` zdX;VOOevBGmt$CKTM`Q-{r9Ud(h(WL`lNIEjRjtc}-QZ|B#+BUowK_*vZy)?0V(bSrd< zxITi(4T!{8;6Y>{Z&1jq%kx37;@kpT3rUu_e!0xZ(T+Cs4)l&xd4;wqRFa2epeojs zd$tZdi`00zh7N&SQ#iClUM+@}ZA9+8!XMN|#(}fbo!^PJJhEySn@M$yehL3IRi#rO z%?HInzO3TcyhDY%A15-C-{Tq<3rLtU4|UAtJUhapBvl(}Pq8YlIZi>sMG{5cR$F`( zrnuxYI3s;&CP67frNlkARd;?yBDU%a1BkO~@EZ}k*HZCFgLX00JBR`L5pN-xx;g}N zmM188z9q{3sP+;(-7`L7P*x1}O0?&S8kkvEa>*-f!Hlu#96iN7!An#rdsTlZ#Z{^N zO$d~%EgKjnC0d9On@dhD_jcqf?{<3)c=KNTaD+B*d+?9^)ojcty$`>6EEzZuUlu>Q zN`1R=j_2)+MXWN7A*JgK%Y%#$tWc3yk$r5gj2mkm$yC=Ps=R}^7xxu}@|*`cJ?*XM zQwXfZ<;rA+g$gY}qz2kFar4^wqC^{(^H?toxr80A!IkI5NLjW;l;B zj5dxI@z-UJN!=!g5%lmp~~h?odfQ!qSCR+3DdWYzI?_BnUC`6~O<|xYEG6lGtNkvemfQKyo=IS(b3P zAuuv)oJnf#2$vt=ZXZJg8iWu;hh1w<7Y3K>HART2zy%abqA@`%E3tf);XOBT2G{pf8a##u$X zVhjoEHKnF#dOx>9_%GMM-~R$+rnxXI}|68foPIAPDui+rDA&b`1II+g}m?6+$FGK#+5!iAv1{dr)2lQ zo#ZTFYvbNvfYDh`+cb0?{g`>u<`0dKBHEgfHlf0mxaAouT63@5NR9T_<4E^*XB@@E zVO$cs_IDa`57YM74DMIz4y!RI-#;(yOW=Ho>6+h7_I~;$)#8vj)l%XpDS%u2Rr+5& zBX+516tSvO%0&Z4kd9xPhJ^>`(7pO?a%|QA<%5z!%V{sMm-clebwa=B#crqt_n7p0 z8PTT;a4k71Ehx=mBl6xGpGGs((fI|?aVRkq1`Bjy!U?^0^Mx`uEYXD%CQc0DM8v{1 zrm7y+cSYqdh4^?}c{R~YH<+C)`hjef?g{%$k!)GhgYG9;NH2&fN~be$dGJ$jKc`2s zi!Q#-kvf_m%BMdKdw-1$LR9p55{c};217nUinZTfFxP;Dn=7=k8n|9aGQMl?-v^-j zOMgl|#Em33k!OSkyNK64YYNJ;UDj$DqH%H?qf7%aF31;Dt`yc_0$njBDCCvRIhrU$ z+I9ifnJ6Wfr?cbUep)yT4R=k2ySeVbu9?QYJm^w##19N+WCtrE8Lp>xU)@dl{^&^K zc#4IOet=aPla*)(EroG{_N{)m0LlrHmojqQ^$T8a7}`gQ^k%E-y3pP2Ip|E_qI4Gb?X#y+i%YgkPGios#1!9IvoiLTGpp$w z;)C_A$fEPQ$R|r@AvMn?@0ot|E4irtO+#0QF7!Bm7ZLO5>;1UgCLb&I|2{BMFu1kU zAITq4e!RAA41XrvKJ{||p^ydNpqgMT5r!)YPg}Rb?>yF8uIW}`e1T}A6nw2LCTY0esqG)LmR7#;lJ3XrURB+Ra{)d?7m))#Kf?FnP7T#U2+V1ctJ+r zab(y~&MkpRUoxn5*?$BhlziY_sbra)O~1H zrY|A6C^_%NVOwuILSP^P@x}D3AXFtgGy5nIL8jEz{Q^rhEujP?ui8FYzo$`JS>?xU*I1cC8^fDe?_SDSNNJFS`Cpz+4_;1}QPmMSg5uY~1gVdfHBHUddtg z;kQ<{0Zl7AtGj^vUEx1Q1`@N~P<+=L@C7&_T)da4g%`16%#5&@KSY^;_NH3b?@zi3 zmu`BO9a3GK(LB0S;-?InR0`QAAWh4FfeGk6Ub7RSD6yBI{4PSwo219$`#bds!g%iniw6tre+YdBBfP{H( z3K35F=+U6(^G7`LjbhXCGpqEAgzFRtUnR6F<5J2eYa|{`!>x5vfN`LB%D-dUxiu}f zW4_lSp86tQ-rHYD{@waZM3>HRWbT4$ga7Wa$FKm8I$`2vXFn8T?f>rHFTC^a-ciNB)1^*u`o$eRaMs;tGc$j8*3sO?}ul#rV~CB0Mr+)1JdH(OBC2#ylR@Lh&V za!KiTK3lttr&+5J2T$BX8_Jnw3dHJ3n4HW9d05~~<& z$`w1dhL(bSc%X17x8TDD_KD8eNK{DuTsdF2D9W$(M25QIHpx_lYgNzjdGQXY+KMby z{rdE^M3#XCmoO=w3_m zi~8APzMWiIlK*baHDy(3DVh4u>%O{;hP(nZkf`Y9+QM}!FI+JeQt_W+SEOFNu^drR zVtOvCV@SA8ih+hwwu99v$$HUZ7*AgtHA>T;Uo6I?=k)`gYRUAzEH#4VYa9~nxOlj* 
zSVnWpoy%%6MprL-8;A|<=IE3Sleu=qQ&osdIsq&^!O}dPG$-V?;Ht>5%Yn14O!NEXewhVci1K?t}sICw}YyvivB9p~-7&V*(G@nm>-*SxaI`zxfYz;UsI@ zFx@e}%0h+LDv$g~{l{3}TI8UxJ9&t0eLjG)?+0Jbh`3cSvXe`rvbMrTgNDm*-gi}A zlKw^1LQYDm?Ayp(rXFY%n)a_Cmm9DBhf;LW-6OquhvIUQkcb- zmGV~fJf%ylo386JC(B#T{1=v*mqT;X=H$6jtRcj*9q{a4@>gz~T9?KBy1SW?k2R#% zBpMv5P|-TyKXi2E&chdpFXCKg;5P2^dA6Y**Jkmc^o=`PEsXi`7qw1k@2cj{`&lMF zaeK{+-nbVQyy?1O0P7ra$V53i?ui5`VHC(W$nwgxAr}#ZX^n|lIm{5CAbbo z2jo?X;3^q4)sInb5a9<~1^nnbNe9$31hu)HKlhgjOABDQ54Tb_ftmHq_q7uDcHfLb zswv0y!23mAW#AjE9&CZEX>dZrU9s7dbL;^zw&O%WvS^EA=LyCNUxdEm$fS&_n@>f1 zvv2974`*Fyjmd`3Q&mbT7b1=f^wMxcD0SvYwQVJTRKwKZ(si=^9;k-vxXmKNw&sCs zT|py^elh8za!j$}uy|wON&f9vk%_+5=ELP0woU#nuPxsd*$JyvMi-}PIVFa*#65RL zZw>*^wJ$}1<#=lGYO+NgBk`#L-(R%nx@=JlAkWDnc_+nx>pzBDxayd%A)_D^)P(o* zNl_%0l5*WdD#YhmOXhBsje1jZp@O(vWUsnyyPeGBog}HdjW!EJjo;^}WOrxKa z0;+;G`YsvPoCEby5(uiJML&LIGifNM_1n}OR;RQZ%)mW~o{{<)v6=8<@KUD1Mc060 z*1$AEgsp1A;hH(a77g3@aRZa3hNqEYdv{1rHQyfr<~0e)C)0y&w=SxRNL(uSO%nHh zqH~0I*V8rL^VWaOZo$lnY-4;*aDWuLM#ZmSg>h2t=)j0)ra0HP-8p#;haxa=Qnm>_ z;!3e3r}<2H1W#J*# z;$}Gcczc!FHqqxj%uBbb$QOv_4Sr?`mwsmvLf&Cj*&J5rmuOF(y*6LM&}3w_r${9H z(b&W!BXX2Sg=u9VI721l-Fs!uJ7HNc>r#*%o#TBi4@p-y?=F5G{)qE;2x}bcV3)$F zvlN4`dCrAg7L1Zj>@i`v5jv57>US+FPL^*;v5LyKcokA^SkXB2LrzKV&$0vYL4-=~ zYp8?&q4s!zO&glb9Cv5N!*P-8m?#_KVYO#uMWhNR5@6}}E?MTOW8JIu2LWI~`qArC zG)zQj3{GMaG{~)d)U?a1?_IQH)MmN^1mzJtW~flgs`nVx=W(>>rX=-~j5CP0{FaYY zK2?lcA}f7j(r_~vi_YqiI_4vsE1)8MdY9>?WLo1I-i!|@qke7^`!+4ttJF(Av)mX= ztErAlu4ldj-ot*fV{O8w!orNW8`5nss`YCHRO6OJmEg^F3+VLYQ?l{zNqbAMeT|U% z_nk5WIitGbdpKicu$DUC+c~}gj9P9X<2qf*Bm^$oiG$buHb3C&(K2kj?WSm9MaVy= zXe{8M5YC|m)4DP5ZQS5x9FqreF|KmvXRI=qw5w8O2 zlO|+fYdP697-fUdux2Nh&rkMiOZB}{4(!+aCPYAxxRxyEzGMN6%9V2*C$x!@Th=!^ zB!y#(1t<9zIWUBYb@CMO+HmqxLT|Y0w}E037fqGt{(o%Y1HVb*y`UuD^tpjKRS35>drGUgE@WG3t z&q=;AYFa;xgfu3<%lAd!Kks9g@JyEXzG7t=YPK9!%}um=_~2UL>wFy7!Ub-^8Q~i& z2p!f9VG~ODENC3W+9u5DcmBD&Jjz_vf1;A$0Z2IkX}hy8V1HbQla#+koYYbT@lwoA z5Erj1@0huLOiBxRxx}TS!J(1>b&z2tb-**p4cZRK*$rOM7{Eg%7Ts+x9~1^qh0Y;v ze3S>_R(u^EG7eJ(0;hlLfGgjXzUDoN7(=;+EDSFw)2e|6R@FKyujWj$k{8lm7b0lwp_TlZg!rJifG@n?0e4hGAcW%|E%VuM`@w)<|Z^rKE@ z(=p$5pHMKZlY{GR&TFSmLX`XJ^)DK1Rf@%MMpJ?SI zH>r~8=wC}}xCGtS=>9-Ir_~DFs|TRY*=U4cLXvo$fF%)GhTdfhfj=!A?x%5QNl*3T z9<;PRTv?u`7?|$ci$rCYIS+6BHD@SH|&cZ{?_eCv&3}F$f8Fd2+xnVkGm2vei-j zv--7PKg1`lHNbsu{z!-)Tji<^Ee-#;&;90a&?^$NMSC!5?OB|^)tc>yeLlCT&mUl; zviDSD3*xm)5}?T}Fo>VrO;@c~pZ$!^KKm)@q3$}Vs96@EjL6~f4c zt|Fel1iRuPjn9(9uIFsO#uThaZxFKXhZ|UR=F&AIpw{2OXz_d4-UV5o%5W!kIK(^* zLA30o?xQlRoT5BrOCOdsinjgZ)do^hO89EXQgaon8?kDjQl z zjX2M1V*mLljQj%_yt#S-OIRZ=SMZ^>5bz<~Z=M6AmVe`>N#^(-JF|^4u{YI1us)P4 zVy7B*^|%w=jJM_iZclhP&h?#AeiMD8J7!apCNzyg#wCa*a&z|eCPLNJ9 zC79EJkrA%HO^c@-VQ~vX%f@xj43lY~W26nNdLBW7vIlE!+X=Z)Z}v^l^!P4wYy)Al zm+y+Ckf>Bq(-H&<;S|Bg-!Vd%vn;IP__)tkqbp{G?ve~f5y@&jeW9D#=rm2vsvwd< zGMIFjV22&4Ue0j_Qx(KtS2Jjf!k4XYI2=W@zCNVHNtulT;c5{jxD9tBO|~~=xvYr! 
[binary patch data for the preceding image omitted]

diff --git a/Health/media/container-insights-health/health-property-pane-overview.png b/Health/media/container-insights-health/health-property-pane-overview.png
deleted file mode 100644
index d11dbf44c2d941fdbcdbc8a1e391c6f69fd43443..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 95717
[binary patch data for health-property-pane-overview.png omitted]
zZ~KEGc&sh(PpDAv>h#FH)kS&t@nS6$pzQ}ip+yu)-M`LoyX2(Ke!b<;58QNg*fHxxMX=4SkHm_#tQ1XX!dP1#4`sxX&mTd zQ=H=P-!pC?COB$UV;S0wA-?m2PbF17$mFei>$!^e`^g(Bm*IW=c=3cxW@9ajcXcH; zKML4QT%hJqI;@Tv)rSw8vcPBcI5I}V*rgO5Jy?HsSPxCC3=kmp1YsmdBdU(y;CNw; z&{13fqD;hF#R}$a((xeGY)Q}ZnfjM|`Yd%V==4?U0z$vrKK=k>VY_0G_Mv*XJq%zU z05ur%r#N;uw6(QCS|Zf2#6phIb~)Hg7l2wiKzI>SO;Q=Xp1QYR zIaTIHR4<$cgM+5*ex~=)2DRCs2b@ghz8m4g0f1OI*3C&V=7@)1WeryD7QDJC>Q-zg z9&p(JckZ3|D_jJ%J!uD=c&Y6(Zn5p%s#`Yv$53~}(0Rk30b5V2b1l?TALe|JRvlO? z;|%<`$J)jd*riydO_jMz7kEMk^OCmOxEG{!2c!4W)c@in82~_PKzCs=S*B+t8$$dT zK~7cK3+DrvfFO0f5jc4gNDtHf%c|)+dKtZ) z*TB9~(=7%ZP#Rzqj!9F3dTxy=HF+L5an*EmbO^wZglefg*-`+fV;cr~Qgwi6TW*aN zv1Q9%pLNZ%oOjh`t~Q8vcGo*C=wDwR>A2BR7-87~z-p)o`=Lg<$L{oFAYo&>>-RGl zK*YUx`fSXBJ0I(Zgdqn&d2BL-pN7J-% z{;vcDKBY6csi)vZpkR_-#~~r2@1y0+dsIh~Dxt)XB;#40YIklhSTb>-@dtY$CZZ0O z_`R$kx;k{tr7p~??j8}>+XfvL97spR^KM&k@Qu)t`GFro^TM)hFjQ+al{eS=4f-&< zJ^wz}YX$!tt>TAvXZTjadm>iBCHPD#qGf2P75Xv)41+>d2n(|r7!Rr}2Ps&ha(%Wr zSkfT55idrreiXb}14E7Qb2~-ZNh7ZDhNoo1Bl15yQ4#AGb#qRM0JY1s*UFZ7Mk8cn zidx0Vi-`z8C-2;6cU*3QzCXvxhXd8+1q$uil8WQmUu}mwdAiAefd!*=Ov3^|n)n1~ zk_NQwe_P>!U(w)8d#js+EM8t&c)zNI@;u6k>-l`0q#5gy)AEawx@wn*YNXzgPMI$s~N7MKVAn8soVBIC;t_`ZNUy47O zp=6oN139DK`<0yybQib3K@%!lXWRAYUQLh>a;2E21YG^5*5HLjQqNs%z00MUvMKAz zdQ(fiQ}t=tU|s#rLcq=`icyydkS0jNSrrAF=$Rx^kTLEeD@V93THg4QygLr|Q zllbc4lHw^w^-6Og6P`PhAC*;Etk6lGWz7wiEGW-|MaNhcX;i21%pU+RvCgwkdLyB8 zzK!cC2Ab?QpYKX|Y*S6y|79l8{o(p=6>#8@QY2|@Q~E#BmYgN+Ab1$793@E!%Er0w z%80VsTN2*&Cy&vgAk3C-^BXe>LKRT+m|w+Shbu^Kn(uwnP_A^W&*sGZ?eEnd@tj-% z1*$R@=v)_eS)k9r5fOt;c^D;GCyKyFN;tNCJ_;&fHCcqkR^C}1aa7{kK^(A}QcP|7 zG`wOF1KH5e<_jaC{=M{`RGv1b2m$k4N8aY2A>V*J1kW*4fH49x2oe~y8d7M-lpNdw zGDRE<9~UTF84C9VzG;5Bww zA^rYL91saGcV-Gw3^#s0{+QCR*jW~pSg*;IRl3Kb&>!=)7|!A;JuOS+WCRiE4nOy6 zObcC2qE}0NgN}A99<{pBWe>cF4m7w_ufiwBOVk!UO?^Q_n0R|;P~(wYoTAw1p_gJa zG(2nTNUhh-dGK?+PV(;7A4#Kx1gSHa=>v{@Q$eLfP)=Qcwdc2!(&t|bS~7Mp?Irng z;F~KqSW~JJ3vKxI{AwWW?tl9oAO@~2A-%V|c+!W_;cKD_cPnW;_hu&|ra*r|`=%He z+cQ^738eJs1OSkJlIvYwLr?K@9|Wy1E5ltc|Dq=~JoaO-kLl zVm5NVP;Q#Kd*4|&hk0qCMQ^|Ki-G{oCMqk>kQka6_E+!O>XTr=TnI0`u@RI`^d-`pYP%v>Ezh)4^wLBD)(w_H-A_#RIGyOaR0Q5;qCK1^Mmbu# zw-_9~3AztW1>C>P?I1^HJ3r^h)eN~*gYcT8*dFE~F7akJ@4GCa=~pUxIFW=uwaFeq(R5V53OaUC`g#NClR#kZv0$HihmakSM~zWUXatjB2lh zR)1m(07ihFhsB>x!v5BN+JP!eFq)B?yb?bzjyLP^!#;ON?}ks4F8yQ@Dc z{3^ce=Y3VyHzdPo2CX-lPCk&fBk5h*gD|XUQ!EEsKqg-?Q=fQ*X{g9b+6h@qE8t6v zl@U~CW3cBq13}0-96p(%2R5827TWXMCd;?Po{4%xGJlivMKTE`sLM`~_`Zw3VBg{Y>DLfO0 z2-eSo6->BwUe9PTWA%#OJidYgBh(h)5-}0oD~J+{U8?a-CI@)E0s>Zw+-{fp)vz_= zPTIA$2^=s&!R8e_EG4!BNV;oou?lHnxk^qd5|UNrA#jWiNvu zMHJ|+X{O|uOU!4AzfTmXkgRUJu2A{mrnS+?-t^!Jg}^#vjChC;n0s?KS|eKrS_If) z?VDarfi|#E$P0Jus~4~*(Cjz>3NXghist#-qXb(GNOglC=5w>fYC1F_ItYkyr^O?@ z7ZSt^3`$tPJI8VI21)59{AP3(kCqQFjsmnrR3E&RS;Pk~%J^HbaQ!)!T${nh!UN(RB_u>$R3 z8R5*{?dn9(_(l@@p57K00Phty_eoNaV~Lp4P&%OnHn+0FBecu9y-vSGM*0edP?L55 zjIj#x6_nPtneb#P5<0FX_eWLP#7fQKKi2MP5iH@q9XwkDb55os0%oNtuo|^>d9O|h z_~yXaeYZOMR3b+eS`431W<1Kqn|d+#O8wu?ohqz19=~b??wzSYhfafC4EC~+;kB@f_C1;pxaC1md7iZtuni;OcDqN$DZPR!p3lynaA;i5Ub?mt zK}d2Qd-ppzQUxGAiBTav_`X?|pm~bwe8aBZ{3ckDGLE+bn)QVSZ5aQB) z*uiT#K{}0VNssM&U?|m!k8eSHvbz7#l&Rl@dQd+J#|A3}h&ChT4H1qtHdvb=%XG*S z+UnO>nPc2XKbIQR)8Yd#Mz-Z>Y)X6D2^zm_>w4fy9_rv83HIRnOovulvUoxIUOr&s z8i3YtBVHrP<1SbnZ#qEakT`l>10A)yzwiWJNv?p@FS0s_B2lWHNxm^SC)65kYV6Q6 zWTPgjDIQB%JlAtoR`L-Yj<4@AAdzpolZoLGs%$nCPR^FViU4LlNM(b}9NgMZVBt0_jG6@5U>wRZS6QC$B9v|A>&Dz7Ztt`}=QiM; zjS_Jlzo#}qLIov>ZeKu(5YKQ>NRctid0J8_95XGM{(ADo_R2Ma@3EGdKT1JG(IDL| zj85w{H!OqlyXh#}C{e9yYcW!zG+$$!VuRc&!Iku8SK`fa{2)FyQcBxpEB}ui`{^M+%6IKq;Q zDY8eTG5;@WlH?`gOi^l-nJ1Y;#G@Ruy}D~(0Q=86BHG;Ou#iV^KkU3yS*hQlS+Nq# 
z)>)%z4beKU05kV@LXJu>{KDWxx8-`Mbnp2{PE2w|#aVk~WS-N+i=gT1$5rm9Ue@Ot z!1b|@x8{O>LXsdwt)Z5D)3bjVTVDfdn!WP~4I?Wpyhp#*_SnC0jy{p8<@R0EEG@5y zO_XxfJI>v-+@F7>*CL@P;zXF>e{pl|c#Dx*97PM5zL==hLCgJ<1)Grll<)YZXRFKJ>(e5mO-J{=os~GC+ zUe=dx6aV^sGuPgGzqOh7;D*fK|DVdToOOt3RBBK0_X{3?{{L3Z6rqXRNze;)sXNa3 zs?7|eH=|o~P*tSLm8Nu78=+!G?Nq(7{1H_z<*UoHGxbT-P3o^%AKc&sfydE@$$w{3 zUlC>HPv@#5ioL3;mqL*;ksbk`_(u|0{AD8DWL{;#ddn}=iL(}0#F;ItK-wrZnh&!2 z_bKHU;^-!efgM^EyGw0L)i1Xkq=a6i>}Sv%sPBP|A_gh~i(<2S`y79&{JyxyC-oT{ ziLEV{k_q?@E}y;9y${f;1YjIm9RbFpe;zxH>a!IuuzpDQ^Gip)&o4h(4;FPmSEn29 zGx5<;m`Tx8kg_Cn;K8h0w0d3?`mevSSn3~kW!o;~aNrsqeEEmE_-}Q$$hZsYMT0Qf@Kc+nNRKILb-@8qg=?+KS2-q%thxQer#V38s9fv8_-P?kr zQm_=zygzLPsnMa8xS+d91WlshqW`H88b+P@EApBr(~)A5_BmE1tp;Y^R5T$4aEW{9 zNuQ8f;9iFvqG6*+_VVYe=Ih@(u79ph*$wYvTmw8PqZxo<{sKS|S^d|r*sAx^A$>?f zOk-svhgbiN;@xC1m_ z%La|8lSAlY1cU{l3Q{Z)XfA=wUp?+^#Ojxzt2mhyV=`X-8K>5Ef)>c%1&v`QpJa%S&4e+H4{Yn9qFrub@xk)GAvj!)=*3!XzUB*f?i;3qC{Yf~ zI(jjeb}nG)I-%o0R?*$LlWSWxh*qru$zIwoaHBNPAmN1v5MjnZRUQEwsbUqF3QghY z9D;HkuQ%32{STr{clk(8d;SA|M zJ?Bf3=*>n^u0?9kR;W^8Yy*oR#}KHu2cX(;GHsqBx5Wq7+Db7R9&x#SO;?<2$?yqK zN z><$D31cH;s1e2y*NK9N}A$m+ey(R(;dB$OPg!4%4msifdhv(e`Lzmfd)Kx@>F?_wZ zWc|)uVDaq!XfzMW7;|LeGJCX>!zioc?CeJ#-#+aRTVE&V)u>-zu$Ue(%lY^%(b{q> zSPk`iV5(@Y$_ceU>$&do%?w1Pk|Oa;VrgqE<5YuGKh?G%JS3QTC&rf_{zuWGW$6-tcerDwJ!jVSeFEg5;94BTD~(w% zb^)r?Oyx%JfhXCz&D6koresX;SBIkZg0*1I%{@xzhn0HSm)doh-ojc;Ot5&9OJ-0wz_Y3$K67 z2O7syzG%Lm`{|Ei)6fpH^G9zD`AhjBxKYZ>_~Cb=z3(=E?l*a^S9|29*Pwsj2TWu= z;Ifq%0x#i$oN?GK=&-!BG-a<6AWnM#&jL(4J|12ih&NQp5OLg|$?!v2J>c}XRhu%7 z1wXOr0eU@q;-OSewuxl!*C`qN$VCR^_mM-TYKkNp?EMpmMlr=$WKo9HD$XLXq^ASm za~`0mx}S`tVON$zKRG`&n;T~0kD#S$1cnkfBVI1F_l4*Dbig-Q?xNn1SLf*VOsq+suuf=P*AV!^#M3#p@bxe?hu z25IzL<*vLM$Yr{mk7}5dE&+zY!NCcVo=&AtOA}^dYf}z89#xn=JYuoX}!PavR655p=ryMF<*Ey_5R~dER?($ODhCz z3>ff;eG;hDtE6H2=&hg8g*`RY`R(PSQ(S7@zzBDarJh~coyb_>UvVs|Fh%6vU3M)i z;dn`_C8lcP;e?;lE;Bw>ACmeW$yZ@n#!o$OVe=gQY!JRj!?wbUbam*Zex>zh2SrY4 zM^HZF;M;P%lfchcutqe(IZ^dIaUA|`UG*EJQE)Unf$hFlh**xBS8S+C*kD1}-M^lN zfn-b=9XwX%`V0nhuz*?u8W1tkPWl?M2RPM45I?&>HOe>t-6#roXI=Z9FE{AO(&U~u zJU78dSw3Lloup>=7T8FDK@Qz$VTP9P**Fn;)`los6*(2j3(1Kyb(&N^fdFfPhopb; z)$bruz^5*XdMA1!HR78{1O)&XHe|DJFflQ0;0};bael&gT?szVg><7cD8$P_=!KP} zd4qXR6jGNEp#^B0D(dkes7<+s&*d`uYo*1+vMmWLk$QZF?_IL2fP#MSEp!a78)Mdq z_IkrX@md<cE{J6W(qUp0}G~;KW}U?^0T>U;94&WMZUvm0p2^H36jg zBo{mE|3*EoRvLSMS+KH6X%pfY*4zDAM@5Waa^=T#jjp(fxB}#Z;%MW3;+N2%ERGgL7gSX5odNcf&%k(v>i!>s6&>>LA3u7-f=+oKXko#zcTyh8(+l(Oq<*18FY4`8V3` zQfreIx?h?VWg`TVgEb1{78mJzsMG7VqMdwTYI28ohb#nX`9ptdKGr&5EbaN44U1Q6EYzh>WeY!vO z#~D!97!guN%ooEbxpkdF>(!aD&+E6m4`RJhnjcMytb%>-i7^hj{k189i^{sTzEWCQ zF-lwL&We8cl`Uf8efo!igI@unPNPIfp}5X20*�t(Xsj7&VJrr^R$p689!lUZjp_ zNNB{A2@h3bEeqI6{&^Rpk;zzhm+ScLQ{G2LGHHK;pVFH=BUYqWCGFjqCAJ>VN=~ch zPdIUCHV|GT?&r>qewm`6LZbF6i>OdP#m)*ut^&CYsbj{L0X`05J@pWY?uIYVk7z4* z!p%~-_VY5a6s#IHD$KgpK&v1$X=%0kSCa(x-?}hwIap z&SVM}$)eX6lgaUyl75TJai<~C?CL%5_I(TQ=rlYA8Z@>PtlX&(^4=F|Z}lVe1d|h9 z!@l+owaRbZQv2>6CT-9I^{#*IEV=}L*UB-jwIRtR+iD2<<;2@t8mc&d2&>knZ*!#P zWv$_jFmb;5l$6NNK73{&`C4E#O*CuPbgVR?G%8)$ZfENw8?V{5nR4?d*+yexmAff4 zdUmR&vu9NdHLy7Mtck|~Wk&^4x<4T3^JHDw5)l5#uyk9!Pi%@g%IpH;MEntHGgg2w z5C9!rBiOAYt<(je8C?D-bI(#Ib6nsE)i1#C!o}>-h*n0=QzJQd^w;~$8Qa@q<*o521zS>hY;hy3N`xJHHQzU zIM`B3pyRXd?Z2o5I9Pd@DqdtAnVh)Ktfxw}^+})aAMT=o9UjW&443!-bWo0Ot!RUW zV9^uM+FJnV?=KLKmMv}wDqjk$a|SmzcXSRN*DO=aa$x);12s(Lb4ImqV_G_`HlpM@ zaHR~;HJTISaVba?bC@rn4vCKb1!gg6UnLMC^#A}_!bkKhaon1Fp-O3vASkHO+P>Ld zcojh_Y>Gae@Gqq^pZl-;)SxUd?aY zD?QP^b(aLIdHBs?f!=KmaJ)Yq&Qrk<+7I?uqjx7GqX_*10Dx~bvIZ2O=|}iKk`o^N$LX8( z+T$Hi+0~`LuK;6fj`SfPP9u+S-FHL{_nMoysGQ~!Z7qs1XUt~L;% 
z%V60d8A*w1yMj3azg`DL<{zCKm5p+c~a9RHyfc(h61hQZw&qFMwqzi*u(eT1sk zH(Mk=h}28}G5!;nQwGqwL11OJx{~Lc7)S5jYGny`TkA9HMuE_7FodzTPuqq1zptWG zLFS}X?#QI3_D&^@T6rWr{&3OP;DNdJN!Wk=9V_qP2iT5A+xeS;x&jSp>wkf({6g)t z;!+?nL%Je-h23b|Kp;PA#yYnjR9>K59050Gy(rZ<0lg3mNbxM^OCWtio`x+x9;1&r zUO#1tJc24M`&is4VOyxdF!aL=s$=H(N zz9Z-rBY*$Y{6oXT2Y|oU0?1SZ?7rPc%ddyMz9Xmw)xnBKDV4~>9Du!K>CL+_32umi zeP6mIkC<%1J`wkw$zjBkYY;{=oVJ>z{GJq~>_Emm7s;v)zu@%eEJ8X1Mq=JUR2Y4U zfBK!esk10lsK&MR)gR>~Oy60D|k!s9syVa7{Bn@ zGgAkgcKBT3YQRu4EZ}%#s;1(QM1Z$C&d*#_{Ve&NJyC)~9(GLTui`FRi;mbOX_X>P zygcn?%8Rc~`x2Wsk@?VuQNfh!kWYE;R`V;%z~7=0zIrC;`u zc2Zzf1_zLO%!B?KH4S@{-ZzO=yFFGEJ@37lZIyKhTt;@WW_|*}^#ax-yK1aJffTt4 z)^PNlHM1vMe(vqBjA%j95I(LP2??9FT$7?_)s?d6*wXhboYyaWb!R}=*qr`q@8HKU z>u2RbAd_^}&Lb`2Va0<@1K)0X{jRoOg?jzkP@~K4LYpnk3?w$Xm_{M||MS?;LrP zd}op&Z7bq#ccf~~k)A-WmhL@!YLFub1N2-?%4w5S&nx4)r0{yambrxYhygLWoFnP}Bkqw5mTY@WhOG(LJ_k(oGw_G7 zIj@nm{<-#D9n)%U{k_fU_6PV=`A4~HV_*Culjk9t%>lnJ8Eq4`NkRLA>ng+iYZShc zQoXF8fqnVR(BRa~pO5jqt4me;drp)L6FheEAqh5acpsT?Ybxc=B^dlE60Y7U-Lm9h zUT~t_iukSxu&$R?7N>^zZZ$Y4yodf05@QhN*br@Iv>x|+*K6AEQ4NC`!XIX0mnDI; z{ag_bF59qwvxOV2Zr{?g1*-pZrWOz$sjRpvKlo-KO1D*TB&~|^j>LXz1f84<^R*ZS z+z*`dv=~v-nGzoF;k4*eKPK#0zPt}8=ml?P80Xr7QNZ_Rs8}?1M|g>KfFKt2_vy+S z5r^q7cok@F7OL539l8WE8cluDZ;;&R>=4xR0%AkWP)Q1;j^HWv>1(AsA9&2uU=;}h z?RS}CN$bof76~pYq68RH;iOnNH#P{@eJ=Bun_523HL$wnzLad^SLBI5C(U%{`{@vr z!^z=0TixQeTnS$S8wM{cQyAN7Mt=9O*N1}wNM+BI(xhK&hxX~O17)tD z%sHNF=p8bfYWlp8A_(5iFw!!RdVrQK7Y6gnh7e*zsX&uUleLznj7w;U6J}#B{l$5w@e+VZq4mBcL$BnZi78j~WPFhTwN47 zAei-t(CZr70NHalcKHaDON(?QUK}m~K!-4VfcrtPjeqRBr&8}jK(P&1 zuBFy^5&tcxprRV$u27vqIT5^JyIV(@92um|^Sf!aS0Ju?KS5Y1x7jeT49MMeTC-2% zhGr#{$8d9a5r>AuG#M{$UMWZ!@C<<&B!*9+un~N@qux#xL2-%>81ndL*m!~SR+aKA z8PNv9mO<^6A!E|)RPrOS)~wPSx2sJfvAac66cl-%qm3Q&4hoFugs zDl)ETVwULZK}E^x%a~-EL2;)EJnMll?cnaWm-CBw(4xxFagJB(l+mK2aqfLA`v(o& zEZgaWL=Q)0WEQpsa_AjZAVY6{p(NH|;zSTL7 zhQh8{HY%Po&j(#Rw3$7KP@PY#Bo)x6nzF>A386W(TqwbKY}5P2mB-R@3i8pRdWHOj z0TwytmKq6ZTLcFd>!cFbM?|o494xzAt20G3MOiTgJI_f;#D$(S0#bk{D~eECo(*d6 zc`z_-jqEZv>aE(6#!2DtO~T-rSc7<)4@@u8{*1hkn6yR3)>7DWQW&9LmDH^75QJ`m zN-AxCxTOaf4PA%M!e12c;)hRwkW!YYe#-Jgw6d)cXSHZyG{+aNvR-|Tjna9CxxQ?q zO)Kgo1QT_s-K74a*5L*>$PLz}GCYP4~uw}wIW@*`Ny#QA1W91v@~5_wfYN--eAnjp@m7o&teJC*QG`0sK8$`!E=vzS#BO`kFZQfF+5_@E z>c;7_Mpq(5TArQ;lzB(ZDh$qKMaP|=?M@Y~+4Q`IX_7Q1LS9kyv>U32ih}l3MSlIM?zm5`A+X7a@>*PrJgQU<1YtDzZdh(6^Sl_<_hG0=@?`$*Xi8pf zswGXCiZM2v{4r_z+f(kIR$m?mOG=*i{2qaB03~1s>ihYaT2NS;hv7CzG6|S(cf4mC zS~83YwD2?`JipZM|KwC!ErFACFLnc++ zeS^bvvyN*5%(q3nZ*Gdq8Sbyg;Bc5jH?JH)S6P~DvFby-55DL|$$FrrtH}=0g@~{~*6vEOGbG zFPH{9)nmNZ4NW5~`QD@V4l}>T=B%oBF*Iz9d!?pL%D27g3u+s_m*ZtsyXJ(mVn6xm zkH7qwX`S8SU;9O0?`y6ZDq6j)p=3wRof|>#yxjKlV?N6+`1pJrDpCbJTe)l7kycyV z9o?y(oWYZPYMYTUuDlnYWlm)+NAwITT9uY`<&GYNFGszo^FD4$j|p@&OGF88?`H?3 zP86`63#XBh3SnDY!w2esnU9ndkCM5!!Tg6l3_}3%!x)LsbFiKC#cl9>X_{2LIfb3A z@C@Bt8b3D?-E^!BX*Aj#ncgfLyV~|=)=z+(a(yApyQ%p7EE701KdjUKb;NzdTrR?& z9%FH?m`M6=aN*`{jQQ59Ag`bSv zl8b1qQJO8{KHQYt32BWOkIY&Xn1OUDUvAYa&~a;TIW^12Lh}v@TD6QSd6dJPS%O>U zs|S1%yi{3u?P|t+5~x(8O#DjAG{OMCEXp*@%Z$?^q>v4B{R~x&RHQhw6jzOGCBb)> zBI@Ru7UFLgt3LgKj*xu|r*CTK1smSQn;7xi6!A@Q!uB1ml(WT1n`YE5$9|I<5-Wo- z!ql{@^L-sX1(S*-oG**Q3SVMU6xD@XLH17E&iB?APKb#KFS^?6CbE)Y)pB5K`)P1& zlZ)F&u6bmIl3hRk;r{XK^Yr7m)ZSKFi`5EPR-LG1JQpe-N+Rv@KVJxC>a8X-p zB?>q4`BZ7m5)_VFo&dj9x`^priE80YeBh(1UivYM&GbK?Y~BSktpZBK%M3(9LhlnN z=hoLcEw(5Qm$9}wKrM0p*zyK?)J)jz(v~ISRZDUZY|Gtkh)z3@NW9|LqxStEmdRod zZ4up0zChaS4RsD=u&4d3an6O^+~&N;v=13kfZGnWQ$9h=#sw1*GRx1#kV?kIU5&U| z75hroiJLB_Py*ALHosX6ngb$!Fxpp9izX3Q)E!Y3TyW6uS`>*S>Gz!Tt?nHn)7W1b z^T_2q#C;*vO_#phS11|wRl^fB22?iYN$mTs0hl-If{W{7|8fQ5yG6N=B1O20pEgro 
z-lPPC^Kw}XV{cA2L!y(tThbkVTIG5i`=nqieR>q$E;~otE~|;K(Dlr5tNbunQ3k@a zAso9DX{}S&YnD2dzaH`PKby0Sa{r_XEDymC(+cwjO$t9BK5wN< z-YEPr!%_UWk)ZHrBe&S020NLa*MI3t5gpp%6XpFb@ErUk^$r5&jsp*oilw9$=}*i; zIoZNINqTarsiEh%#33<_w?ACht)Usv$|SU+BdUF>*6` zO`Oz9AtB%CRZ$Q`&Y#eppxsp?W;WxD47@s%+VDS%XDCH;re4~}qeU=?{6#S(z_j9V#Ad>qT!;5m99j1+2Vy@sDL{(l}uf->+K5 z7K2Vr0oOp}+mfWuavv@4Ylx!+eN?$o`)31S=^r=8qKp@KYu^VH7vre12F2s<%$)>K zBu*#?ao#jhX@0WH{K1iQa1at|PIXTRG`u!mj6&vBO-n_)2WrMsHE5MSFOI&}=3qoFtW93_G9$kgn#} zuH;2iK6@B_087-RAu3Xk^$!t626xHSFvszQ;`~vNW|?~Vl9rgVB+|6v zUwX!3SVElUU7EY*b;u~U$)L0A3N++Kett#2cxQI8c#8FK`^5ML-Lb<;?kMFandmE4J_!15qQ=WDBD{#K~ayBiD3g-?#Sbg4s(Igi&s6whWU?L4u2nG|w>`Uzt*2l8{oJCWESrZ=pTXzQucDVz~(@V&;# z+^^9*=J<~*d1Am&q|fdnOdyYhz53Z^^r-LM)^waZGZ-PXEF6hbFe=?hD~@*2Jk>TG z7QY83DdYDF{rYtSOE*diy$>`OJrnJ<$DZ|}rI$QKjXr-WFP8kgET*P5Ypr#feW$oH zQrXf~6$SdSJ(~l!(4*9NT%Ght>P}K)VNM1scKJFTu47Nn)u1#8HeV`C-u&M*e<Ra3sS|Fnl#JF6JXK}Jv3U=9NP!mCRuUoPO3 zJIECf^tiNMBpa7L)NPxid{)7+GGiW_#_I?`TCo;K3_{Oq;-7d!%t-4m1=R>DS&!*y{@ff$^-+!4aTgh@l5292hW#-gzo znJ+qe7!c~6RNl7LqIj4u#S7-WGHHhDBShU=0LjU}k0XV@NF5({M1PA`4{`&u0&})p z>cpDx6uHnB`CFaAF?{KE!c{qkb8Y6#3@<7t?7AC1d^dRij$0jowlwDg_r+g@tSA(J}qhaHUND_Gjh@`Yw%ct*U(k^)IaG z*ap1H`f}0+v(+#he+?e0Dg@S5ydHZMC3{>e&@&-E&^lG=JR}GzV3qR=>q2JB^NsZc z37oYFkD^mmB0gFigioK(His8~4K#pnwOvXi2B?d0x!GoYUa=blTa9Fz_Hb-Q>*Bga zYBY{#^N%+_EDQ`b^YNSzd6@M*g`O~|e6ZMAr>}m#))5=SvzY=3TajqNomY02E?{MS zXto-I?h$pR)JN@%Gzd_c#)NqT-oRV&uA~d8*RD8Q*~hU73~I>e&_n{Sd=M1kwVCSK z3IiBB!$N10@m%4i7F4set3fo{H0$Xlq|RRS&~8DyO(Wf1SP$!+WFr1vjESKElIaMs z^$;#X!XYn0bG1u~PL+g(<5XE?)LdNiDX>rOTdx@u>D1*r&&P^t1Cp!D3nUkUaZ0-B z8|i`aEV<%ub-ri22!fmCYfYqYGYHU%w(2I)ichz?xvk_vMP{k9YkvE0R8qYUJ&AFz z8~Q4xadFrjGWC$orb(ld=qq%WNFF1^2sqAfW+-EmX#%K{(;I5#fo^Q^eJbT*a}~{U zB;HAo(qFr_>?VURjhpo5;FH2XaoV5Zw{V>gWGdwNiO~;dQ9c}muZv#swBsh_4C1qQ z!bh@E#jyj>0Q6TNCCs45)7i>K-p>_VvEAxp$Hf^>STpRtq^;Y-HqBz%fDJ5zofS?E zsACmxKBW&*ZMqM#7U3PfxU096x3H&`b*0oc93qObhu3{8Xd<^H&A zGT(KMogf7?;^X+>(JqEk+yhM>w2E@1Ol^0w!F>bn9{tZ>j@qzWh0)~wVv8j8mZo?QYqwg29kJMMST9>w@yusjoxJaUfPYZH9UX3)%$H|SPRN3sH9@m zqF{9UsrV}AhxCs9Ulm$&HShTU?}#-?;kX!A7j1vT8AH$~CdE+sSLHfyiKZ}!HcLuo z3MDEhK0Yx_m~pU-XK~m^OHbbOitB$!G>VFnz2}x^!^sZZp1K_1DbG$KB`t}VgQx=S zdj8WF^>>s7`p&(eZmn&)ufVaYAb}Ut4l#HavCDSZIM_AJ3P5RZ)Pa6Ux@bP_>B z&~UCR4RZh$Z>YwcxqG1BN)d3 zHW+{L!M6}}wwJ=~U`Qo_e}CWWcq#KbRvs|1z(h-aW2`i3f;!1-F=D8%8$(2nE)Gad z@_BH2!_~Cf_UERRWWfE9x3DM%2J!g@+YGzK#?iX>&i%=W}E^fcCW^V zm}QUKGgH8tLiu$J*S~~Rs|bKK;g=0E445c=f#nZg4sz+Ef}OJn+#h$K1K9WdUcQjK zVmKxV?YGC@pSUFH`au47t0xLSuOVQ*&8yD${pjx!KTP+lpmhM-L7Do#e-8v zE|$$L{{+!LCEOH{`_k*x4#n-Gk=QTMUl!l`6`oS;k_T)CzFm_RJbZt)1X*l~RkF3k z?zZqRlXooT$5}W&Y~hiUvjK-=UPFv4$oVM%4^Qv-DWd3;*~o$d>Xd+?4Hx+ohPbPP zpR5Xm)@;9k3|a<$sbDdM2Yk}NXlhstmMD2IGT_DEmLMNGc<`hHQV6}v^EpWh$08%G z+1vq?HJ9C7k0CJE5z*Hq{BfFbK$1beXJHIH@@>BlK7ny=(*QtciTG@I!I`mQLr)=~ zgvfmUTfjFf$Bje}OyV4P!gT;l38_`G`9!~@6>JZS^wsAP@-9 zxW5em0~mgM3FZ<7lK9_;x+S2aaXYSvKnwKit=F~Mkak5uth)4A2uSF6M|I->&Aq@4 zBJOomABO?dz;=kRJA$LRALOt}zDUTlq{A^nkMrUhQv&o6s#0gA&f1*TecqUrNn@>{ z84uI^H=t=WdpH!!cz7WMgKlmzb00|>7$Ek^U4Ban<4*$bkww(dP;^mLr15=t zCL0(_$S}?T|6DmFw3plG+OF;tWN)vT38zbx7y(do=N>NYF>23(e|;2Bi$aOvBjCRK z1Hf)7ILv1kyC|c4`~Xq*a&Zu?z1X3ApoPO*biYuVIz@~_(<6aCt_T#J{66X}RtA!| z<4JyoxLL-t5H~@z0gl;S0eyueq9Fnghp`&b;Z(u)PV#(BpH-kGYjV43YF8QPDyFmT z&%4~IN0|O9!hiWvN)!eNt8gJ2^4_8RF9snS6EJlGx*Co@nNF)z9^W`9FS9-H?$TWXzjXp~^=PE>f&y$GczWzA%Lv=(;rO+0f3CA_76FVx zX~k=oV%#VR*l9@4mC2&{5*7=BP=1x=`*ZIn@h@l@xB|XY9&`*XXaP8;FFhpg!`}}B zva%&eCFDUL!))PAN^m-Kibp7?R`wYJah_27#n1NYv{pznQ9J{{?VGHY2Z`c z0uz0H+N?$jdyAd`pqSt{P^#sN8G_o z#fDRwcj!CZ_BA)FWKKVRSrMyTxDwDzn$TC2QNcNeZ>hxOVu@*huAAd!Grg!lIf&#S 
z38%5Q;H-s>C}r9@jlw0z*JevRC!QyHjx2;HK`ylutbJM{Wb!agx;p}Ra0y1zC90=BYH}O#c$5+!k1)LyN{MMM$<$a31_baWJ zQ&^{3D|c?}*8mOH+&6XplsAvcdyfYCV@b-*+S*(Lv9g_az)KE3+U_e-4q1Kgx#6w& zXS=FQAl<&LfBy`064cpTi35SVw4<8h18+}_`ea3HLGwf+=}DIPS!NN<`28xDJK?O| zYDas82fy8%$pJ~zuUMbLyftsw^%BKm$17=d98E`BRVI87ISzMXGYp!DH_7+Kn|xgI z3Ep@(6#!@bxC^`Xk@^FF=#a6}%{I+Q=H>dy5}xdW@VT-Rnwa-hIb78(Rj$U<8|Ikp zw9ovSER@16lM~cVbTYdb9Vfp@LPIGSVQRV3J@Ec$&UEND2#u1{{+W-OTj1qA)NiZ| z(^vEqSh0xL!%TY6*BawB-@|-dcSdYxkPs1zfm5jT#fI1z5da#Nn$rRt*W28&NC2p1 ze2BNUI`imRMVokAxE#{T;}x!1qL{_^Gyn%wG^jGEeb0`24+AWo5uvW^RiurnSyzA+ z`%=@kfc;T1jfF(1u>}Jm4>AL~0A6+%6&xrPng)H&*KQJ4p&&+@uk2 z4L*)SFYXpc5@|wgE}bc^H5$>aeEt&Vode6#XriQXIF3xq2l!oXV@!yHb=NWNn~o7v zA%dyqm${D@>OYN=SHxjLs}H$LsW;eJwfjCK)zzOuZ+WigTx+FrTxq9Y(GwV9wuv}b zly2!;NVYGtf1k{L%qGa3mI zUFchI+Tr)LIFTVBkT7v!f^tys}T;wv=ZEvFU=ws2=hu7$-{JUB;i$frsye*4(9>Mt! z5SeWTE;=rMMm5Ny zwaO6OpXIhV6b`oLdj|_8d@Y9C-1UYUa1nmUpu4S{lE=s-cnDB5bVy^*%6!*yTzeek zS^^!{T7Rjf%XQ%0c%-N3)=g`DQcpETT3}S?^+i?J@8jI1Fd;+XzZ;y^yWZxyRI87J z-p>BgDhW55^Z}yn55!w&NitR(x&3ZO45%9^S1o+$zEyt!?LNz|xSPkl_4s0`nXf3> zJALe(G~qY=Q1+^Nbmj$Fbe{u9WbUHqdpfmx6aW=?+XFqu6(V^O!}X0~f!=OYSI;yq zf|#1qCgLnxdT;%!viEl$vs`S9DkZBZj!CePavn@$&47e;T>EtUnQlvyGY}0$+akyY z0tP>w=N7{DtNX6#30j$GVhu$>CvgWR*Hdh=ycYpT9YP3gohb<}+mAul&~F@h8T@XZ z=faIE6iY}H)b)PV$d|3Vj)C!CFxDNQ$&}-xq&@>Y0iWCuIm73%4j)t|5Y`S)-)K}Z zz!#z;H1j-$~BcBInA8ZJ zP3>%}gaxjBHfsMWi0aUB_f{(@nM(Sz`(Y`UWQpAuD|l3oWH{vTd75|}l-iGwMW(>6 zLLPF_{5KKJhb=9oLF~zFQIR=0HbySeQnm*PT)`hdGirlN(YYbps6o;cox&4DZQ18V zrXsXmCQ^1lh+FiN>?A8(>?RiFr{34&8kl1R4(>R*$AOyAJBZsB z#-yUv6N-*Vl~7(AQ$r=TGh%miie7TL_dH}DvM#*czyp|WUqOxdD{wKDWo(GtMD&d3=tKM{@U9rSPELD{l?XXa9g~Y zeKWLIfxJ6p5MQY@FdHn!5%M0|Fs@* zevAjIJ%bSWEtZMt4#}dG4~;d(q}a!0^auM1gzzvD+`Wg!{j0*d=y)6<6gI#j;Ih|v zEae(FyRjYrg>oa?PjGf4!HD_|&HvkjeGT<3XeBtYw|cT|5nz>5X3w^*2lk?P<>2-S zjU5*{W#8eBkBmwMrFiLSK9A|aS*!yO6x{$@+P_tKd2=K>s^u9gKc9m|XK2Czu;<+v zs9dt+TpV=XJDu7C4twj{o-D{lyeC9#{TFyBq+D&~dp2cUrptDCAsH!)4N>(=9G)1n zAGw?EZ|OL$us2FWgmF3EvyH#*x?-CyC`c}*r3=XEZf`NZwqpcpNKD#_4yn=A-c;)m z)^0MT&2+1ZTI&n4sWMX(UQg>Y(_hp$ymoe3E-F6c>gQAQfnmW$UZf^V7&dSpJkwcx zvBN6?jok^89Yb3~oA&kPSgU0B7UarL$nNzMOuOwzjp6gZqdeHL-nV(Pk&mkrg~m6P zBXzXSDrAi5+M;ZxaHXIN^m+1GPkfIQ70UE$3BIV3v)J5-ZI)@H`EewwwK{QkbQ0Uw zOE64zM{RgO9GXuiL~`Wk8M0SYH#@WcJ=3IzraS7%A?bG4CGaM9z}C3*XQ8ytqNSU& z`b=gm{fZ8TkwNi}Coz6M>~8B3p)X`=$9V0MH} zASJ%H_OQK)>KcVx3hM7nR{UE*s#wT$xu}smp|lihKjz)i>k?L2{`Hyot1~I)B;PFuq9i5bi8h#*-gU z%is7Jwf+)xO%y{Tt*2*R1@?YWW%XBsT@E4z@GRwXG&B065u4qLp5Me3fGy0;XuDpg zQfsm*SKDL)FN?d32(> zGcEc-H|EL4yd(z$7S5u4k%UA7bJ{=0DDF~1ys}OuA8AXbN^Y_;xZ@ZW%ZrkvDa(5X zdD04&`-hj0Rl~nC8nV!{VKxsZd4TBx+0&JzNbHpeUcGZt$?Eyv-DE4nZPUx{h`-YP zjFC=FZocxni7mt{thM^-)HzkPLX*9_maCgi?PL5r%>BB|WTAQCf<-79R*q-HMfe12 zv4v^I^uv}}8G?ot#I?VHO3WtGk1#_s6sl_#pmH-3mW?|OTC z3;Y3;=oht@Qw-06d=qCAb=OgyY$olyW(I9_w)SEPz0{}L@7cOiIcAJ1`wbE{TO4nA zHpv@X0~jk8&F578mvW~FPl#sU0nd~530h%agYz%h*C|v(L-&I8L>Cgj>&vR6?|-)& zpkIX1KWF8i(Z$Hct>0d{89&Y8mj5TS?13a@q|Ri3bo?2O&5Qb#iIYy-U|T%f9ZAb> zu{>xV>H`I5D9))^0W_4mbopr;5$CH3gzZm$Lzf$;k=Wefe2EP9LYNp1N2i(Z3n^Ar zTNV$ShZQxd2;h%PyZvYKm)yAkOxlTozSX^czn$$hK2S682T%IX=1**a;~zC?7t6Fi zF_!iLp>Xf`SCtb&lR1kM3nN<%Ebz`@Imv5q5=m7?#*v1g{c~iE=L#@Elum{SxZTX@ zm(RhDJUl4N+wgoyT7AlF&eR9jv{i7p9jtxJlBEr3kM{41xg~rTc)^bKHbo9mdyP*8 z#l*OHiDQFGsmjAqU}A5rSB2x{%>c2r5N3Jk{__H>4?{7i)Lv^c%9Ku`P5T-ZlXVHW zeU>q5-bedsrOICGj158Xp?mwQkc@aKHtFW!DUzoS=Lc~f&~#MsnBG6jL>*&IMTuV{ zJiadeR_tdYTUyH-X>q&#y1s%N+(CI~T70 zpJX0RuMD)pBcYW|^@4$JkKuSq;uRAP8%==|P1}jD_tZBGgiLW$9EiWgpipgwqI?lO$-Y-&No^r$ z*@Ccs<+!~sfCgo<)pz^43&GkJxl>;f8Vc{^jWL{l3-Fh7E4uaOIAa@NJQk_!e-NGY zN+tG$sXMrtAlQpEFIiO>klel~FXhxv&Liyn1M) 
z-fhP}VtW)*{FMrgtqU#9;3HT~uD8Jpkhnv1j%>a}q4`VF9g^AeXY{yBdRxLWQW%R+ zA5np(0D<4WhuT4$*Pr-i9gBmN>`hVzndHx4CkyhVn_)sC=03vdPu7J9CP=nPL>G3f z+TQr{s^biIb)6Kr-B&KPI0(Zrxf!MU1u0cc_i15$I5)D4Pq2`LchH#BD#zStCjyIT zw}h5hx5L87s=YUp`-ue>TjPCbM1L-!E!rT1)zXsuS)PnCP5AftxQ4E2vc*7mev7-4 z9@#rR=AXkAeh!DjUUjCysMFbWmQF-pq4uLtExf&X;Q2E%05q0IHf_jqnHTnLP z@|^vOWeUto*Y{Y84kG%YX1Q@XJKmhM3R$OitwctOSc#I_dQZL7zZBYKfA^8%)v_Jb zYp>v3xh?>7E`EW4o<`>z9N8jpG`=@y{yS1=ci;H-ZzLX$a+GJ*NA#&fRa;Nvy z;@3ASzlmQ}FX*v;;&-z?tK?Fob{NDMPgkQhet2xgUO}jDshrqu8W+loG+93UEJv05 z%9C=+D>3sWY3K3sJv&7x8H`=h0_t| z`w#XP_pKAB)bvGQ74#HijKz`v14~mzn|@cWhk|tdWo`8RENo=8_7Xdd$%?8=z_?HH z!nrO?lPlpgV=k-!?Nn0K0j37==0hwblV-RlK9n-=jMi?1=4II?uZSAYp+2RiUG^%V z*-#>$Qa{HQK(O2NUTMFMRP`<$Ob`zEF1WQUt7MUzr^vz_>}2xP1C9JLyziNLHu34^ z7>*Ys6<;vj{?xrPiN;;)a`s+(f04cMk76nBtzn9CN%%exfkyI+cpg8@3HLQ`Jbs1@2rvWFzIZ9vgXREmT0n<2>!&4Qe|9= z#5hXOeCM7>B;(vpx5ovP$;iD|)OsA{sM4Rz*CP?2j{z z+t&6%3oZj6zJ5$tbX(dtMmO%CjFdaNJc9T?^jU|THGOxC9z%YMP(wlOV4Y1(&Sbk_ zo)bgHn@9AXoUglo@5j0rGe;i6$-~NATgzVtT&AAr^(M&=kz8miEWY3+-3i|Zc`X6c7F66TVEZg zicI!3+n}}Vn+dwu{bzh@ZQtI7MQCOuNSXI=;)%2yyN&vhzYT0dVK9ZpXp1=mEsZM; zS@bBX}qZYf{wYP_F3igaE za`qy31a)yN6NgZIdq8U{#%>atu{%4L((N*|dP!pbmZDX7%4=*1hXVaNOYcy8_d>YL zj^rQ=eOq{cJ|2<3Iq%YE&9O_5eUNcH^b-H@%0o)m*%)0`pYmbl4r`*1T6FC+<$jGn zD6nTJx_6eWNAzx|$rgnoALb}H>%DMJejB$|rHU5C;hEvY@i4Z06P(!4`SZ55KESb~ z;A60Pi&{3-Nz^JG*k5K{c~))3xmRwGk2?~b3OX{m`hP+<7hbL9Xr>I<6q)U*+;|X^ z?2-x{CMhsnDa-wwj`-G$UEAft%)3MfYjo zP16_C7tAOk7H4}J&c0!h(V|em7)k@p)ruBZHcy|Sr?Y`TyFt8nvfy$rs~r8ldZ(t> zVzex{;QwM_wE``f^=}u?1tD!uZh`3m1p=(axw9zR#A+_92t>L9BF)3C==gg2uyVs= zwfYW2$H;}A??;->NhFxHcBa~TqOK(;^vf&C z{s((Fgz{a5#AEy^K0_u~b*wOhdR83E=VEN9`I&VCfOV@Z!*cD%9{`lSz7rnI5ateF z6`E|#|7KEM5S;Ys6uA-NGp(-SWAM!qc#!w6BqkE!2GaUJ%87DsF-$5&lOD2c7)y$P z6Z^k%X$vjxd2U5Mz~ZJ=pB_Y#=vS)WAVr zTOgHqW}npf-)|kBm@&WwbgwixH(MF7nvwu)UCG3xKm@b}_Kp4q4LPYmvm_xp1o6Ea zMG)@Z;#%WHuJ3>Um&AC*GK20QH%kP4{zHxL4(-u zonkDod!-)sP)l9G|1f;GnLRX6LN#>Ierb7GSW^3T6)~&cxwx*fq3#H5K5O$GG5%{C zNWBUn=m!pXzI;OIb0i!Q6K?I+K`=ZtL9YkQ8d(6Ia2|bS;4N!n3$~T%M zsqgaeaz65~y`s~}94HMrwD=yIVew6sV0Y3xk;> zy94mApX(GVOZ<3>X#dP$;(YFwv|H-d(8g;EhE+uVv9{?liozrO!BvhEX8aky6aCCb z0iZ1?16c`7thclOJ*B}VujpMS9fh}pPjs-w>AItQsS7M1ms#52a4CCwWCkZt#)q$= zt#m%s&_KV-_MW~d>Hg@gLA~#h7G^2m^v;5ha8@_;H#a1QuI6B6PcqOo8*lcfK}=O} zBzS2# zM)A6<>uy$y{VJPe(H_RwxU-irW8Z&?zgH|anxC0!n51LQX!>B`^6vh7)F!v%qV=jC z5e~N%Am*z8X>&W<^&H;kjEM3q$2tE$>DuSp7mPp#QD{7e^i|%#1F1(potE8a2jLB# zpeD8T&L2b!y?_S4eYV~WPjxbb4#=#0?SZq6TM(|Fs}u4YP2cOi4{9B@RdUr zE)pQcsNvBKitQEXnq0Usx-S-Soo*Tvs<8@pAHrkewmn3TijM>FStQXH#X_r%_KUjO zy3(R4D6Cxkc1c1`G5-`uZu3XDc_lFXTT3`?ZVMhm!`lNGS%VdQ+EUkXfnV-W!hgCH9;F-q7URQ|<3wQJSM9VxBo5#-PqDCGc1^w2Rf?YNUu9DA7~>`RM+&JnV07-VF+nt7uO_|c~N&~3KG5dw#}$Kx23tlr6%u6FGbyRXba$f|^AdrQy|6Taxt7aeEM|^1mIRI1 zm+8_R6+*X5EG2nm34&Ek~siopLa8RJszBSE`lNb*z1Cx z^JlTDgRXH_!g=DeMn0cyLaY56?dIvUN|oPsNk#4+ztJ(Y6ifR{S=H1e1)km8nv$hf zyqKFP#>x|<)-lCxqLaw6&c5&x4KFb>3w=QOwOgnkOvOh4(cNR-`Rjg@n{R*raG2}G zJL8w~JTb*&_Up978UpQ8<|twi3ikYw18d)Z^1tm{?Yl`iLSt{j9MdWP@+OU<{UzuN z>=k5Vdd+RA+EMZqXjHQDX7k%S^_xMH@-F<#$3!oAU*Jc z?bSQynp1!Q8zRs#FuexG7-Fa$8K0f?3^UL%0nSGYSWezLT!ZIOb|&IdDY%_TK@;q) zMn71Fa$rRQK1J4Dv({R?3bY#BYLi)Y*a`Z;Wom;f5{mApF&2hf3|e(w9RgDwCITw| z!UORnUiKM!@7`N?^hBpafCk_*?%xj{g!4$-jgBrGQdspe}l-R4fL zCt&k_f=Bn+Hs87@kc2Z~VPanS#Q@w|W&24s0U(vWL*`A7g4YrN&h%7%0HyfS^I#&0 zqwqg9C;Y3v=0m{*%|H^i+K8Md0_9*`=+6}Y%dfcrY4Cdn z*)_0EN(Nxb%uEEcq~$n>0n1!GC2c5AktfK{%6jU9O|VjWDiGSTGOdMZ(fh128GG7V zG2!>yzQA~$73N(eib{|S7C976A#OWG-)te?$h@Ig(O$u}`uDRjc8knIe0pZ$Dg913 z6>kU1wovq#^ieAGuHHWNa3{5Ltv{(<=Gi`0h_mbt3QHbM=zr4HtRBCl*#Nr$vHF4B 
z+d6PX0iVi7w>{r}w!_!K=5Tx^)54nagFSgC43M7m?gC9tVS{9(ji=jV) z=pfkA4u`JAE#ct_d5Ej2IQTsbh2c+twb6-)lLB&b-nykiFg~7UlIa~-b#mgHJFHU~ z1C8I}{yX4Mm>7KAwZE5$7vxx73-rX`;u`2z3(Hn>%%)}FTfu4_b#JFIT^`}*xADBg zTT%^w$2Q>Db{-xz`vSmO_gfrhDzm&g&HvEfy$M0Zij~PC;!ig=%JF5KCq_7HHoVl7 zeMla_u;r!wAd7|{N1ps{K2*PlX*#mdR5H1W%&~1U(q(7Ti6-P_4Kg5o^PdmU{oR0( zm%aI$FNiZVWCakHDzC%_+rjlSg%rt$oQH+X_-n{3Q3kJoN-_kNUu>PjnA@F$kC^X5J47;L0YJ12U%)hM&G%`TnQ*eD zrkb1jW=XGyP=-y0I-@T@vlgE>ZY*Qu@Z=7)*?L9LChk-BV~oT<9lCQhi0{0+a43@! zExE}nV#;77{$TC+5hoP`{zdGNWSiE#DMQ6*j7vx0p-T4aPf(2Kl;iagoNBN_Rmp5_U;I}yQ1n{Ni0d2Kbd~&>)U?0mjL)Upixz#qKxPebMahCuCi8-y zqA-q(pHGrd;++gx$6t&_B(9~CQrBa3e2;yt)%&$wgYq~G>-Ho_r@Q1{a5Y` zU5hlX7Ex^ZBs_pFYGj7~ZU=hx^kbLVBiQ)l3Vs9k-|1MkPR}CnQ3E&w4WsRTl7wOA zeimvdH`;jb^=VMLrN+*6lRFMQKK5K?%tI_eHZpZkMfIh9pOdk@{|pIq~1lVN2LaY?;03dndVv zD+%GUAMX5WXTF>TS%jhNc@l$i4Y()Fsw>rw66as3 zbyzm?R-Kjq--fJ3M8a)DQw96kQV=8r{WTfJusTKPIBsJ|15pwtEc`eX7ic)}67xU+ z+VOciCwb-&qs0HTV<00);4KO5G=p#bp&nQKJxx*S8I?gs7H%V~i1l^3$&7OdE6+yy zul8!kP;o*sj9GGpi?R(<~S2XRTxYU+KRYkg!stQcCEdn1}u+iLN*cPQQ7PI>L zqg+50i`z1tMJ8LJ_25s5?Z>E#_jMH)Z&MU5Ugw@t1T7iWhV`_Hz`v@YZN%VFjFX|} zH{yWs^L9_%V8W@Uk@mSQ$EImA5|R@0c|S~eo~USo^>%43J{E)Lq_Z+XzrLZT_VZ#1 zxv6T$^JFO(xQk;P-Ii7Lvq0k_41*(zVK74C1j4ZNA&THS__krN=b`iQxP~n?%k?{+ zie!ydo>yP9ot9jW;UYJ}FUv#BCHIgC8TD3E!INLtAF8p7R`M_$j+;$W#-+g1M6jN! zLyMA}G+)_i<{G_X-s7X&{cfOi|Kok7;tSlRIPpaW!@i?mqYBZ=Q;&-jQwv$_FInQX zAVzvLLArFjOs$t$ZxDIGS!H9W=7hOO7Jx6-)K z-|}mbPIXXXP?~w)q~k`*8-=g~qsyV4gdQ#=VL92q$B<>4f>k;@mXb$W4w*%8_q@n* zf!VV4%NA_-+kIX_cQgw*7IKI@v^1t`ueEGQH}+U=Hl;))x_K9`LUB%kA>iES;G92k zog?mWXwli%z93n|Bh9~y*=)K=sEUT5E-S=dSpsZZ^vp%%2&A~lx~j7MgHqk^3rk^?L^7#cc>C#Q&b{~#_&e^ReOhIttby+@5$rR|E!;n4{erhSR5wZT0+Le z8Y$yf#m2mjT8{gCa(?lHJW2@T-(&08STXx}j~{|3`)kU0+upAoJmPCEVX>jY_p3)o zm#M>D?Spq6^UP|z^9dY@m?@w2;NvrPLfAE~bR<4Q6Rsk*U4DA)8~kZSRBL3SuGmZ? zX*#HIqF|O~q?j~$w@}hVw17UIlIcsCa&An>U{mEw{BxyP?n5Z~GFNY;Bc5uK#D^Yj zx&kx#SF*YV*yZ_D{=3@DXJAWGaQxBz3ih^Qb0lCb`P>g+Mk$4Tg@#sru=0_tT@RYj=f zcyiNA3Ca^(g{f1z-hR^lzc3(?yf=P7x=1KBEvAOlq?b!5D}}(%`nz4;49y{BcjNArhcpjOiBYR+SSpb#vqA^5&d#H#bc;oa1Q*p{w#{&uGY<_7yA<34!(;lY~%5B%p5D)@*m!eu#3MCW4F9O9zs%`KM*zpeM8hurNW!zii+{jpI7?qTQV`y+XvnAKOKnd zmtBI(J$ve^?&O_boV2ovJJ`uKWU52;WlBm|axh8(2(;JRF;VW_PHJZpQTlA(Y0CHx zMcV2DVOqMeea0;5ZU&L#5R|cKvsgLdmy5?wa+fbW%h$hD@3zJyFV1tMT#hVqcqS)OBVrtZ@q%R_S>!7I6#V?9ex8*SPH8bVA%BgScI2kWF z(vn0>5vFc0KZE#^UigwO?4PSeru-Eh7-dD0{-Xf}`-_eR7F%Ox##J@}7RSHs#wTC$ z^n;Oj1{*0L7qE}MF2*IdT$@_K6hjKVHE5*#V2qFoy401N)Gx^R!JS<3Hv=F042*qI z{D)C=2Ge>r8AwjiH&DMNnRTz+hHW~3W{~m(-DacBL*Ug;g&WPxebT;(!gcw;L&LVD6E) zX8zNE^QOJFN*{$pWHa%~qcQu+z@|cn;S;$K33M!5>%v0eu2r+w8PV-@#SkPs=)+<% zoI832(y)PLb@DvRqc$rlVs@vGB70%*&D?)921cbK}=;DEmTyh2Ro(xO4z^x4n*+%&#B# zCHJxxYcpv&&&ktyqsB*|0P>%||; zHz!aHO&XuXoG}PffEyF2VJKVrUa=v7P}NoxoLU8YBOCXq5g!^s_V4Y_!p@Vx0?VBg z0l)PBVd|`-qTbrKFM0z4eWBVn`fp21Zx}+#! 
zcwf?06eT#F9&p z!mn>)+94-f1L_BVhBv>DV)vhY#xGknLPI^QphUfM)zPIUwYt0d6_8X8t+Yl@=LFMV z|H-#)NJBp-#*DQMdlxrV*0$k_h?qE28s2B30~`#s5KfG*oKvM@dVgsN>mlRq<_MnR zGSD%Q*eOiamasnR_hmpxya0i2oblO$J9{twEhN(NuZ1PTH{HsGj3K{OSJ&D=A>imUq8$o$#ao{-z9-1X6^m=7MU@JK!@SKOv*EgIQc* zyTntne!kDm z=S`bhM1!(Y#6ZYhI`8YoK5#4zbWg*J&`}P$IuKo{t2PG)q``od33A|5PX{h1EfL50 zO)v~W8ydk-uIK!~lxU~L?{^9q>djcep5X#%cqWL5)mUi_v<4=<)`q`sUqq3c!bT|T;Vi(Ft2EJj#eoC2?%A6u?gw_85XAr zs%X1DiS7STKm_{s>A+Ok2h2fSz&F<+{#%nS__&*bgq|Li>&}awJ_6EPb^@uA8Xh?5 zuwY3rA6ZS|UQQ7A40=5ib^6Ja@Qc8sJV~pkha-%5&n-)cE?$aJ;frVn!4-unKl6kE zM}r@sxvB_ij6YebLGeW5g_G?AFRY6>g|>?DP5h@x3PZkv6?)h4Oz zT2||b=+>fWArSh#0~|)g>7bLGWaQlP3PgBvtmr1ABc1cTm$`rwO*sD!nsftl?5;9m zh;vI%(dI*72Sc-jTG@_Ww458Z$XRkIAW%j$k)6x(DgPAB-D+@|Cjiq_sQH=O1pta` zijx}tMH3Z3SCCa_{14zn_m6#4(88Vt?n4%I7le6EFzDLnMLEa_{j;4ne0y^>JSL8& zTPP}ZCt`iz{QHFB?@l2r?%@MSXR7Ew=})ez z%vDJe{{CfWAQk+PRyyD!PVS+L%=zI0$DUrf*FfNW-ZM9|g-QssIPI~Pub`xb=SJi2 zD!)s0TzACY^_+WQaDjLJJ=ZFKq_#n;6_-)AXDavGg9hd!aXw|!HH>CERDgKUe*FV0 z(C>(iqpL9JwbPbFkB+ILWy6{%N`e$>L~dv@RH=C|0}z#{T%N8xYc!%NIysIy)OhYva{8Pg zd6=fQFRZ~mp6n_$Q6@?0ltRr58R51%rt`+%U&HoN8+7c`kR-3kr$An5zn;*LJbK4e<#?*?KD42dIMCjDD7GR>-LdZ+ zOx|SLw0H~VQ7P{`9;2bH=zMATetnq{DZ6G8fYni+4}&hc7OH;Z;U$ol1Zv!nt~YbK zlocMwy~4*J>Q(r8&$WM4=BiZKasH)A(4{qt1LQhB_OVj~d8suHj)c@3A3`9x04ftU zv`Lv5f!gx6BYpy9h6>R{aOKvPPv{MwoH^} zKShq7^QciKbTvA3-D>BFtr;a0ZyD0S3w=@+3;Esmd%Lj(7@TdfSod5LHFHIVaaablF2zMq zoyHoa?`gXT@>!uQaP}&#V(t79*LzL1;;x7aoG${e@5q#f#mfqln^H#_VLLMv4f|Q>y_~Dj&Xs+Ny~oe# z7t_B=o^8}-x$yyJZ}(K}7J6(&{x%pj^SoG_E?dAdvLR}A+HvP~y~{^pBSxVf)#R+>$NUQvNy}0t;En6jP=SvRe2KuRmpF(5~R0I7WEX3O`5W6jNv4tbSBB-S zwJFty7gZDeBVG;#!*jCM)^5u}DNpN%1kys} zSTe{lEaV}KNkqxQrEm`&c8e!m-(@w(n3L~|msjGLK_X#+W`S53PZ>HAmGDC2UX49? zip7E&@}Otr1+yBji~k6#r63+KsE7hyp|g2Ou^m4G!jO^$o(yQro^8Pao& zmglA(UnYPh6aYxbSgBT0QEP#bY3Zz{K+{}=OQ=%oa~kR zDDu`;<~*#e@e;r{wsP#tlfk(z+kWHFd$XsB7S9U}Gh1ZDRg?>_M2ySDUPn{x$=z7X5R2dM+S!H2L(>87&*usUa|szflSJu65^ zFNplhY$W37Nv&|Y^-e~5E0efu6UYcMcxb%)Vz#)gdwv+$7+tmE;6F%u@_o90N^O#L zYJAg_fa)=!%$W9ktuK_`X+@{8b$(d9nAC0mm#c4I_Idk_<7UGM-#|a<8VRYt;qt+p zzK!M|4U>Qq;vLx|p;MyUy-{2@v%m6|Wg{q@4O8Vfud_Kl`nTsW&0PK$RaUWe4t!bF zs@ua*#?QgmuTc$G!6)Wzp(BagKfd;|E>nOaJ(EppgTZGN1He|C<1{$du|L{vumfTA zNpzMYph&=c@IhXp^)xM_YOo}c749THl^0O?p#6c!R?*Vr6GnXiHqMPL<78WnL#JWm!>JNcT z*mLUZkg-yo&ymD`a3mEuo<5Xcd)8cPR5t>9HiGP?Fct3YCdhEV1q5K{%TNhw8dm5|ENy2 zG=}=Hyy()h*h%SfzP;#*%P~iG{=l=ZF}gwYowsSotL!Vn&b|8TKK+?3h4}e5ABGkLrlA^h z@@Dm?;RWs}3AtG1KRL&AdS?&#)m?{QYMMulKTd#o|@_O_k-!y<*}kyEeWT3=fSQr|#7K@g7x z8O9YvOce#!#|T#5Jhv48EQL~T?v^IRGe9%!$JB93{a_CvoiL_w;ROu6=cV~D8obVj z$*LQ4^05(*_9|$M-3&$q`8+s@KC1^dbO<_@wLoRWdGoQ!6}iJ6!y#>Fi^Rt;```?l zk?24J?#GPEmCFkw4|l27qU+_`lg(#M@D@9WU(`R`pa=f=*Y5A9ceSGK>B-}HMsN!w zEj!fTF7=7ux5&}P74LVBa~%>qT^qO8IM!} zyI;D4_R|vw%gr679X&9R(Enwn_5kIc5<&bfR`h;`k8!$dfvjnrC3nv8-S*!(BfKaz ziG~Ylv;O+K+2;tLa#v!=~ zc*-<5@hjsn;>`Mv9^=GD$EFQOeNj)h5beA98L6HfFr%KEv!by^+iiVu{ZP<$;XGwY zC(*VPF>;!MEugnlxau2ECfe@n%2tT5d}eh_oPRo1uwqMH0liKHyiGXIx^0=M`mHWo$ZSU z7+ty4;(3}p7I)iBo6VZkJ)XDUEMG8GRfRs}GP-@>yK!)?iT~TtTO?y|?@4k$DaYVh z8IASN6E+f3I_@Xs6=I%s{1dKV`4Yqt}A zwQ9_J1T5lQkQV1{M27e0|F!4gIc#{HvodcyFMC{CYFq_n&bL^Ep=B)P+Wp%_|DESYnMXLGZ)b{w#3KFu zZ@)C_-Ql=jUJqS_?@nZ|lAFVH6Sg86E&j?~^8{OZ+Tw76l{QY$r4>pAXRQ7U>R;+T z=5_k|`%X-KBF1=4&Z|xSbhYpdpIB*A_Fa;W=zH0g7Dc^P!!=c`(-b7}NLj=)*SJ262)UPIA61Nn zyas%l+~-)$cYg)6e}4Bc7l2K(#hgpqp?<`ZPIdmGi&s9`M@{SW=0@JVIBhikgp<+X zcoN8`ag7J~ZY$?8Ddc0@fz*uCzD}Eix;?%yG1EM*ZI9A88GsV1zolnr|LzQ3VcaDjBg z6zPl*HQnKs+JPPQ!BRFiwFNj6+{M6o_wejXk3Xy@;; z+0LK!7bz>mCuHT#>UZ-p1)OY?iHq3=#Ais?I5Z*EOZqk{I?jf5I#t%gYp|_-w~P8o zPLE|znB6N0`cZpy(>sgI!KVVU?dx0 
zTM+&8D^2NpAls<+IGFzl3*eR^ZkYP){TF)i>*7vn3zP)@NpQu`llUvdG8n|~&6wCn zDy=)0<9g;lpKCdVZgA$9wX!f+$=zFuZu_bt1GPQ9R6OA#Y#46L8%HT-MHlfl#2!{>zgrjatOOWoj zErayde(mmD73b?=%w%_YaQW_p_%`HTmNt2?o{fJQ(eNe*BUXqD=2QIDrxit?3PsR6 zG5+97_;YTdR{qA+{h#I|{*L$_lChC^Ogs=7&z3LxifAQgx?5(yGy$pNFSO_vSN*2n2{jY+-Xd~D8R zE$xOpY*TjIxsRCsS`D)q+Q$bYU2Sq7nMqb3E!S%KEJs-5CMS*Y<31!vzhnG^Zana@ zU?)d9@tyz)Ey;Rw-y5t88}_FmETz#Zz#X(O@n=n2cO&xgcHT^x0T-U_XqwYu5MyGE z|MQ&M<|PBkR`XDz?KK(_60a{Pu}7!wnJHncFUU|disRN8;$fVv zj5HUh&3LAGM^6;uBp1e88invOJbx*QqgUJg=k2}wTkJM7HmusJR_tCfx6>S7sK7E= z-l7NHBO0jx<%Zf%^((rC+1IR|s1H5)bZ_Pj-h!n;;b$zOKxksL!FF@=)~a*DT2ITv z1;zyU?7YM56gFyQIz877zSzUXG6nnnUV_w; z*?XP*Dt1XPxq{l@?5FtZs#byp$Y&v+4@`c|E3{G`wZ_ptQP!qiV&Zt{m`+7%+ZV@G zO{YWoOKKC)WIigm^>NXRN}n#eec`t?h?PVoubBosj@{-=iTIQlx&n|g9Y=Wpa!&DPwzq)Jt9SA7pXKi`|K z(=#FwcZGx#lPela5c{)+-koOq`pB|m!VH-jDS{er=C(7dP9Ph&WppJ~c(x{}PVyKb zYmid5Af%^bA*l9HnW|54SYRNH%QBphkssF8b3m&>vqF4?V-?J(%f%fZZ2!9J6?sR; zaeKxR53?AdGhNfMcX|C%%1k|L* zJB*9XU)Z$-IxfyJu+}Y^c)EIfXYmKIGE8GJfcw=qD-x%1Ci!WL)z*zH#NFz=XSKb7 zwAV`-(el3g$2`!}=%U`7!5c)wcyfnyC0JAGnpG*{f0mDF@l?)6AKN8X$q&cYSvS|#0TdR{xIgf z*{=xhs~nX{)=V%Q&8$fB@LLmaSl|6lyGGge{0W-D*kt$z9hfSji1=gg8PwU)lFU^dGxffYMHp92M~rHN%NsT-66+4Z z$cxSxq!&xi(@3Q;(-hW4w>kQcHW8!*?us~0O-=$4)+9P%AC7FJyLF=^pR8jhM?RN- zq$`rKcIKe;^6Bctpn%J9p+jDxc135W^8ta!o`E8*@I+J~s;UBevd+1-zw=mzqK^}#uPN4NePyU)ft#p!3(1ymHSvdu zy_`}5{u&i%!Wh;c@2QpScOv)uko6fUH;j@SXtPfZeXYL-HV zYS%XSh`dFfqc4S3Oza~h=nJY}zoR#QCLWeoA)l=+z6W=z1XB!(E!ubMr>!`ywbFF4 zRo)^G@I9&dkBiA^htyUR=!13BSUCAx1NLVETY=)o^rFmG*fno_RU)J*RC|IMixyR4 z5?hiYV-DpFYGeKMBJBqV|DEavNf(SaX@#rO4v^N7U~m0PMO4Yui>F~Hl;2$*txwef zr@e7{|EIV+r6%jVJ3d_nDSY-P5dtn!XAyU#sE8Sf1gJ zkx27A17nLN(`8Ap8I&98}|0YAkmZ{81gQ@q(3=6^>0G)nZb5 z9HyRFut6r7Qa>e_QY$dlTCC^w6Zeyy%%t=>QF8GKLzyZ>U$Z+e3+gwD7(9MWU6wxL z$)P%|>#b%cN1i%_r&IhCoo%Z#!hl80>uEJr~OA#D- zTt=!+Tp8a}(guuP#B81=AjG&t0c=$3BY;ygxFi(XJQ`AD9Zp;=>APL(k;+EMGHq1#L+tAOcJ^QF3)k- z1k%;UZ?D)Gg`@EQoV*bCJqdR;M`u!}jU}07i&#fC-|nXh+MYRlHTa(49R06%cx&R* zx|pYqYu+7nuhe6IOgS`PP8xORh`8({N1>#9!|MRoD|NU7UN$-O+x&?1L7DaFk z#X$#h+z5=@SMq{&{?+~%tAl5S_dmJYZ0Y~;H~;7V>CxKPzWtxxbEvo5PjNuOtTPM^l_pybu|(qkI*ef*2N!)Ejw0M4wz1Q?AY z{?|~;npsl9$z|HwoCapqqR91+bKhQ2fIAW|=%>VR>9%a2GDqBH{v%Ydd(^wDv;>;G zQLJca?Azwix+7WOhrU6#coVT=G9X2})TG4_ZI@Snu;v>t#;fxw{1LD5tmyeVP}ZRD z(*5j)OG#f_HyT_w4<{GqgOpjZVkR#`)-i*_ES4nawrw5X(zD{$qW*XHiQ*eTj4fJB zF#cHu_@*Evj~;CT*E4zw9v%^q|4N%WQ<(jY@t=2&P~~_3fGJJ}n1~Mo=da~1EBH?- zem?v|vL;oP?I;TTnd*MZz2fNKm6r|ZzbWXDV)3qme@d=f|F9INvy!C1_+`l6aX!rW z{@`Z@rR@?Vfj#fX8qJ)SPlI#(<~Ru7elu59lqcm2E|95SDU=Bq+0#Mr>E(8zmL0zr zklJmJ798f1jslk{%nUNV>NZaFYew9@X8BOS7Z>j&LHjJ{fcMs#X}{9Nh}dCmF{^-T z#Ulz$9tYs&p_L<`>re+y&-kMIdvTr&4gli;U(6#!qj!7ZX3>CiTx00_?-K`vD}v|+ z#eAf;fKDJ_9zorUkf30H0VD2*nTl-<;q}o%Mi(!NggIyA*`2S|MHmd({4f*9R5XAOKR)-vJtijz5=k<7tED7goPA6Qf0C4NoloRQB zKs-DUG1q2!RucUMoijk5E)fdL^+l_RfVd^+&qtY)tw9a|bd~<@5YFdobjn8Wr4R+X4eYlyaT^~=t^@6_| zeJLASIQx$O{&zTSywqxGY!bP+k6V;tZ@#0v7CvHjSabz~&c1f)BS#nji*=D%N0@E& zHdx{(plQUxXt@;(hQne5IInibS;SElEoV9Z>`eCJ^a87B|6WBKFNm-{I*_*l01dQT zncDRNjD=L|oR&s#TogCZ>M1Y>72Gm6;#OS)Ao^4w9if!ZeS~Ae9gqnzUHh-*9&<9QWtWq|J_&6yN?8VbN*z> z63?&9h>abob{SB!dHXo9C~$b0o%^?FW zizRcHX68-lAoLT!@jCi$CaV+%|Gm}yAcw&MJAjL+ln#P{xoF3&2Nq)*@h?`=&!0a6 zr|86I6+$Z6pk|2Sk3ML_|I~&J9%yZw72b%sqEpD}Xmp8-uk)^Sm25!jV$g4U5M|)( z=H75t<%5J?I-pW*0=*}Ag&n3p?2mM78U%3n*Q1AImeR`20s+|Ydf?s^divAwB|Ht? 
ze1SgAuKYq$5!bFfC4f;rFS_XRjyN+32%UtEo9jhqzP><~QZ9lB{P*xaW&|8+^meA4 z2guL(gR32Z`D}zrD8VtSSs~$wj>*1T8n=$KE#kBax!ohlF!L7D05g#TQy>+}NvaUD z-V?lf9NA;N|L>OlBtGjiZ3_CyQGv5F~&xP(c$8ECE3iD ztT>Cox=j6@9Y76<-qkeU{X{e{$!V zKK$_?$P1@SyFc%MSDF9PV|s&xipI9ephq_pIw&8O`PpSaT2%!W%rCru*3SkH|A+Sa5r!`TnS5B7VY|4}Z z{XwPm8loCY%Q-0|#JQL8g}emm2&|5j>#Lp5dSznBdES^ZskVK$o1Ek(yvXm(;@F0VTk%+@foJTA2n7KW)3m-_OgEXqlia8%Uu% zQiHE8c^*IP%f}W^u7_7N(wXZnz>LSj(A8Rb6~iJE%V^py#$IqR3mf(%68iMo;)(Ke zAwq6W)F@0oB8zzXhXnQhZ6Pz7)@CND)_c8p34H^ReZjR zUO)EaVQP1io+@h0N5_GcbpGVW3NdI0Wn%BJxMvMK;@e$|W`2SbjZR!3u;weq|-n54@cY8`u>;5df3tj!G(JC*Py#(dN zf3uiUdKjr#HWm|UL`Ot9toP?@G+J-YjH(j>C}d?(16$#QfKFm6E-Z;C{D)S7%+mRR zvG#kZH<4tBS1ZE+J2LeA-EzV^;AtPw;A^ppWI#ys!%vs%T|ZMTCt22`lQ^gv$M8OL zqNN`;Vm*NGJ?>$WN0{acexFunwJy7d_`!YRO>3<_`|#e)k5Tj~plIqBoso@`ItE!R zxqfI63ObAhsEPS^vAU#}J!f9qWc5p;>J=Kj4};|CguV`g>S+rUC9QVWeqia^0tLZw z;2@<-!Cz4JwK~U~@cox@r1pZ}nEvAvbZJ5WwWtDPsSN-JS;k-JrMyLKzJ3e@kd4x?^Cfo^NYhVv)N%ldq7%nRLJ|Vhvq`OiS2^aC(KU)kx_r1Djp#^& zEy&hjn815 zV(8s_=*nRC^{2dd|DIccW#>_x8i=(y&xIr5%!v%L%XO@-L@q!*Bse_{uInP0Wu;Wpr}D}4D?7wGP*%sTF}Y@+p#Zx-uaKUsG%$FPxN!0wU3hoP4^_L8lwY|hKg zf)2C%EUYVjdpy-VPv;7(@in-%KE#DwNnEwA$8e*0?$R2p<6NPXPE#ex#NVX-{z(FF zjyA~K;Q`r4;FSSUH`hd;k(HiN*KlOdr|)#uNX zoa6tJLk?Jfb$tqiGaj2Ne+K*8b`WClsm|hmkKJs;I$+zfbVP*3^(nBjZtKN(5@Xc8 znPmT}U-6!JCUfE~K3bH^#Z3+s%o1w&ik45MKa>8Q1`qJdGy=j~>&0NW+uc3pyKnkf z$XkAg6y+;sWPl}64G6hLs_G5VCj}s#Pqi;^=n3G!lgRi|-KVJnDLm%$kz4_Iej}H(#Q?#d=U7D$$$4X_j#70}J+yPp}K!k%O6O?MTc1 zO52JLZzbPhi%Jyc7SoWN4&;AtYbxUfs^#2-CCCNd^LURCaerFZA^=Wlq~rS){u#n} z|GHr-G9rR1mbH8cw?PUM8Dm!isG08IEgoJm(RU1Hd{o)YoYTVS@Jp)FEWM@^CfHC0 zJ9Dlmafux-G+UW|vdEox$DWsW6^9mY`R;xjI^81R=4VlaeGoVU3U$qmMTdCKnhrg& z_y?%)MbD$8MIk#l#oo)lz0g}iIti<74y^SiaEjYDVHLRq>Oy`enJt@%C^I^#`1v2haBgTZR z5o)PN%K@ia)ly8qDCIO?=t-zK0Sm>>)YnNW9Xw+~3C|NZ{<%mnGi`Dpz)Vr=2cJd5en&F8v{YVCg!cTV6tseINuIafOT;+q zs`=PTRXgqFa3^2Y;|CF51t&RK@4tm(p@GYJQF5FZqJ?^}v;l>8Up6JXWg1@KtKFZOVcX%n3@{sRrqG((b@I*iLhg}Kw=Qf9327H9nzctc# zEe*ygD1Y5VtrP!2yQG}x6k3=o{Kmo}W(~x62?@+V-(jr9iFMO8fds;BXT|J@hvA@s zkVENQ`UCoj>)F$>hES#adPhY8wIP?kHdo*czZsH%*@a8oE!YCvY$X@RG6C!@`-76pY~+i_>X^0 zCrwyuturFN`$00l>pDV6pbzIB7Qx+YF_UnZBduvU1Z2-ZGXS_`J2zF(MdgcjC9Pis zwgql7tQ#W!JQGHu_++c&fr>G&o+Gym-=lJ#^{F0HmH?P*tg0ag0~H72+y-1P;TFl(AE^P zAafmnf0ab3tY`1cqK}Vpo*7q^8vMuqc>Hl`A+74Pj4!%&%5*LVLORGIZQ@PR%Kc4Y%xDBV(DC|DLVZG=8xNXVk*0d4{^=>%Sq2gtygK$<5I}ZbW_+0 zp*baDVv-i&c|#rIV)yYzpkY z>5k#1Bda8_O3C{OE(|gGJ@Y-bC1jH7wTP0oy%MO{;<{2R^_z9|Y2jRTorJ?c#96(H)y zQK=AXey-XWZk?i?2vXT{6%BC&jRRawdk!cn&6r)_0~XfWPG|Obp0OcXeTK0(mR05L zxN9{&7J-T(jfV<#-4sNLdjDYd9@&c55D}shy0@K;OW;@=kK+f0dAs*#GBR&`yXkv9 zyzg9eU0s>e7>S^0Amykk%yp`c{=nWw+Aeu**?L~+)DC0kKw_-eGT!v# z{hC4}PKMsQ63`M!8ce5;2`&W1)$L)G&(x>5sqp^0m>G)@QK+4nP;WvbdAEr~GpQpv zpL#U%B$yCUn_xVps-@bLw-3Ho)j&uSwdQ`6RBmxhlbfT++m<)jz3|O^Ev$seP=~4O#mahxM>1Wae+LxaHHKu5Pl{1EcdL^0` z|M1CEJcviFF+Ly8)(Klz^u}cIhE-SSrQq8)qP~{bzfZ2S)K7K`o>+|yVOw>at!!FN zwaNea&ss0-QXB|;Jet?rMfBxd;*2(x;eI@hVdGPp85>YN#?>*qXY=$qdqqt=)EH7@MaRCtH?3|VTJ}5f<5$?T z-v9t><=W$hjNH_;O5C2U4J)IHDt=EibY|>mW7i~GW$I4g zV$GgG?j2whG~G+K(8%r1i;yC%(>)dXnDkQ&s06UrV9ycj@3Y8~4~Wc` zp>x2-M;S~1>F$i}x+D)m)xC|BC1GG14QtOHI$s($^-K~=2W_51I=IFg27n@HLD!R2=g& zazCN7X|MSx1WUL}5_8drtl=f+0idBXj6Tb@x@% zy1$L>13b_oDHU>9qYZpybRy8X z%0ERSP+B$E_AqVUviyhf9T%<-3h%IVSyy$}H$GFXWLdqg&lMu}miGAd+)`efH*O=Y zC|jFL$u;%18-9A{>WzW7L9p4oO&r(Of`vINm?WLEz(d>z0?gvLWNFoqez}o+uS#m+8!MWz2C@j#YlLGPHCegvPgqvs}7C{X*X+t1EE%f+P1tyk|^dE3PA&scUTUw_N*T3)iz=W0%~+G#}d-!Culma>IF#ItxePGfDK*?+ zpItNeX}NXae{E8mp-1f+YH{;hclZbwLse5kuZW}RRKLi%VUtJ_?a8YLMLMYt5`9S2 zAXTG>eeINvr@TRse7S>5;_x}#e$ec!{%Gv@-x%kCE$l>^`oe&u92{LI_9 
z&4v%g0qBcL$`Jay(3XRA^|EShH6U6ZN0ORNLc36VXQ42^Lxcw7ir+Am!FJ+8BS!nk)r5g3uvNUNrREgLzL1Hcp_d@0x?Hj5#d?-7OW&;ehT{&xsgq{ z>!iuiIEp`5(ufnSWDrVGeo7D)UIT`;K1xrh9~ybelqzmPl}TnCO?0(y52nW3J_xnH zp#O)gr$}ZD{k_C>+p)K-)J*c`0{*3nuZJB$m%X8*ZQ+TrHc*KH3&xVlCkRjA@PDWfnt!eGj)x|D9;=qL zP-2!@`2Qp9ucM;u+VF3j4#@#2X@-=uIrk;&wU=p`8h5?H%3>82>u#(Btz6fL=>!+ znTHdY8wC8FK1n7di02&#+C4jntpu@^ls}gHK18Q>ZwUz20AZv5tv@+Km>N?>9CdL5 z!pfN4LH%}4JqcHxO>y3G4r5%*)U46rXE93iWwCbnNR5M3@)Fy-l#G57up{s zwPxa)mvMdY8YIsSUMALL*T1jAbpth9rpGU7ua0P)+r}pUM}X6RN8c$K!7`>_@(Oib zl=?xY?5>KG)s0op(^}HQp}QUBJwnd^oTP;6D0RRaU+&R#-%3Cw%u&Kw+|9T(WJpO9 z*h>^-EThm8u{%%Wr#TJ}~9=etT7Jm15P zyp@j|XT23h7b|TWXI-s1ceow9CR!Ph8nKTB{Tm_)dG7bi;gOL^EkbZ@Tf8T4=y4Fy z4T0zW90{v&nnn#F#HW)hZqshXlc)r8vOti?{rka z);nUpYAfWhn-4q0%-mby?cQ-ae!o)!$*4B%uN3K_h%h4Cs&5yd-v`r4xf{pLbgWPP zh32rs#}lO!xw7w}I8xpv-9SyU{EhHN^RB&AlL~1Vn4wtPM2^2O6&iwh?z}QT*wsI_ z&guBmchdI!9g?|J##0rXa*FRxF;aEO6f982oBGI>E8CU z$%oi(Lkg1R!3S?PvO-eEJB%}AOhYn+ln#z&UYCDyJIyvMKK|Y$(~Vydwaz_z^P62H zO+^Pxi7ACbThzS~b;mTG2QR@-NW*NQBf+38s17u~)d}DgsyDe>5ze{~ak#2uC)9MH z1+nbvU7R9em8h>7u$Qr#*MQXCe=Q&y`;S`+8|b=0QebMa1MM>tH~h`wWf&qQOftW7 zj8*uXE+nomyCRk79g~wF`pCg*%?9cTw898!vxm?a%gV4oRXEovQ z=`q3icdjX+pvR3Fm0Q!AF&JP;$uFY*>SVW-rr$L3Ml9qyu&R`96?Wi0Qxt*2ndEry z#5uzh7a1>I`m68bEI%|n@94kZxtl!BkbiN+FagmOU*~SzUUB#0>yl&vPR}BoZl-4s zikvPxY`#$5Krpn1WiqvHd~&Zjyqm2QWl z+!mCfuh@Z-K|7*tCa6sQ6kf|C&QS3KMQti5c%k%I7&r|`l$xgAT8YBmyN7k`_=4zV z_4k}E?!xNXoVo7j>dp8cQc(6|&wm7ecPHgtpxZK}PX>`J05QUh8_R*l>yaIuN^G0Wap-f;UP^4uC>&;fulWfm?GlXL!DfMsksUZ=v zjqiknC8pAY0w=?Llf44tj|EAyLdJ3$RAMM!D5#3<{z3#f?yUG5@Rs72?l`H`K{|G( zzDaEvNCK!eo_MM7LARH$fhZzi7Z*ePMz2J2t-T3omiPDjmQD2VRCvqA9~zb`%qqWP zm;d4?B0iMgeJASmt&mwE@HN)zh&UD7F$+<@&9m~_hQT@A0YHq3A!H;Uj{6#Sesnc& zP<7`cmO5%C*GvK_VF^scbtm4UJrU#WRK1=uN?c;9EC*!_DWC)T6Q(xPChXVZaS6Cb zSpr|Zo6KI?Ptyn{a@g(r13`xY@l4)iof`^OSd^^eNa)sY_Dr@ToEj0ZYZuRPLO9Dx z8r`l3(q!r}sX8=4*em-s0|q1x=v2;uHCrKmzWTNa@V1s=;T%-4=lCmn>~XPx2ChlR8H(DHE%#Q27n< zRp8hA&l2X4L~bopEB4|boHe8?eCMO>P8gHN;bT-JL9|xOwr%0FA!85gjlY_h@~Y0& z@7WI+X?+}Ny$U4(m_@wx$z;6*5JhdghT~TNnzj>b$u%Sz@HER2jmk)R20V9;G8nhT zU#0(iSNMk<;L|8tM9KTw@ZC3fi-ia)c)Hr4C= z`#+8Hwb6}Ig*kzDcO3je4xz(Kc^!wp^lg`8+RM6IXs3cq|MQ>3{>c;(xvD0wLo|b(%?-AC+0@6+9}H%ZyUqe81&T&qrJmUSZ#!}Rh5&hIOK@+1Y=(gdE}hPzp$&^I}{mCNPbG4<%Ex{6tr9GlO@Q9vtDlB z$+L&d;fo&vwNpBe2JT}`lmM{A24qJt)#0b{A2=F!JIEKX1xW+~yNLbHSXw@t)xONt zJqzSgN;IG0E-%wn36>-B##O-h$U8TNjkj+N#pET zR@q$ga)s{!97{Oc%lh99PxlfDtW&VFyfU7{9TT2 zZ_nIw{eF@T%%d%rIfft+i4*8eX~3q~jkQ6BuH6y;#Ew1&CeS_~uo#Y~Yy7X4^A(YE zzXZ~FH*h@$&LzLVoaWh1Ej*!Nq5jKCH4aM>K_t)%q<%y-PWXP1H}fnX74mCPmECI3 zrq8TAaI{NYHsu4x5+2Xh)F*;l#Ds*I&Z0ylchPeoz9Ptzh8IM~bR;kx<;52RHzL!P zoANZsojL@Eb!~h5Pp+8r3i}~0&-)4hTeTVhfKd3TGHts*ZDgijDp>6@Gr8>u zhQ?MK{&&+3%#0U*1GPRG1oDi*7L$7?uSOE9)ymXm!leT2T}Ua&(~O$L25BF0)wLdW zk$J~xf0j=JIk7pmW9b%&g_*yX`ia-N4~)9MvDD850%Ic-s32(oHp-~I2HeI$+NwY| z_D+AGlxhXp1}L@)O8>8EK$lO}U=Mn%cy(+vmmSF@>-9%RpxJ{LX_ zxmdV_tn9N|*EtunW|KXvcM+HdJ3Q)vYVWt2X_s&^cGyD9-ql8R->FmyE@aU(y`6Pf zxd-Irx98X_0fGB2?Jnhkm2ACV{u*GU*pC|aW9b96IZ-})>`R0r)Z=n zxHv)I(j~44`XqO#q}f-$11xv2@E@$j6?h{j&xEi4)NNe@e}ugDG*{~PaVs#L+3}p| z%QleZolEnkSf_&Y-Q+hrm9MZUxo`eCIpBWYpCu4O?ppmvW}x3>%f3c zz5}AFDidvIsYz8)_zN?+RfpX!GE3zLa*!e6)*QyV&mdutv?rjY)+(W1Y_jnzB#@+6@V9iEgPIj_p8Cj& zxzum))`16T0FCFF=eBE>b$+y#bwSW8>avU_`8A$SI=MV_gDu4 zwy=n_+fV*^2$J9oZ3@cwU9JHE2IKXImw2-R>;{!y8$Y0k$e5U{Gtnc^@iq7MQ4ZkL zxnYfSqAa(W7lNV4__%Z(M$9I*r_)$Pb7XcQI{$8DB5%0-`?B&BklED3kGz?UZnk@v zAgtMJz@Bmkk3!HKPNgKOo+|x^AeM%K0V;3X?9z|g1{S=#!ml3 z2}I z!v$K1ckt-RxGTPqK+Rr)P01e3Gp5D%yOG295$7{prKgR)?o}e9Ke3KU1gH580Cbdx 
zN&@ShAQ(bW0BYDwb9@nada#*XTENkj5a9WSOD%z?N-2V9)${WA!75LWggFnZS^6g~*U zQx7x>K@9xsb~0-ajlAux_u=xtFRzuW6MPF`n*IjVEgZn4&`1Br8a9Y?R!i zSLsI>{Mql-g6ri)Dlb6Hkb{);p6)o8+9gr@_mosexVm^CMe2fZcd`FNQ?K#?rWTUXvHSyYheBw1^_?)ZlUMzkC9)59m!d8Yhu5eURDG@Cs&0_ASa8GPA2khKZ62X~Q|(IIJB+tYh|XOhAusy<55*ZigYT7A^O3fA zKCdt-dJqDM)|?$wX<&WX_no(zntZ^e+fx{cfyt6?T(%`=W@j_etBU8#G#Y9CvmX4- z+{riTAfa%Cv5d7WR8@XtA?f#G&|;8ios!vzpVs-t{<0jKMWlk?KrF@yE&d_RvttS2 zQ?-M7)h36p^e{$b5Le$$1Wt?Vm~cbUx6?|u$dg%H`a&HxLiy0eL$eWwMeM zGE2FO^}S*R>!TMqdopOQi^V{PUXG@j1s(cXPv61=^I{`h75p`4mDoVKkj3o@Kqqavh;G5v}Jh$$5g3i`vCwBiFm-+0R(Dcfd@e<%9Q- zdLS01jStRGB=!9I$*xsPz1Ci_slX)pS9eLh-$Tl#ywtEDsvDkTM*WQ2t2Nm!h8WM^ z9^y+cR95>mnE|r+^U6}W@1_uJ@Obl3A`qAlS3V{wN__u_&@!*rIX1B7T(!lOf8eLd z0Z0W!Z*UqzcMdk_RtR;61^l%kPj)jnwvKINe`SI zyh)x{sWAooz_z~O)*%S$6>(%w-tQmBY!Ryta z-$L}!nykN^Kv%H}g55x*kunTPb-GY?MdU@|zRMU$b3>nlJTV~Nj$lWIxppT(fHNtu%}OS)(Xr@I2|&>&r)q7R~~>RpCr+jNvQ_%_2WckN+Wm zzF(d2Y-|cbiiUQIek<2|4bpN;?E@PfJQfz3wUgdjp+9@aw(rnO!O4v8Up!F3v8Jdl z?H_yA*PE6>udpgl;_jnvz%|<_sy?6Jjvua$+nUzRn*q;MVEbO%w`NOr>{i9f@z&RO zB1Zl|3f&=*$7o{~u%m)uPwl;0Txz(ubvTpG>f_9|+t@rIB?@74X;yTa2s>;z>s8!| zuLjVhqNC>B3{NZ5O^Pic_vyR5ja2@dVf8*zsOgoJm4}8x(=~o|N6l-~b(A`<8sVoA zwr#xZZSQ(}+w04#^aBPCrM!FVMt)bNJ2cDo`)Bj9LVU#~kbX=FH8Q0eLt@+3 zPrfBPPpl z31{uDCvO`!52aV^358gusr{mjBN&aE8EeX;jktm;Qm30!*ZAHynPx3)l$y?|ZB&t_ z!gNxsWtz1SUVOW#-gA~h2M^DB{y5rLwv&?$``&>sg@Ww< zeovdNM@(R=JTDVw?;U^{N?(SdOHxob8G;2X9aOBm=20wpdY-beVl>=Dya&vEy9g;dHX_^tgNx z-jO4J;G$2snej@}<B9XGi=UOIc&53*AQ?x^dP* zA8jt%dB!{PBo5v$@@&+G^Qa*Q@jNsN8FY(Y%0(I>j)G&63Ox5knYsBsAhV$fVM1dy z+L9?-aoEUR`SR7n#ScD=j{H=(ks?&&Y{v*D8kPNzFIeYe5~irA$OwcL24K9-Wg>uo zl0t14M{Tptd6Dd;@_n#PUEz_djwBOZp1hC@<%h(p^7}nu4XIOyy%F?E-R_Vzc>+JN zM`t8|(dO$210N^R?-jdyqe(jzEBP@Gq22VhYW!0f%v83EnX&Ff=+6yLS%%`93xu+n zshrH^@ACRSIxKJN>20cz-(Rdg*@=#hm}=bhlA@&k>YrWB`pyd|?Zw>m%h}C;XS1)hoP5+^ z-I6|$0K9R4uM7N4EK7o#lNK3&YaVR=Ayf+Y!bb`Lx@5y#@@@8!BL+oFj2TaTB4%1I z@ZYo_W%)c)ZsiqTIAN)@>|V~XJ~GIrBEc1U?*btwv6j8%+sQ{fTjga5MXWzmPvzi- zFq<#6pNBWMytb5!sz7mVQYU%c;K*(Cj{5a6NU0(6jOn7>Yx~y_N_EOdm~{fdC3_2% z0mB6RRAxPxT`}7*E84Y>;2jOqF^vy`pBI`_J4@Z5clPK6 zBzV3EA{Q9_a)15qT5fFz`THEzi|8e|Ore6ZUs$F4b9+$^xAW(gGONEYbErq}tbCkN zTEj7R!9WHSAK)1IFzVQkEJi1J=j!>(AB9R1D$h!3DEVJ|~ck8FsHZQi3 zmynSfc5pFVd^mjLSm@B;fDZ?fTx%y;2T=cSlt!ky*d2r2(1$kmN(g}ctgtoqyzi!5NgJ>RJ2xbTh%aZoGIroeXK3 zj4WI()B0PlQR%6P4EIE1w0B%S@oz8%_g*%|wik@VuW5QLoVV^}&v6eh*A`n1xW0{@ zll4nGRM}bkNE4Tpy2tf$S_3 zz3eFD{dD|g2lWAo$J7Q6>h?%%*u#~}PgJtkGdWc2WUJP`3+D(3uB)7DXX1V8udom9 z>>A}p`l%^LZMXaOI>Udd&mUbZJUdF5XIqqo*+Yjio9ljI_>$`v4`QPJdf~u6N)!Vgh2I`p_qCyf-Hp`Ztzc2smy0ftm+)Bks*VFZ@xI6>G7^)1 z2-#=5ldUx?IrN@JLJ2K#9At|flF9a?9I%~UjHE}20o?S=Ln>RUxA8+KStnefOpNGs zYnQZ;kA{4~=S}Pvmc07qr^44y9=oDfpJpxh+yWCg@HAV}5 z#J$G!%NjzO2tB%K=g)$X;ZGT%=G18dH4m2#RNSs4!pX%O4L0#bBJI!KGbgPLR7&VSUMymj_^dSB%whfCRhe<9WMtp3Gro@i zhdrvYqXkAvKPnDF%9Oi#V9@gdeD`nz_^(cO~T~M@SoqSL&CmjrTQyk5qkkcj!96=lXpJROwkKTq{bQURqA=M}G@g*8ZPvt(AkaRF)u^^X_`8#7; zSbW@Qn>}stPvLU6A&iTaGa>DFBs4ApKcZ0i`q^Qhu1bOF(tX`Z5gzLY&`1>Lg@LXB z>ea%_)KP6t;RpH>yP7}*$Y3TREzy^Eb>LXiisgwX5*_O}=17ncEpd#*FC-9xlk|d4 zEAzoxSw&KSaJoeqi54Y^NJ$tt?MM8SsF5>)z-$g*D6@Eqz=%j|R0x(9R;W}3_N>lG z7-Cz_JrzlWYFn1tvL4VL#B!Vwb5WChr9;xA<<~-g;Y||EB44i7MY$R75S^~Dc`{}A zPA4AVaHPhQPr>T^FwI{yA=15#9K?Ew$#^cx*4x2Eou4KC0{Fc6`{HuFeyroMa6bMWWv` zBul3s&0HHQ?^W)62C7sYkox1}d0jr=jfQlfz83Dv1b83A=mU9~xRN*h-u5aa6TTUVV)4aDK%7_00nfqobv(PWZOLSJw9$lOO z;Skbytuaj663u(AhB^y7;w3y;;6TI&-mwm@OT#eF43j`F9fQJ*eWN z9A#%Aaqw=pMd{o=Lun1a&yP{SH!-Glo5A6>?a|NIubWfr#IR7AI1h*X{KN<9cMme@ zQ(QUi?IQ!en$n|jduK{Z?{2$u^5otVx8JTRHMO*)Q!g2M6zX`7R

BK^?UiheWm9 z%3|Ce-wM{2m(t{`FyJbU9kAghWIl`_6KWHi%w~*jk)1uERzzu9w*U~`*>hf+7c_j@ z=b8F2hEDG5u@|x(DWQEsFhdgt>Ue|<0nb)S=DU)?K$;&qX2Pv;7bII;;#FQEuh)}3 zRiz;x)ZkJf9E}z7hWfC(v1^5wBnDZksotojT|N9=!pOXf+<8}33*|FX?@;PaI5{dP zftw?$>e@PUHmM4PU(u(App8ACVz;$ z(nYd9+{fL=yR)B55@N4O>e^iS(D1gP4elc~Cy~yRwF%X0irR&gE#%MCr6cARf8At( zN@LUQ>vDS!42gqcCPI*xDZ^Ct(JM5|4-=s@*r!MhKr-*jsY-b7AytaVCHoQ%4Kqjrcb{&yjo}(w zg^b^NJ0N3qMR?fiCe-ud`C?s@*&U=xPKrUoMcy2>H{PgtH%gj~E6=&^#b{!b`0E41 z_bCxa->2srKb7@XY&$?`4i2*PQYQZS_ilLL9lW3TCygv@`zOiGN{(XQyYC_QV8_#? zgP(E7)Jp^^K3A zve6k-CrkF8>qIFFUC% zHeN~T3A<^ufB(rVrKRiE+5TiIPutC(C}U20qdlxwGWLF&F9OPo%neAwFfGozCp7`x zBFK@OqVzX9_lp#A@xLlR5Sh90X(ARDb(|Ip@y1O$wrj*!nOIx#)e~=lNST2Q5ZTiO z=2CSNJheC)8Uh~jsa5ppb@~fgl=4(DXN~+4(;&&0YZXAVlWLa#)WaXPgyHa}FfdJrC0Y!8qC*Ik z%GG8X4VJUC(k0hq{TB+`Q~#rBfGKlFkky9M@%8$l;=8d&lI&)DCgJTmO1Qvce)A?j z&0^px37rhR-{atC*EQsCXP?30&>+))#k+q$zXKs%=UVq>kdf`L592(Zyl(*Bi!|{* zo6n@lYT#-&mxR@f+qr|u-fzOcxIiRJ1vxjJXFG@RrH(}GxN$5JdD{Q~`S1!7z`?V3 z45Tn&D#hRLUDK0qz7by-4rmny03n_31(KymL@X_7*G>MyBXeP9Dxow+4Sr(Txr>@? z(L#@>Uzvl1-i;sO83$wO7FW zW&8K~>JWetv>rSNb=?hVE6_~QIR`Wd4M0aY>DQHUcXzM!-ZB<2Z$uFg5&c>H6H^}- z8~Y}lkn9f@F7`Xdjx5JV-lEodinpburHMzg%~L^A(zx{d>9eCgs|%-z%FMO9Mm9DD zenb|ah0Snf(s!VrE&XQkEFN4yn!obJIky0G>cK-Etp1BkyB`uf$Fic#2%P_ydm(>~ z+yfO$CKWViFB|Hz(~UZ!CeK+Jr=4Tp!rmBRMr$WchZ`5~9A2l&v5G60aD7yh9Stkkx@QLg(3nWR>Ghx_^zTcAYbK0iS&CR`*i8Xp>m@9d9L+z zr|=bmIlLyBzpNnIUIKmvd=~nTK8|Rssz!eP0SpudK=WM6+RDQU*T8hT0&*)(u=nmj z-yhPcm-PU!V0e|3l$vC~o~z}7Rjt9^dV7NIT+@Hv%JY3rp<+*FX zJqGE>%<0h19ziLwEWJo<|K-4^dGT&fv-gYnYrm=BH~Mz>1vH>%Z*5nOo+O)p>q~$A zz69hIU;w{>8%zMFUF`js3uIUDuP%AsFxFSRP#tjQg7rfHD3~U2CilzIuU0hz)k@+; zA0rfr9fnE=LY_8~MTQHQDDI_?_GJ#7R<04d1T<53n^f9}!w;In@C|_~)eRU`uKgkT zVV(8c@mGL@G<=r^*f&G~Wl4;p6u42($LOu|-*a}k$K4e07*mu{+#LlDZz(Dq^woSz zME^CUcB~RYN5=!WRKe}LtNmI1e_%5p6N4K>x}{_BLFR!6YIQ&`??2$jr+C(50Md~_ zn%uv!E&xMx2%H~UO@Zf_V?38%cxb`9Pp=}Aa)(rBQD%a2uClRCTP#uI&7 z89G`)E6ksv3hs>_9(-2|(Fx_l_B}CD>MEc(-(et+reYVt^2!%~W8=YqcjVnU37S{; zT^Fssyj7E^dE;*(@z_D9X;g(gNGOx=-h6xx@^BLD3nAlQO}|yC3R)Jb>HGj=R)-*c zq!&vXlLhOn)S?f-Dyh5GgD}DPk_BsYC)2PD2KnzD zT1pWKC*|vl9SP+x{%%v@#v_U=eq%G|ph@y@MQ071ts(xE#%YRruwu+)LlZSsg=`l( zI=OW0R6smc0l3TR1p0u5;neG&y#>+>6GGMBV;#=h4Ipr*NYCw{Nbj>$shZZU7GV6+ z$6nsd&Z@~N6YS6oH|2n1?h4oz?nbBo*DnSh-wsrk&A~2{ItwweP1?72dZ`|Mj@^TV zIV+t!_ys{Hr5>(nY)G+Op*`I?Q%W@!iB`sPmKXw?H~PDi%ZTOW_DZgBhnqDnGtb7# zEJOjlOF}v6?F8Uyz5P+)3+D;`y%3Z5=?h>7U^`o8P_TOM zvQ5=E+M!=y{R>&rRMfZ9vx!&Qlrg8UWEuA6$p*kjI0tvC&}VP9V=}Sfj>|As8~sdN zTVue$5^-g@vMOKBW<-VMBs3mj0kb&9 z#(R?TkvP|R6w()s(9+uiiZ zzI|U-q8QAnOCjmn1gv;SbsD@HK#Q|`E@zem3<9?S(d{n7IY>mhcb|o0>Ih7hB>d`7 zmb304hr%*1c_@ExR}Ca|ajW|QK#;o4tlwJBiwQsk5CA@ejEc&7T2reHwdG{__{!m~ zHm!sj#xFvN4@$nKW}@MDTOU@o`y(s}6yPaYwDF}^Euv+F# zMsW%)Q5Nz}RHT7P6&=>U8g2X}EFS2xHGB4AZ<6bZ?_CsRPCW2@zKAJirP_ zbB*l+x9BAS8%GJ2`2x4se0R@vR{qX?PU<+9{B1g<3xC=33{^`s&m3^Eb>Q`v8^BzE z5m89cxI~QU>R|$5DpfC;u@k{5I+nISu(@7mVnUSO9$tL^&pLDo@1agP!)83`Hz#nC z`pZ3BnL|8gsCPv@5QI%u+`E5uoN`XU=3>l`Pj5YK<3_tC|JF?#o6?MZtsH8!ANai% z)pn?BvLPc)8qbU_L5@uZ1!j?FPduZ{p5`&RBzL8U2pdZ1lYV}7Opj%2)g+@}qU05k zruW1fVHWOBuy&(ZaUysKqyR;i(j12}Mf1%+yBbuX%CzNf#T0g`VqTVvoE#c zLH5G(uamVJPelt0ey`MS&t>A4)YcpMgk|$SKIGd1(^JOmc-^L`3+d80v^umg%vn=f zed3Bo;k^_3aa9m-QYLt3qMZDX`Hj99f7R*46In?Ot6v{4%X8QK=AFo>E(DEs&wKHF znpJ>#O^KBYyS@GUVe2l3-Yy;4g#0$%IM7#_2( zO$UOXjbwu#7A-3q@=qhWgV&OPU+~tO=&Z8wc1A}5aZ*@JER(t=4Ow5HSvDj-Azvpj zi(xSlQU`N~jKRJ-Ga+Vd4_|!gICzmR{?6_38(e{C;!hZ%0wYE1@-%r!`MUtK&=_WDiX?QqfFpoJv6^F;RAoECLZtM4cy2`_?b3oCUJ>w=^L$6MU`yu=;#;2C_4dl8k1eI$5|IYw_NU8f}rH@XpuDS?G4$p~Zi2vvsN9 zu<-mY=hNE;68ca4J`VZTmA$kp_J+~`_ls;ldhSuhLEdlYkE(4$!d%+A$ 
 [GIT binary patch data omitted]

diff --git a/Health/media/container-insights-health/health-view-01.png b/Health/media/container-insights-health/health-view-01.png
deleted file mode 100644
index 51cf46044021e261d0132de020452d78759d4afa..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 214560
 [binary image data omitted]
zC&v}Mp|~3W%xR|5oSrxNv$cu4?Ynw<9>c#`2l0{hz}p%3ej$2b`^^W)EDQxEjwZ1n zO#`Wx#>vgyspi1h&iyAUD4z}Tj3vg$T3bI#icfVC*Q88UK?0iOJ#l!>!asIXrg^jI z8~+n}Vo%A6A z^7_&}A&OyY+kfuJHpXIR?NxM;dI2NC)QUu+fwO@-=Bf3gXJ+s=p@z?yyJ^d@D>t;T zfe1_Hyx&^$qhEaw7gw929^)$2^-tAP6@bVvbWb;;_7@bA-$}BaCEhCUhPRacE+8m~ zUe~d;_9@$cA3X%~_CJO=_(hB^bQ?>z)A{We8Ee6WdIU#G&JH4t-Fh@dUOabE#gu~T5NYL7$F?gQ?^kLzqt zyWl&Y48wlL7r3m0v)3P-2?yX;IRThYDKSn3R({~PPA#-EsrSY-wk)r&qb_5w)Hnw*H<&m5rXWbp=F1wsA$-<$Mb|8Il8j&s0%W&3yT(%)q3`Y)-ZI3zd7Em3e> z3&pTYKTug4TK0Le_?15ew&g6)yoOsibsuB9O3u6O^t1<75!?P%= zfA;m5*=bw2h!4RP0lPxdd)p%6iP|(VBr+e zVG64FVbBm9(Fv$B{%wVp9Gx(qR!gJ?(bi=zW>Jfvn8<>}^~u`L>>ojvqS@ALYvcr& z1G<7k=HuZtnTJEjmn1J`CbVK?!1#}T3dp-9MMBZlAeKR7ng|MydOGh7%1dlhRM0ec z042sWzt~IX7J6v$c`~033%*t=v+iE6Wb&trAn<1I>$>HBG^t&{#u&j+uuM)+usHH# zh=jNzFF-O(xqKw(!b=21g|}^nw{VjLUD`G%(Kz%Day3^}q?3-qw~LMc`_aGNbIHFeVZSd z|3ep{b+eO`@Ar<@*V?*_DIPre!c=yUqHj-p?f=k5m}~!%Xb_^o7e)H{e*Kh~)nH!A zy=3u+FaN!Y|NBxxhU>G%30~bfT>a6Bi{_wg{|ZF)^5+ZV3%HPy-DxnfIeJ;u3Z}5O z;Ihy;%P^uX(EqafKI2Rl_={G(+G!t-X3BWXIXxsVe(2z#TKzB*Oaup98jiti+11Ri zyUu#JZ82Nifvt$zID zEcFF#i}@a)oTln)s~mrH;Cc>vWS4Uz)?u{#Eh!y{o!9c~9uHRWUz&k^T*^;StN{Ow zD@jDE;1XXzGwuZ(Tc>p|6Vubz5M65|4_^yB#Nwv3|8@X%mSEuekAa>^1{jMm&LG~x z3!wbRYTW1Nj7u_ALWyvucWk|X!^xV!7hr~+cZvyEhTDGm9#$6_0 zuGi#z%Bp?d{ANRBFdCa+H z`=6SKxZ~Aih**6`^HVb2eayqe|x{YN@erx0L}yQ^B*@?w+06YQPS(k%;^;!jnfZc z4!lMQ9!H~TQANV2`3HFmGhK=RMD{!=74XN5%FgJM$U_Om1cHqI0yNn;@L1NjSOi?b zER=lie>xl0e0*_!`UTzAq?n`xfff)Wxg1AIvq;Iv7N&t0!exaclLL|{o)9^J^P10H z4gHH|LVks)5CV^w$rMBeAGh!VXi-EuL@bn#PnqVe zFgkcW!L3gD?z~k1_WUsFtKu zL9w}e^fJ&=DU5|sJB)3Un9Z(w@@dRhkgXB$3CacAfvVOFS4wT%&N7PsuJ+|A`54=* zBOrksI^jcHlN0dzj=!^^dL;+37J%Yt?MC@D0oSU!a4GF}aSF#Hv@lAx#A5tSH}&f6 zVO%mffGz*Lw(B0tWQNL0I7i7&bz0Mz?^$)&BFJ&KKQ zJFu|h)5R?k*sDE?@8+tRh2&6KnW-M60z6vnU#^}0i+?e5R@Rz`i9p`#70xu9BhP{u zYUou_XQF)CVfWE|%_>(^_!${Qk7+$zH5=rKGEb0KoLPMZ$Pr%~xRCF}{Ej1ZG z(Lt2moF+AlAZZQF8SkbNJj0FBm&$QrIXD0C94l156_96e9HihR;yZtV?D3Q1DnLJ)$vONkKDydcB zRt#=2=PE`prw()+*Du9@spbs z5>cjzs0ogjgKQ8~{g2H|4;ooLd4b~zJOFu~=u&9=N;jjJ&xVfEuZgdxx+h3fvn5=& zlDF7-jesTv{~c^=<(u1I%~k+wH@eEp$sD#D@6QfRHd#xP~&`#s>!$hq->XeE|v#{)!v6u%lDd2w|&u`HlKWLcBr6f)wO; zWkIf&L@Eg?e)YGciJ;oFx*H5L3d?aSj1#KS{0^wkTJ&L%?z_G}Ipo5t(;8n5~%SYWNxjO>3!lynwA>OLSp9HgP ziX;{}L>ue}>N4B%IWI`pBh zL7qk2DOGc_FEu;`tv%PAg zL{CWpsgX?Nrq~9nsHshPNQ{!q6vR~P;Co%`ykmD-LSfV245yj62X;)WwqK)ANQD_# zzXbzlbbk9$Ky|1na&bujd(G5EF z#d(62?(vKQ0@j#>et=}c$iLyERVGyatLoP_8+1RIK{x0Mp=CqO6;(s6kD6hI6_tqv z(#WpcbhTkKK}IkI-chRp_DlLVU<))5W}vR!2?lXN7ZOPX+~&4Zv+ zvqF+Lgdfun5qjJ36VEc`l!s(l`Gs<K(0o{upL|PH)Gu3oLR}RPQ z*&Epkh^`{<2gpH9M-GDiS0DZ%MaiEWM!gZZ5U^R|IMs1<*J$G$(+59s1P$!ulof0(x$7D1|F=QKlR%+a*&qWF~OY< zKbiFj3!+E~{2|Xl9M4X(Ay^oZAFsilGi>ullMgO{yGrN!_?1TLC@TWig|_(1o-hKU zgZ#T}1gGn*vH>|qS)}EPk9-DSe4pjbBf&)UOSLeVc_As_yHIV7C$flEEk|@B)j2B} zxcYC%166Id!OP@i9Qar>MqjFpxaSs)AfG_yLudijHRoj1@1BFV{*Q7-U78f5Jg;$d zVUz|n+JnnOX2Oq}u}DUqkD44W1QmJBp3jV{r{23n)-*x~`3T-_zt3>lU51WO5WN1Q z^+I=zsz2}n2Sp^GVCLQEjKWgs(a6fzxImfT0jC?i2~m*tn|E&Tgf^w1%o=StCf(yF zU!lk3t!pErj7(BF%Jl=udic)1aVS%J-ug1jt@GsvMM3C0g<^qFqHc(z@~YZ*m$Tm- zxq}1B-znV4W$2vM+Ym`E=wX!3tp{3O3ICP!`gBdgO;HlkOr=6@Ci&|xM^*n2tOYwK zOS6^pSa9B@!`{-isK4@sd#&UPkK66_>MD1f>wOqNs=7O8XgsDxmB{s(!73xqcQW#s zb5rhheFh|v-{tvX16hIx)_SH^Gl4DpAh!7#k$8^Zt~R?LPhL63gQ(oKqJJqRC!;4Fgb zQwoItT+~j>wE5!MT-4u1qfAbZXZqDe7AhpSmhwqkIQUtP{dI3tY$Lh+*;jd}-(E;d zarJ+Sc+TG01~oE;hxn(ximBs{(QDJ1J!DCueO&1<>w)6l<0&}TieXUTMI=fOtQ=J0 zA#S1N=hFw@58ilH$qKXXyNujc{mDKO`iHudHIL`dtrzS=F7nhq@$mAe8&pvwZ==2+ zOe%Rt583J0M5cu%_##M(DYR+R`}f**SI1{1HP0tm55!#L)X|w>rKXk z`SD${1d?9waRc_Seeg>E-qI=MJ!F{^7;w5UeCgw}i_#3@Y3JgJR 
zm$hUF_o?%4fd6&-Rx3iate_Ki{H^8!7>mozsn_~t?mjG~UskT`aA0b^5`tq||r8UXX zCCjeY>#}{r`JR*AvQU&m$chO&TF1JNlO@GCHG5?I3AsmhD3=u87uq>Zeaam&ct-~E zDN2+c5lSGxRU7a#>-0)Xz4MvB`XybFNML6-5ygME72n4hGPJHinVG_K@wQUrWJ?F$ z$)b%(Gb~~C>WbWFib54}vl*Id;I{Kl-?yODY|ic^YP}YJ?!MH@Pa}7QIbNn0R?s3D z^olP&%CAt5CF4TY-~Hrh)GRqXD~Q_aS^py6&sXarED5qNi$iTk-xs`Y>%Kv9fRN<( z(wVEZcy38>kD~K^-^_y6U&bGNZZ4xdmE2we``qXtXPG-4ti_ZEbR0dFmlpPJ*_FJK zjH)#XbXz%3FmHsxV#%_rHEx5y5VTT?iCvROGK)EQYioKJDN>Or#Yb7f zsH-)Zn15A(Kch>E=qE#>9mnfk9VQClFr%Aix_gI>JdILk8++B-G$gFPvSY|JsPk(w zqFq>(*0DwbYl!GccRFJ zmY~(&Uk-Qug&WVFDzR;|5=8ASNHK^qKm#9RQLCP}iB5kgN}F4>xtp2fDiKv>E@m1SbSd2Ov<&bjq#heVt`T=j9@G6J7+Kmlpr z53iRNiGAFro^8tTO#Tlj-Le+*Ol#QlEh^ooCd(#Co2FMm3d>3ho$U`rbSC%l7xl{g zi%K8y=9qm*n#r7ZrdL!LvSkrb!f1-?)t>RH#6=}6G99|n%cEXyZ7 znY@RtiQ1OxyE$KHqyicR1gld!pV95 z0!6;gM~GnI^s#3l*S}_bErX!YyJI|Bj<=^}Ng7OrggQp? z55x|jUJPC$e~Q^#m{oc8(3OCKTq4BqV>7phR z4*0TiMWQJ|MTOBXKKd`&hYA*JIGWMVUZ!)=TgBVTM{B1G`|RgKqR#geXgLNZKZ?#P zJ^34tB4AguagRsrWvn81$h0E}5)*o^PueWhQ}LRG7?JA5ftAow`*| zHE|pLQgwkV(44FbZJ=ivtZOXYza*G#C38GxDEuk!fkB5Qg}JoHMVsjSnZ*qGm)96x)GLRam(F~01*PWZGozg~^!-cHHQb>pYAmdn-|k#s z`DOk48pX0+MXJRejlv*27D-2`%pcJpypWJ9o?QAg2~KE9S=ja6MH-DTf{*GyjHeZk z;R;a*O^f&)^qGAc_Mu2FK12G){AA=01zT~uG)VuOQ6-2Q-kS0)txVeB^X{Q=HM1ZT zycsV+OuK_shB8yv9jbA+HHi`;+tk4&K(^6V#k+zhMvs@YqqVNoR()lTy}u6qGAg79 zs(MC$bZ%Qupc#mHk9|9ANrQL@sSj(ztK|`%5lWd7Dh@g1K-)W zgeD>~t_Zw=E1`~ll%W$Plx0^Yu27C_ClV3eTAd8T)0Q{cp1WoX{{TyBQA>*y;I9pX zz9YEMV|38WZRK=(FhzI9BMMOfE{z_7e{ax9Lm}^w_mp>2$Ct}V0yLp& zUi~bmGl!8ICzP^dwxM2FovEo_VJ7b))7}0lFQ4_(n_F8hp@CmV=$2^St>V}Rl0!)rU-%p2as#x^Ab+6SP zS7-^h4F`N*(UIF-ylp9VJU1w)<0P?}3$~V0b7$_5?`Mpcl^gABEd6}Kpcp=68uqJY zlyg2mlT+{E=*LR_UQklp&JPoBWq1#EqeI=%jTe6)Y$>?45m1Ntu(@meB~)dC(a7J$ z$$1iH9UCo&4!1m;bkW9NhWTB9Q=Me<0!gYYO3M1nqU z1p26g8N^Ur;xpCXV<~t#@=HyKIwLXMY!NB`VJvW=Yl7g^?8$MvrM=}kTOc8d{a#E5lgIqhIVBtzeFrd+y!kK=b@VZKSs)j z?GobZ+hRI+F2-)u1$LRv{_^RF4vs;`-v*96y#8bi$2OOtc2^~aCN~tEV44r^z zQ<}~9Jy+$E;^(~9OfV!%r8nIe7#KJz3e~2tW79%fA(zct_QIT7Ex1GbQoO+JLwLhR*pD=Ls`XUHva;LB=!d3#l#)v9k*X);r zm?XE`9~5RmJ6ke;^}#ixyT0qydrWdnVnjMAlU((oq>KqRlP_zF@)KW2$ZAa7#*F|u zKY=`7sofb4-lyKuCC=V8qHnLiN~6;5FmYV)r%+{b%=DUYmNLJS_7`mPKKs4AKfIH5 zHgKEo)+6~aN|^=dVRmzVMBQE%M5;gdcoI77eYC^Dotzb=r_CEqOji5<5%twkQAKOK zDvBUEAktk*cMc#e(xP-ogMc&)ZGa37f;59P2n^jNICLY8bmuU@(DBaP`|f*d%|9+U zBj=pGzx~B8(u*F8=lbaLi@&g-5>EwT^%AJh1m~A=&Hn&8%USZTZGB8`7NB>Ja)NO) zdfj708I&S{Cr0r2>kIsQA-C}7QvTorT)*X}w9+>NYQ6d@zt1Ycg8rBNw#hdV$I8W~ zS(U=@nJl|Jeb$plDWaj?*gk;2wCo8UM|vNFJeID`q?88HiiiA9rOu@3qiMoi12AUSGQ@7yv6+j>IY>c$46Gj zMK1F@6Gb0GKXS;2iyXv;%kGK^T~KI zw)aK2EWhBq&keC^Bv)t#F)Kj~U(qP1(DQ&rA-0m~WR!C`EdX+WwoGyr8cEA!SYZML zYZ2##e=+UXA26a}j3}VpQwNZU`gSQc_yNTOUmf0u(i@JJ1F@$$892`wzDHN6?EJp@ zgA!BDo}P~JQmS900KAWn-Lvp#pBZHg7$wO2s_v)|KR9DDZqkxaoLE%& zb8)ch`Jm111kgTw1nD{Ysggd&^71Lw?hMZpA{y+FoD5K{xxpa|uJutyv}`pgqE?n` zl`$l%QzSQwCBzUgmIgOQt%gc`14aQ3|M z#d>8h-k1*Qfrzf!`?c!^zrofDU-=LFugAqYu&($%DrY3?MQ!4EW;^)=x@GjFhrCiu z*!R z2fQ%HyNtq~L=uOQ|M+eGWjkV+oo<)hh*PFR9Xd)p4sHiV`RjBS!1(rTq@Cr9ht7Yc z(!YbS;efh6l^N4(EM$yN4~#0X!8%vqXCH%uqxoy?U$5Rbd)UJaYK|uMzRa6NwoKI& zgr1f23%13Ss?~gSa<1W-GqZp3f|WYbnz#&TMoQm11K_Eb@68i{2FWxWn7{%E)-!*t zaxVEsuFB9@*I?P#*rli?+ognKB`ZrlGFg1(SJ-ZpOU9(cJiV_-_9U6eD8M*hYcQmQ z^C-FU%`+@UHjc<234EcHSk6#(s04nOV#d(Sw@P6;z;neS;H&$CAv!Tus4!Lm2~)i* ziE)5;x&WamGe%y09)NFt{Dko#+8Oz8L$(4pKE7_6ZRn=`%^NpM-m2>dRbbN)*9vaz z2S&O|-!eu|WgSfJw9;x_O4Ns87WVuR9IQ1AwRbTUPT?C~FK{Vpa>u*>!i03Rp~MWd zuKe43<^dBD$ za#%ar>{A1}#}xsp59BC{_EHufEkC|u$#pQ-pnOQbM`k)u+;@s$(qPbpoq`HtZ?r3h zF@Ts35_>pt3UL}V`DOZwq=j~OH0}Ip7Oxl3<2C|dBK}Ro(pLj9?Z*7M#f^aFqRCTB 
z7**e@K8CDw)QIWyi2wuwjEO?fV})kG&wKKhUfF^5=#(EI+7Esb{MHnh0+vNs3~@9sxH1-{UulHAGqPYNH?vmd-r$Dp}?VGm`Q7TzI8SAF!tV z;aUo#cfWZhOJNl#I`(niO9~7=jxZeMz%4VZ5_caL)V~N^+sxU_UL0%?J@gg z)^clNT&lTkT~G(%!oiy>*PT$WDfZC05=B97L3qnk#HmpN>0BE_ot-Rhdp#=|3sxh} z$4~bp-PT@Z#^W!)z#ne~eA=Yx@g7jD5Nmt~I59uVEa_>IiHC`EiMxr@h+CnPa#r$@ z55}~Z=oq!MhkzZjxy~SQe@;Ld{{i`zr|yHmKdu0z>gWOVF{^>$0W;35+e3)RP#W2GAz7GDkhRoBGxI|J?FA88J{jOb@X_ zB|h=H`40ik&V>s{5vNzeGSvvjCpvH$!t-~e#^dri23O&zrEtW+r0KuWMBIRfI>VEV!?) zuab~ff{XkYv{eEzQMKSDb#G}tLu;75n37zm`9It`7 zmn?=%I5x}Y#AGAT1r`C`1gue~oN{m@;V2SAK>cobdXN;i%e| zTFk$q3#;#0GuDPAq+lTDZGSK>C)&OP1urQ9bf{UD6O@P16O(ge9>UYm{~^Z_Z0bse z`4dI|dLGp#B!^UU$xKO~ulfMVPeiFwC-$Uy@v7s|b>@C=S<{>h9-C1JG*RiP(Feyi zi63>9Zrkgts;Yz!s9rFKQ7{uQyNAVflw%-a*9;_#wV)YK0L$#hq7T60_kaL?YRCOv z>IDe2|7lqEE!Je{Q){T1_I-RY><47!JPO0@KqGud+alLjfy)6SG(6Z=F4c{*?#4Bi z^Qa;c-Aoc=M1o=1XsY{6n}=R_zmdkMpxy=ogLGfN*!=G}lF{U*VZq0E`@}y@tBs~Z z-9A~Tlg2sy_l|M2Y5t?5g1P-Pq4lx8Io%0E{XlbZPOIA*k6`wy_O{c?y1>?n+Z35D zQwrCL8wtyVAG~_iAH`u70uxla&gKT&WUF|pgmQ#&b_2>v9LlXopkTR;z85U^w!%V@ zJT!SVvS9+)D7-PoMl;6dvTvURBNG%kdhfXtf~3ed9zGfMyuU$eu^_bZmibp?Ppty+ zc(QU7lyB)6L$@_9f6_q?^6bT7GG@Rr&Gk_5Z{Es|NW}BVEm2U|TWD!g&K~uG22~SX zxvCBE6VH}=?a%UnPmzlv7V0PrH@$x$^U!D(C7dEhQWioUbRji9Gz7o zzOK(B!{3Yu!McWnq~1lPgkbb(|}X^r|!mY9`e_0B@v6%z{1Jq;&WY5$n+})yzx-1 z+0}=r>4N<%USECR0mm2QL?r#Z(3U4EFDM0J;m;a=f2&)m^sh<>SGjvPEfUTikeKPc zA@;<$1$B^tOi#M=H8OVMjBuQ$HUDwuhFMW33s{FMLo2wCv-y*#LBn%W)D7VeOq8!*(lr|rPU0slp`;Mp11 zruZg!(OCcH``+D+r0;B?fZ6J40Z(i{2p3~O}$ zYNRPkYGChtDFX+oPs-lhRvNHvV&WQEy?9z%hyJMYokazh+4TBe-cA~6un~gzZ%+zj z{<8+w6-6?z@iYZl)YR~ASyb}D_@wwW0fTC=f4M!0z*PfVxjiELWAlH>*cVSi#Exq% zEwt{SvRF!ZANGmozD0#YJQWZk$Z27(z+uhos>PEguPJ2E-PQTk;+^T_CuV(CmG5f9 zz;|qy^CuVn)`5JOru=S?=X8UC>YDBWKTN~?Q0-mZ;NY0+p|fu#Yj1VfW;~9xaPZ;N z8O$F}nelWc-wDS_rfH+fwY^OvQrjdar1ot=q+@Z8a7YPHtZaaxm)@s#fkC_OZ5HgyPen@5NJ}9hu zz7*rX#@u4Wo&nT%GT8VOd|j#%K9&GIIDKVekgY%YHcZpsqIpiJ;soGxy)e&WM|^V! z0&G`VAiDSnnec71ZDb6?i3NGq0lM!iA=0h-yYFbXkM)s>q5hJS?(c}k?zVt_*yM@N zDxSmqN28|eqy7kQY0ZEO>P1}DqZ2?-CT1ch5bz{J0UNn>`PgOB!1hPseE^ATt<)n179os zgf6Fe26_l0R^aCpHB7QuBXyR|}g~2GE{ZVS6DvREWl*PL2maF2XD(0?K z**k$bcZ+ZaabMO%XPTNDw*r|+#Agpc-g19Fl|@A$Kpw)x1HM>`x8D2E49?;+mPC57(4+Q z+EZEk*V#bsgaEm&Mu{f2);QemI|oJl&^5Gg4lV(t9-g<}-5`@%#bx^&D|S?x&?S{- zH|O0lfMW(KX1(RTld*Y4*rwduKTRD~C)=7qjZmcy!>LW^w3XBh@KnpvgQiTpJI(Gc zr$~PJI?}-w@W!tD&2HaZnjzGuipOt~jNMdbG#QGr%H~{e=&zR^ufrsf0EC7^?&oF! zFDMEtI(9BtY`L#H-_A`Ts=eo6(4Aal$s#C&=cD*NQpIu9H&h|P?V>F2Xu!!(@b}6Tj);yBu^5tfJ7yT)K^gjRdgcGJ&_pncV=8kY47ZS2Mi* z7GPU%ORb=ZsVGU>sW%dhKE7BGW^m-IT=d;7Rg5jvM+N|(1e#TI`QHpyd?#sJnvsR} zpX`1Ychqo1vwEZ+E<3|sKP3Lb$}7M9?wXtEA@S6QH!URVY3wu=mX1)7B{7R|Vc9uX z+AEWYd#r6o;|TQnAJ>O)lq_Yp7ojHo)PuM#{+$oT`b{UmQVxHoYXAsPM3^BogD(HN zWdhjIH4A)cz~i<*z)mifvEz9f_<7n@>Fs#P>Uv%_&kky*G-HRm^WJ_S>B8j~Et@v4 zHEBJ-HRpHR&bMw|$(>7DBt#k1oe)#FDh6X+{aRbzP3ITsLs%l?2dK{fsKs7MX!g+^ zXG8|7GJ!UiE-|m#vbeOC!(smDyvo#3QF@#xMq?YpEwSF$+wF@cK))=X116slv7J|? 
zVK;!gr2JmvE=>= z&6nopo=x}X{PjP{8x5Mfwc+cO!bH;4t)JT^>sM^R+DOsnKUCMOt;4>#B8Z%MJWPH( zRB6Q}Hs{7~w!1jxp1g^t8!_--8u*C8Ywm} zjk*K$4}eEwtSyHy@mc?&`04#C_G-uKQKfa7>;WK78i2z~?s)bNDQ(lWcYa@VOo1XG z!vu8T`rMm~;h`%5B;-0^ta^{8)3qTdlx2v4O&E@)}v=r6=vcT=eR-- zDVVU@5k3l^Tfw4hcYuEoFC$?XbFheavs1{CU8 zU~4jWbWXm%RElSw zzhfU2t9_yZ*G%xZp}*Nu?#v0VFOUw_5xO5*@ZGI1rgn*jPEWWbt9st-%NTFaML)?$ zRP|pC&a9_tSp>NH%Cw$uua}8}D4303ByGxxm*Vak2Fuywc~2{tR;SFu6z$y1s@u>& zPd*)#1`bl?t{LAQvnpVsnceW6{bhUpmHztu3{v8q3kzX|v%L<$UGhYoZ;mRt6i<1Y z0kpPl4B)cQyPke0qCn4V!D==O&@y2D@Jn9KBJW`8nG~~}{^Sk9@j!d zAoXFX;X=PYEDaM}?lh5(qTz&7D`qcRZwJl2vjX@zn!p5AOx?Ijbasjuke9HyXpnfl zbyI=bE0%J-(vjl7QY&378EVa3?f!O(037hb+x|_F();=;@3{o(8Q*FlzNwx>FzUSc z9t52C|BY7vH)i2SA9^BE0pfFwcmFP<67dv{DByWwb&+Hq032()y9WG;q((SQ2rS|k zxEcyL#8^1b}S{W;MNYEP*6Ov@>}ArgHKq5&zU9xRV`C+7z7uThoi0V zXm=6x2`CXME)I~Gw`WOa3MJ3H7_bf3FW`D8YyI`e|0q6_i>bpo=xUDXA1EStrPx$n zp&aO17DJx|vv%(R3P!Ap!M|Evc*+E_gPZ%AiC8@qaz9Bh^86CaPP$~D4OpdKe10`& zF2;U+Qd3I-1p_-+9fVw3I9y_~!FAX&J*XO%R@A%$QOe89xZO zJV=8OPKYk6NLRP8?O&zAQu-{VgwHfgt9dnq)xo<*i)wn3n;~aeL;;1lQ)*Sb5V(&i zjzh2q(^qd<(sjSPt3|W156*Y9u*P^%ry$2+tE0(HR1)(+U>5sEW`S8rIi#(Abq1<= z`|)l1`Po$6t{2axrlzCY2fb_WYQkxDL82`v5ziE_hl?{)SS>6P~oW_^~aTk>trU4P12nuT%oVR_;6ms~Z za$-_)3S?w+aXjr`iCN8tmOT~r7{=Z~7#b3TOzwbK<^mE5i`^&I9k*|q(j%SH20Sy) zN_<;Tc5&XOUxHQ9L}x$YrOKa9>d>h1uhr1ZK3~8B^oJ!5_yDyjeb(?DcGdYOT$!Ss z;Gyu@*B)#KgnfMT1ACarX2NS};GZk^TEwUn0hDsi#Cto6g= zNZNnLQVff3t&31>o~_nqs=1lX(~28|?>ExU<>e4cS?*UOolXDVlw+rRj#OjGx%$5$voJYCR^L`7Z3bi}JtX9RF%MoVR7Czq= zw|skz$kpQ$M38&5uD4z{D_{1nxc87dUJo`pkd zCTQS3)sn4eQ*a82U+jb}*_;Rd`H~+4vL&Tm4+^I*h;mk-4o#L@J+S14A4A#hN+r{m z+o?r&Tf;vmSH%j^b~os@`HEE^lZt+N<<4zyFXgGX#0zh%RR5Ru{3no5KHL>U1uyR_ zykv$%I%V*Fd0fUVrxNnn6sJC)ofuR!A73EH$7u16h_MH&BH!N1nf%4CPw!AI4eAi- z>=4fy94Evhx_1cnZjei`_*14g=j|MkCy$mtFDbmd2694E1(tQ@XT;dSwT_{M+Q$PC zsyKsQ!Jv2zGU3!iY!Z!1skaz2QoZJ1915BF`f~S}-}e|v?!?#YD@L`tltIHhX+=!+ z_dTNw7vC{>NO=w!{i4lcC{ChceAO_aN$RuuXuWYVfb?TLIYoO{3u$HS$d1HAMN^}9 z2rBYm`UMm4{V>JgN?h4``6+e=()g@@MVhy+}K#W~>LJbaS#c44&Nw<{IzG@>SHD>Bq9y#DS2cD^BO;(GqW z#C5Pm%5`gf-t~mQ#P!Bf%GGRFKC9`v6;^*W13SMGlyco(Y+C&;>9_JzRjDR=H+nh8 zccnkctJ5349}S!QaaqG7qGOAY$=r^ny&IuzYb}!GX|9s|82C@p@dRDK@&a>MsM+Ea zGbnlg|JPk@$t?)81&;!HP4*aElHXY?osuv1dH!_!CynwCPpqP;)4Vn2r-aT)??DM7 z_i6=u>ilX>SgnNWEIvdo@1PB zFhjgu2=W9@(vb5$)saTu;KrQ_cB0#l;TaW>azB#xYY za01f5KG380<6gnxEWSBqwoECllr!_Bz&t&y9g7)$;jEeNY3p55?V{{|BcH*4H2`9t zpQTOXo#B;kleP627zcGbO;8%~&QVhH%<{H2y}O%s_%oCo1dpy?$(nRl+C_`sm4K>< z>}%?DXAR@m=S-7EbZ}y8p}i2QC0sy(8QaIgEH9wt zMK>u<>Z%=`e)C623M!c~zZBs6cdF(;37j0409=40y3^2`7=~A63$ARR*Gl?faPxI8 zV^A<-b})KvmXYf%qg_#6VmKfYlXpEI-16ygBI)^`2rZm;uVo~rS+M&-wxRo-_kMk5 zuuis$S-GrRJmfB0Uj1J4Z*|TMR`aCOgX~F*)XRs)v zM%WC1Wid49-v{W^ymVo}{a71FmZ`J^!Pp%*eF@i#!yS81G(oMGbqjxTYIf^)Q(*P` zqcF&oF)jaaEHp_u`dR1g?OUKvk{3PgG!a-a?MTZ%wG>S;?Hf|oFk*M%cqZfBN%V0m z`mT9|RNibqOtw_$$`^~!*~!>%InZ^1{j!t8{WM~pmx@f;Y>_Od>xLtROU*UT3F56u z<9?h|v1xWodu4iyJpY$h7I4awdDY)GxG$aQOxtw#gQ%-DNs5@Lna>)36sTKj7~o;f za2)}05WP{TpT$Vg-;}?ubY>p!J>Mg`VI*5?F~gB%|1Zn1>iR4uPr(E|v|8@8a^iJh z3s_^O-+bw3>639xRuLDO8Nnwk~bPb<3=?t1pd@3W`7$nkGY zRd8nwHJY$3Osc>!J+S^uOCSoH@vo5HkE{!+aBrdhSf2&O0X8Ca40ngI8AT@Pw{&HN zm33uktmcC-p}@<(na|?-lyB80OD%HHb<>X_{V2LOgfQ8oasah`BCf=~)jQ?gb~XP! 
zXsR9{O+`@XPc^?OkREJ2_Vx=9S(Z|qX3RAR(hM9YXbNd$IvTGTxh|%Wqxyxl|D}8l z0@Df>-h4rn@D~+K+1)Sr`3=k`Sk_=3i|&I$qfLNQGpcKyVmBWbX~)fLU(ZwV_eQZw zDvDrV@utZAOtwiYX2LI`u)Ap=;2`rXroFlYiU%%qmKq#socW=62B=HIZIiAF)t~rm z`7mzPMhpbhpfJ)0)h{mI5H+^}BaB9rp&)!=wIZy@vu#&*d{tYS_8qvt9H@qp{Okev zJ~<&8=sEM!!4x!1{SPuWBeiS-MD}4gb4l-uxE;uY9W8iJGrpmSX29JLo-lZ<_a|p^ z)6D2?l3WG6X423S*510ql@jP;B$>kw`zRINbX_6o)$)^58uD5X-@!@7?IsX)a3Nul zm_qXxbW8}*-Ef_Qov^l>I(8en>C5V9>WO&}v*DoW zZ6%^n3M(P*(ETi=J_QnIzQRRtn=k-RQV?4Uci^`wRJ+13-`_*>sD@qWeZ zbrs@{TTsujc09deX3V`Sv44=(sMTpb4X0^?d4w5yl5e!?j+T;7`5yhhNZLF8M}Jh`;8iPt zt`i4%(4i|)m6-0^@=xB#Kc$Z+@yS&n=I@=KC^rZG@{Zn}MjG>V)P;&x5A(k(VHg1< z#!qtSqcj7L;;pY={rlki376N--#&m7JBW#xh=tTG3`-iE2BkWB^)fbbzYjyjCofC& zf@*X;d$L`i0om5)jQsR+3sr{OHsh`261iE{e|Up?1!NR_>C&>wtaUS!Y~fGTwk;FLc6OoT8GrPX~Xxh?|~Vd3PX z_Luu1JHNPS-5p zoEcrD?NW#oT{#4NZVA(8V`0WId}xa1xr2FH8=Hra_1$`{4F}MtnM6_`4fNf5S^eso zzj@^ilAxM7aaLgM;pit)MuVPs6Di)RW)D0BXh(eaAi3QCK44ER%+a7?dD_NpU~w_TIVu>~@B7sISF zeYx%>UmYgDyKywRkoCdc|Kzn1`VQEvwiM_b7)lsm3&NecQabI{r%Kpq18KQ4dN)dB z(Cx)qCcemV$ht&qtLhYgABV}57s1!hz!^qKiB$Z$bV>aKbDs59W35UepD=ytjqeg5 zeVqa6*>Y@Fm==(&wD<7(-$*D$;(0v3BU()$npQ7U!Gy1sy8B&wKgHkk^rbi5#o)e# zFwnxeaZvYp+>%AQo&PkACKWq%-Rqoi#hoHJs||QZqU#J%9iB%XdF9jnwnFX}f=@|K z?foRE2%guiVbkO>yN|Pf$mD((A|zzW&VjqF-~SvSahO@60Ts?t9H(Xo2ipWyaZ`RH zMcU6%M6T|F`%aA5s3U3DRoB{qj#I?ZbNU_li@?(mm(L3E9DRj#t+CGg9m2*A4eN=V zo6kdSWvLFrDgv)iJ5VhH*RpxDTF{r6tDaNsN40EYBZ;%S5$ej~>(-8i!iOkKdZClt+1_sK??88aL+7f8OXC zsY;Hy6G2@(+bSKZYK!_C)o)vZpg&iAC)XFd8v=0acU1k3{ZB%|HWxrgIRUQ=YRS*W zavhzrr8RfPZn)z%6CI0mKKyik)(RyphjvJzn(IAg zUW9y5(D;2+-w23*^Gj8?1t%}D;IQ!JvY+)^Z*gRUxe4l))h0sxanzM^P8hvCzg6e4 z(=G9zm}c^bq-^?2+)ljqy=U0RfEHM-_ii;7b3MIUl{?$XBSps%Sp(}I?|kdjd_0~} zTzN9g+WM>E2Y#wABsK36nPc{GM}3uIRS==&bLuS-%J>W%9wqe%dO(BL`t%3o8yK!& zuE$RgUm(iX<7qFYQ_O5;^M#=4Dn=?}Gn{)>MFU4iyyohZgDZbWC))#ZKEDKZ--%kdBj(r?*cWJt zaO@4XRC8)phx|I+;g7C;s-Oc_%O{wAMFDVS}^_0Z86>hufIFJ>nb;Aw#;v^jh=&FdZ!1To2^{RL`OG8Fy35sUL_?n z*ZR%seS27h93;LWaC%=S2&AU0Ul;W@4hiA{XUpn}I^^T;iY6aF4XHqdOCRP>Z-`W#Pr z*0+}|a&yi$Z#-k1!yC}f515wwy@#`BOowdWDh}q@PVmE;bTp-EpccV0-KoOtfsJP= zUh{;`Vpz56A~U(Uvad=62WiSm5ZqtIJT1rW^GYb0AbB?@41#RzKuh>c+KcoO**mII zDNu8z1YedyN-g4kd^TgV0LUgs{8{Xbp$!EII=w9NR^)5ky_El#yt6nzimV7Rd-`Cs zeo{YWzISc>>2$vOw6U7Rv!drJ->qIB=Ra4>$hfiSar_#dm9)g>v9@G@jrjdJzPswP z-lMMqUu&lu)$2>#T44ITPq5eQDZ5a(WYZAD;U%LNh%TS)*;qBs_VkL5=5xe@iY&@= zTu5)^Bhaf?ujq7S&fAH?f9hmDrZWT2c|?{qY@`YZf+S3(SHBc|juG-fpahD?kJIdl z6QJ5h9cil22Z9P5@9x3xNU#PYSNFa4D!RLX-lEeH5C#$VnPOgC8xUV}y(6 zU7}rTt?Kzw({H#Qzxl%k{EnK+!qYe>E70VnvZk_fB#nr`h5;m6->HhsBFtNZ?o6S} zo!_5Zx0iv3&@peGBATPiU|0oz%_}&t3O)izQkjlfKtB3bs%VOdZ5n0TIr0rejP}3e zPDne+osTYe1>02Fw&dwK7D^N{IcPw!A+{{|xI{$(S>Y~&BGZJ!T)Zg20d?Z?m8hd)vzuQf4v6GMp~5+veU{C%89A3-yp9wHP;+i)6{f=FUD56Yu4E*f zygn2uIy438=>N050;o8@kDUKPh%T75dXp*xfFUL)9p`@cXFluEJ}QZ5twI|1439S; zri1`sI?t6Q!q|57s?f!Wj!!FTvTa4cx%9aY%lOSvQX7{w{6UJ-L!8ayQ|&H}aS-i< zH+o|RkPTD)=O$O^2}}J_6k=^su+nIo?y3Zm_2E2v@H$^}wv!h6nG^q>b_7(%GH=Rj zlg1$*qXJa{;G+^xKbVNv#jAfV?Y`XgQ zH{CI-iOO(jDD2J_eEyHt^{;$V*G!NMTXgf@-3o&GEQ&ld3fBCe6z@M9m?b`5`c^H5{j?I@;d~(-kAdDdf%md zbConO+9dzuC0vN-2?5AXqd3?l0AQ+O{E2gPmPU{T_6oIrUF7p)m$`{4~ zr8ARLqv}!g!YeVA1w+6cnZqL1kCP@#lgtnDYF5*VoyVa7a2kQY?AfEyk%y4+#H&5aop0W|d5ay(X_M$_Z%)$;}D27_gkluvbM#izXzg zH`GDAz}H;_wFla2nF1R{C&5ZLh{iCfkJ8Af{* zLHL2T6lzU=Sg~y=kpJ%cF`~r1rgm6BX9NlhI{LA-9oWL&6)fO5T~6Ysha+M^{O91O z?AJc&I{_`s@yD8A&{@G*TKxu>dizgu>%~Ue*+JAafa6xh-`M#f=MT(72g5(ZJ}Gfo z{)E#Bs8i&aM7X?NR_PGaI3!I)0fEA-S6(sQmX0BE6I?}c5>i^schXP)2TW0449fVV zT*9ozzag14GKHwXUwP;F$+uD|b>t(NwT*rmBRe^O9eCqS=Flh}D*8`dbpCDgJYO{;11wU1W|jdqN5s}@ 
z!#b`(NF}gn>#9{AG_3xbep@6*iPXtr+9#JF0kcaLJY@56+XFo1v~Q*U(_3aM6a3;$ z_FGD37Ks7*bTs%nmoy*sDMC(7S3GxCQz+A>l5AK1X5M-j13E}~YSJ+3bu`%FP96Mp z;Xce8iH#JWa-jpcmex#CRbi)DmYU5dETk>J@Ok1#WH_uY0aK~irqz?O@dl)x-khBl zr;%!cE1=oJdsdhgpvJ{Z;3uKcvi`gNYJ1M|c2X1O+I4r?u)AA(`*&7!Y6+Tl=jW5| zwJTfdRCbq%trrt=UP1(n^9ylO*{YtN#y|s;X zThr)*yDJNiQzUuJzhi{~=_#&?l}pubAph$8<1IzWD|7Z5Kj3zPLg-kn;vZE+XLkSa zaOIre>AC!p#bHe?NIOi-<6=FD+!^ni$2Z6>Ex!h-4FI2LSrAnA3*xIR#_IxDBR`LK z9*im(HW&I+to!tcg^nPuUZ;-L(P@*?IY%yHkZ{Pv=cXJ(uFZ#@r+Us@T?+GMm4XFv&vt6C@$5FyapTPsv_J8d?x57!aSfJ2(d2@ zhneMa1ztt)bu7I+j5W)~c4jyqx~d8;W>S7e;8uZov^p=cJZmfR$*1DmP0_txxt z69mn%^NzVScY4+M_@DwmRYRO4x~aZAE4w(lZavOGat#-N&rKJN z4hiGfhfBbP@>5=|yU$7E3{8k1?VmZ9K1M#+BrRWmM-|+j`k1a5Y}mq#dx((UY%+`1 z770?#p_C8=@)#vVE>l1`)i22u7oHYm@cAE^pr=@uVc{c96$HoVfi%hy@pAN;o$Qv( z@WM0IftJ@?0#|eWXDG!?L$ifks#W|p>%b}Yr}3H!@Dci70Z36f7Q7qFfJOWJXaBlFZsgmgbdhtM$Z?-60*nGW^POFl-K z6fv5`c*&Rm~3@jw|k?(JSW?ML~0gNd`P?;CWm9GFlleW^{k^L zjF4zKBF@CNhf7B(_K9UnbmC`zioCX4bXiih&+wrk%3G(tcnb{rp1M`fA1LTQMeL@& z7J8O=eK_EHJlo>vF`4@1QBM6(YIHh!2uBBhX;Q)=^AV?}G`juEN#4v;N)N6d8CvYa zr!^78a`v!*(-~HU0?%ovZL`VC_bvoqVAcMi{;!o&gK@tEqo*O~`lJT~g2P-e`BVP2 zV~Pa$)rNi+*avwiVfDML-g!(dCS@DXX~JFQliOVYRQ4UTvh0bAXZahccCgv$1##VM z+&7WjDA73)us!fzTf*E;b}s=IOt6Llj;$ zEwG<)XZ8Xw%<2uL^as0Z^sV5^kGT2KAg*JPX%Qp&SbdjTR?D-RAUX%`3Qv5y8*mFB z*UHhA#-7xNYyK=XeRsN(%fdV*4wUOSt2Jvx2>Wz+3Zu zuj(1~=MBI&mqM_(XEB&ZP?55ffT?u!{{f^u z2#UjeP6ZX69h?rfPo0C+csgHEF>E@1q zr5iL;kEW+`r2|~2+#~s8q?;VJibcC0-hQNKe|vI~*B)O5u4&?9K+&B@Z1$AX1Gv%#X@w7xe> zL8WR@=}g3wPg7Gknkffp)^WS!qw3|yU17pe5bIY}->f|TVbWi)ez3NK@8_TJGOJJa zFICj4zJao$D6D=DMs(h*-01#|!pt<}C?FqIx*H}xuBclqaSOpxDmu8HWUvRF1_6jO z3ev7PD0{lGBzEHvDUSv2x96@{5*LJh-oF~0*_KMEB0-(XX(5wX*YOM)6 zEx3xf4m}Ntvi0Ely5L@`*$%-jEkV`=QD8uHydJQTvE=iNi6#@7 zYX`F{!*oCaf|bC0(5Grwa<`w}cPq;k&^1ruOXzyG&uF9oBdy>jX*)_#Qqs3VTd55_t@tXI( zqM}_Rq#IFWCusEYz78+hN@|Tq>0b^rxI^~O$|57r4c|*>wzJqNKuzPaycZC-S!fx* zJD#N?)#bBu1)x5lfUQ(pLP}Dg?;yL1KoR6I$$_9TCMOeU$&QIRtxy)YrT8=M5=UG5 zWS>8_5|$)8ux2w|cyK=Uj;M=UvzbO7`vh+{(mRRa`)WSDY#y^A);g5lx!S5k?Cji+ zWxJWrbZbCJl5dHuQ%4o2cUD_qq$Z={E3_-;m&&~JJ(U50rm+Tu$d*6Qkax;+eLZ)t zLMojGM=Q7*)7&i~&zDy;{(vl(0sz&DEaNHhx_&)kqOqIlpRG)G3z^chQ}X0GaaWll zvk94a1?V;fVfn+9?VVf;dpeWZ2e3&{_bG83&Zql%^7@{&oJOj zEBt$MXvgOvxe}Ijf-^;Zf8Eg(aUfb+0*3Q^|3iHtt(jbI^ApzNRr)&&nl}bzy33T} z3z3$R*I5bpPb6O^xe7GF0v0s=jDcC#sIn6^WfquPTd z11N4(p-bdY4-T|2pN2=8WiEQq@4P0_9exC>rYzbEi4eKbYfX8Zp%S+u4+Mm%vR2(#X#q3Mt5wr!X!^l6xEi#7}RY+#|Q z^RAn81@C@I*^JU87t<$@B}a7W7cDbStuyNC?R%83bU#0a=g1z(xvP;y%r*eTzzEs( z8)Q(BxJK%r|J${)jqaDVv{>3JsW>W%`H3+cas zUn~# zbD7FkXizptm4{$<_!B<P)C6rm2g){}H}VB{ zp8UW}2$OI_qP@8T&TWEPs6rLniFILiiU}BohR)?Eb}$=mI0NHaV_+Sw))8mAbPWHO z{63Hj~O{Z2u~54Wc${>b39`l7oQ9afcU&6?^3?J=(CsP zk9}$HmJ#-;3!6=qnzG%5?a5L+rIh#iNA5EX02uwsFvK0#Ty~@X#HD!P8EO2D zcM9hu#=7)GcsXZ^wjRM0Ef(LAhu`Q@4{Pq~cOw@s{jv;9)3%Rwl{QP@Ieew&<$YT3 zV-1hD7wf1bI4gNVfFMvziUq&uZkL`I7Y z7!A@PEgd4=5)Keafe`{yA|NqR(%+fy`~I$Lf9Q41xjoN0=Q-#5z3RZ->BlW)j zzqbz@4gg;jqbISMP*@z97K_zXI?L7B?qZ*t>w>Hq1IGX205iBkh2EvCj)pRX%%-f< z$Ou?<7^b=enB8uaks*EEu;9Bz#-i1v#Na?``lhLOtE_u28mbIu&4qlp-*B7CsKAQA z@80)SPGIg?hH$c7G%<k)DJfs6ia`q*x zb)6Sp6QupNDr}V}MYv*sX?&qOa>tTy=m$m1m_fd)+cFJ4o%=&W?{p@P3ENO8!Ge9a z`{p1BnOMSLtAZ2N0{3$Kp2q10;VP_@8!?PMSe5+GU;rzV<6D+F9R;{@jsrc8-qPJn zX6pY`zTesySzH;WMzH_%*MI3v{+30;n3aa+Tk_v4b-JK=UXO8gzrNb)HfRWb^n#g( zv;J1?*YCR#Gx#ymNxtTL{C>bBuUay~f#Aw^#O0Qs+zmWwCEFa87?!840$GNi3t|Xf zcHN{x)PHbmKbm4uz>tCD;*~EgjtI6l#)51CiiLzuZ{=kCtM`7s<$Pm6{w9f(UwuSu zp%S?0L=15wsfam50A0e9>)ri~X^R;Ebk<2HLwVfHLoXXd~MwWP74r 
z7?boUd##-d??3GuuxhCh7yZe=_8Qe+s5gMW+bD6OZ8C)r4!o&`Pi3BJp!txw9FMb-qr_PEkx>h`&_mr3=AF*`cMXj&>0|d+Mtn?+U=m0$66T zg^Z~?tRDVj#%SqX< zH1P;&yI(W@ggZGvLFAO#MEItJnRE9eBi3PGryt3v#F*DW*336&lc@egrcnLY$E%DL zZ2K(bqYcIQMqoh;+bV>s62F)?G+e(imh*e*TnFL{xG;oW3ZW*6 zJm5;DXAcO^aJ>7O;rwq~Rq*1Qx+o7$1$YlvMAywnldJGW=pO%rdRkBDYm=T=Q3sm$ z?p`)B3@FR&C&&;h4*cmN?f+!=!~WWancDE#%N$~Z;#FxY#9;xK!t})6&YI;lr9qHe zp46E+;Uze^foO0+-FU8LPLywbkP_eFORujSB{r%bAhdgmSdD}9h;N^aOHW~H-zg)l zQDNSF~h6Cp}=Fb0=mvRtuJU`os$D+@WE#XiiTJ)BY=0vz4(JrJgcZ>#ZyxzL#AaOOr4< z+YhdTo7W-v5%*u93r@P)?yWefswM~%CBF&3wiUHyr%%V~l1PJHhHM=7!S0LFI6UIT zKri$6%N*bv7PynKw;={TiK$-KDUJ)s+@>G;0@PTDl#XFh#3SshqkHwXOM^%JL9LmMLDR&_L(0|J4k zgXYJsr{)asAHn-X25wxB-wurEaR;)?T& z5vQ@r)X!RSeNa~-`a&H8{QbSl!5rWJv*?413QDM0`9Rxy3pkaMiWtk5pOCcqSAjZP zWcr)CXl&>vL?Ok*#!G?byXK&g?q3(i7j2XNoR`1GI1299)Kd-$;XsoQ13?j z_5PLDIR`5|Io9!X0ldz>@8UPyl#KH_SG7ATdsa?&ZXYS^oq>*?iOiR?qw1h`@7=8$ zkgxqm3BuDi#X$7AB>Hg`T+ErZtVc^jB5l&oiNxgZJ750C$`pa7>Br?UHq8R2)b11 z)Xn6&6tFA2m1eNjGfQuMo0xM-MlrTjw{HiC6!HVcD~ zN-BcC!@cA{IWQ<$SiNw~MCo3(3E+}giGvZR)Mf?UAq?BL>?L4rJH*P%-^#Alc0@t1 zgkR>k2LbP?vk8d0Q)Dv$&8@|!AUfk-5Kj(3Lsk`Xs@*!l0482Jl4Y}JmM?%gZYR!f z|1)1;O@(+Za0n&~tphO{I)Y}(mO0I+Qwf@EgqN)jM3ruaov$By0Q3kIM8)t^Pt=FJ z?A6cSg1?;Z$-4Xi%nDwwZzcsw8Qn;-$8fJ^#;Fc1+1~g>4T;jEMse~S&P;*!llI*h ztY__Wd5mcCk=LyF?+-o8*q))8u2&@;o(hB2&MC3z_ooQUtra2SgmG`WngMBZn*_x# zXEOiVv#%-=SJKDADmAS;UlmIAR*K#lVh?yuvH#3>*qi?3`>b#_J4-5QRdnIenbepG z`b>JZX0LQ}qL#Zbrrz^DE^H)Y{o83a1nm`vYLFr6at7m%=R`y7Ggez_EwAG{CB3*` zLHt;oy6zH)L`&-Ux(nBB0DqV#kIhuuI5FGqnVAeGLi1vzRVniYAKm#_TwvYEdXMxg zzDixTzdPw7x$o>*cS2rq9yf?Q|0}t8I=oP;T=1@x>ij$37I-$ZgGa3Y;npw4!I8#S z6&?L4HPcz#C^FAVXw-1?%L@L*R_mD0G8D9Yt=9gl^o#fU#6;5l;X!9Zhkifay~CdF znIaKa_EiH|x`J9Y7PuF@haj0LNG+8JLV&X!t)LRjRFqyHq^~jvqaK| zkyb%oK3pQXkJhcNW+MFJp%`j2T|p-a z!-Gm!jKU%%SdB2b6{L8?4fV{x@T*OZb9_u16~G)|W$h^qb7;+er6x44htyXma6ve& zeaKFWkKIZ3vr})3^woalHHRR5%zw0TJ3s%JJx~~;KCfdEXFo}?j@0dFC7I2WSmz1v zmn-gt_{~q(=iRIIUi_0hfTY}QZR1(1U zq>+f|x6}l*s**~v!k%V{`hbvx=#a7&kA6`oMIU*)%?C*+)lcvMd%}l(ox=}10{P%= z@H4uA!?Dk;$cK;oe^4sWv2^PMzkaH~1mR66k#}ZUCUr&1?k`di>BuibBK!$xq?!oq zzi_EBR&zU$VhE%sCBGge#T6&F?b~SI^+^uOS20%#TyX!wHxJkloNHn?G9(V^+g=l> zZqE3cZgzP{`63MhFj!wG+!b@7RlO}wiV5CZGujMo1m4Pg+KHPZ!r8BT@1s|qD?g}n zrIu5&muD&PllNz(fk+@9j@Q1k%Md~kg?{WugN=ZG_f0Yr0voA(1e?zKX8LQxgNpC7 zlnGDT$>#`!a3w7931L!{YZ0kAZC9K6o%g+Zwg!*jMvz+gadV#(=^}yUUBX=3p2JYT z`~XZz29zLXf4D$>p)Ti`x^CZ9a#wr%m-McW=1kRhb-Aw|*?Cyeq4%IYB6T9LkHZEd zwIaQ4c5>w3U>%_7weiSeJ%Pi<_H%8~LKSFZkDJxK+x$$hg-4j#4Db`lB2y0@NqXsd zcq*mF$B&nInGGru9YzCR-T9+GnoH6V9{=2FU{bRU7)MDA2!KC4UO-|ICO)d;6zKaz zN4O0?I~;9x2azM^N(7|&s-dK{KxaXPxfOjXg(P^Uz}KBj8Zas7iVc@u(3@OADpMrH zqKWya%mmw9;Jta$FHIH2l-3C~e@P%19LTtksvxRZ(~zW$kWcsq3+y!~nVbdK9PG?X z(qy~f;qNcrk&A@pwy7g+zUWJPB3-**=n*_&ay$%O0lLD`jkCTcK@K52FH<4_%gt@&WnNY9vCfHT}>dv!4_8eiOor+6|W{NCxvZvi| z0+Nqt%tz1aNo&*FF>rZ{6-5K)GJwiY^WV#5Yii2gB0FCVf$JLvT^{xM?#?j}6L_gS z+~j`n-uLf#YKP7-#DjxEo?I?Pth6GV_|z?|D){W#RT1?4jrT$RqP^lTqpZnFT3L@Rj(pi z$9#t;LHFL1?VH2NqRWNr1zaB;24}Bn4%9?MV{Z<4?c3(&I7S;-%x^b#`-yPOizJ7%JBUGtm zsYX{CN$~i~lo=kHAIeNh*lM?3RkVHw8QS~y{=TqQ;D$&K!JFcE-$c$0)t(kpO7xG< zXx=YE%uplQ*lCt%Cb{_On>SKAeLiB8mVIpLTS)*hB{XVo#E7l$2ECS`oe^X>-lHm|;29{AZJ^=o27 zasd)!(|1r1j{L)^U?``Snhf7Yx=ZB{Knj<0SGbVi1^bC%h<}!8UBc_z8gw1U~ z;bq;@lxH>VF}kbGkZES9oky4@NmRgyOe_D;}>a~9P-%cnhkI1<=@vKS6h5JMjYWWlxA;PFM%O@7kI5is{Q;%TpvLN26Y69E9%MgBl z+f`w^kFW-E(%DB#d128`qHnfT1k*zF$+X_!Cw&N!r^i~b7zBG_m$I8amp>*@6+Cf7 z$^t|1X95ex(|+a1A4<^<;iQj9NzL}aVZ?_zZO`_t8@kl$q4pn~>s`$ zuc4RcWYo##XPdbZo=7o3qt&A%yT84d*{@1&=F-b#2LJL?Y7S8=1!i#6>j50($qaxL z=?{AlS!#7;lGaz4Dm9f@8IYfJx0h{9X{tB!?XoYc2hCq`E=ixv)7wO#uac8 
zWIpaP$CUJn)-><;SmK2MT%{2rt&ATW+aOuB^8`uw$jnEOtR7)w3iud)Cx@0Onzl^A}g%<+0NA&*tplh4?^=;}gGQiT`hR!A5mi|Np)=+xB)kKYQ8lS{>b#?2LS$|Rx zn;6}^DPCpRXMZd?RjhFXfk`|#YBe1ZAA)7>Zm&n*c9_`G@q2?wsfmC2-rC%p{MV+; zsl!jXg)K}hjDt24ZK_@{C3o(X;kBU8fc>9EQ~$dR?|jBxT7jz=ik4c%6n(yHTtUFU zJzbUP2Y^7wuGHJl*(WmZ@K?}t=vWXBa`qP1f5T;llcO=C08*TJXMpbDSu5XH)2;}PG)M5OiA_nW5Eh1L5_ zO7sLh(xe^QO}(OIIOk=oBX@!aJ?&z;Ur9aM9DK-dMw(|E zj!j_GWP(Ci>~e~W#6yAg1t6(NO6pW3NnRHf2U z>lOSd)UgNA(8~EPVr@#Ue6+}8H%f)7z)pVgRMB@U@Tqs8J^Y@pa?y3!h*;7GAiPpM z;t4ayE6H(U#A93z(J}AdnfERJJJ4{>rG4{3xounTag6g1Khs-JOR|4ZEUP*Cj2F!> zlW-!@foOpY^fZ65erw(i`twLA7SdMAxwdEO!yVfWDha9qO${X?CTpLCW(o zI<)UYtR5xoWuRAB1S1{^9>tWJ)5&jUc{oYyovyH4EBCx;YpAQFojBsT-c&erZAVPExfQS~%iy8ypGuis8oL_G*|ia-xI* zH4`!(k7iS&d4lu$+mm?V$_o8;NtmY5=6CU(oR2hO)YGJo>nLv&%EkEonHdReAn@u{ z>84^BWg)DSzG7BRVvBME|;r|-|g3u-LM)+YvRBv zbvFpOBq-!U4OV7nKgvOBbONXi?t32D3;od+Ihv%hH6+`BKgi&6h!K5$>=86CxkNVk z{@fzoF2`4pNvF7~6$Cm{K}s)Yp4qZg*ROpiKlSVoJ|&`?6z>{+J#3-pmuz>`y;3Ru zM}*+z;zZ)Uev;`u#SvuPBzISHbIRg$y!YD5w&7gON3!yY=f&;DTTgvi$r{ zgx7qz1e`tc9Lz?P8EFsfE-aSK>1D;H7E$-l8qU_SXo2*)Qs=X7lrf6q z>=Q>8wRs(%>W>!GGZZ05J9Xy~VW_v{o^ zg`)eC&k(~Nb5jpEa0E2?T`8XTPFGb&6r*HWB2-2`lAs#TjsT^&zt|r%W?v12Yz?Mv zH3y+^Id-W94fu>M`@_$9T~;U(U1#n@KZ@@1zi?URrF|o0I+vzHPrf?MwP`ep8b+CK znp|x*^|P%+IAnq!UP6wSp=!lO9!5yA&==T9& zPxRd;w!rZ+e<+)kMx~a1y|rpswwjCn`O*#IrxiHQEtu56j`t5U&su|Kc7)QgPpM>| z@Z@#NQQ>4uXDFRm_$TG>T2%7CFwSqf-pdvFOWwgF4~(9CcU_tMO|;PEDD*pN3p9~L z_F@mlV1#FkXNC(4xz(*eA5H!8Ecn^_zU6Bz>8+&q3r=Vc^LbIL0x&CVnG_XRW*_U% zQifnQiCL(M(;V!JpLg011BnRkmzBbG@Y6dNV-Ky>j`57^_G%ag~vS@@|7 zsN-n*HWk)WdRW!UG#Tu8j>g%j_g#-eAn= zQ^}eS7h{&P3K(5YoSj{x=VWA<-8ZUfTl?djPSF9d4whwV>&MDI5wR#qlrJi6pSg4M z2}5*9jA1nE;Yo#Vr5Xx9)+06|6NQH&RS>hLlFz^R(&~ZqU|@cmTY9zDC37nHN>_hJ z`<;6hIE&}O(M*aB5ye~|z3T4Qg^x`|6X0PR0-)F*s`2~qumg5&<+L6ws1(<2pFDgi zAm${G6r5fX7L>NkeZ;@iZtI>LO&j9r^q2ap>&F3(cH>00^b}??!MYeHvj1qP-R>ZS zSNIYek@VJ?O~Skc4=vz=T}hl|)+U~6>rjVGArWEb8-xM&H%e8gx6{=0I7d!1r&XY= zbm?qdw{BvFGDf7P?n`w~Wd7o@Ucs_J$X5wedW&03ZZNC8Bq3kRNF-O^BY%fTv;EGx zi_3XxJ54dLh6P(KCpty_kYloTE0Hh$#f#u~5ZzkD>|2jpnOp9Mx=i2|nnScu2n{D0 za(dGq8Vza_XCX$2j3<4KH)@t!aJW#^vaL%`d8LCjM?JmD9G^VdJ=2@fFqks zBsof@L<3P;x41eAu2)3YD$eeQpXZ$E{Avh(9~*#)_#%u&%5mHc;lm-Z+`o{1k9$Yf z7f(Rqz_tcs%Oa1NI0psbS-*+HkQSkWFlc5Q4DSlh-mAR@eB6It5naBvQ!?Z!0l-SY z*PZdcbW`eV9Q>E4}pRF4j1#~*F&dax-7G&dV>2zVsAy2>{EbK z*t5m?_WypY8Z}aEn_$vS%2uG5ow$P1woNNeSc#m$l#RGMc`u>+{u_G*3K}Y@;fg$4 zL;NL(ItL{+-uC)!;>XukV0{PbDvC>Y%YUtzuTLGUX=k)eBl5l5NHps^m zOg=sH2Cbts=^%WZtF`{SdXtwry7aVEs1q%8$z9Fw@R2b1TZ1Ehyyo)~uOA&yz7xu~ zK)uW6;|c$!&``ePBOG|KzHVKem=is(=__zYayMNJQ?~xkET(pD&=>i`>np4;X^&W& z%*lk1o7uLNKN4Gk%)trWxbD^ z&U09tUC8q2JDCI-E>E^GF55CRdJ)B8AXiAn;O_3?6bVU0YE0R95<6wwv2@KRvJsGAHf-ov6PgwDk=7 zrK-d6s*DL8yDft&Bu@*gVIf|EO_p0vBs|UN1dva^IB3jXLHz>uJSm##j~_QTSa; zxVK`^c8Gr~)G)QJYRi^K*plh}`z-8*n=ymXJhQpGU@rBuLaPyN=9i4AbRy5v5MN6o z;}}Pbuca7QRVdp^i=e-Gvy?!_6);Sn3~gWkv2|fyBCm8St^$pKMHFz;K=jRgo;Nep ze8Fr_@5qQPcqOSDP3e+Ld|T`moD$gs1?N7%6E$x;P12l22ya!-Xgmd58&C+T`fd)x4Hx=2aKVIZgDm8v zc}>xUt3L?rgA3nQ|2|eW${@=DHd7mEGgR7~vUk?m93>J{E<)%+g+Gv9HR?38KXvPq5?^yQ7FJkcQc3ntmi!*9<~U+b)L z8>-&=|Eu1}qCoM!pvMUhTQv|I+jKb^_`w{{x!wDmv2mtX6K*WLsp>~_7`YY z_pMBY^Pwy3aUv{qV-fD`e7gMB#_>hI?6_yCG21zRFYF+7Nii1dMl0Qz@v7xqU!W%@ zm^ZPDX-}|A_a|@hZ}dlqJQA~Zs|#8RrQ%)F(O3Am8B_eX&p5vQqmru!y;J*Oqfnis zR}*m0#isjj(&gv)+DWp7*{-I7r@+lbf;QI5^E5TOg-L8}(F&W1)xt6dS(l~7r$2}H zwpB7l=(k!&DB4BRk{C<<%y`dwe0rpfNtBhMf_Zi#samYsy-6az$#gz_AhnM06U_2J zQLo|e1Klf~pfRolq3qdM+!vB$YS_0M;K<{?Veb!Y%Oi~aHeuia4{ip~J}5RI*LfwF zvOE8j>JLLVy{ta(OHaO;oo+V!m-De}GN=g9bvD}>k z9{~`z|47N1j!qxv|9txJr;9N=CaqvnoO+D>o#0UvV}S2Mn%z|3SaktwN6nB4riv0z 
zSxQpCs$sZkGd6XlkXyOWG5hJoz0hRa}4hQ^{v&ogfnVVp4(J^2^6oDJfAH@C&yP?FaHFvNV`RF~7iIJx zSO1NTF3zE&9P4DYr5)$fn6c}celdxjvdQpnxbj#-Q$>7ErK5ngNm?0bngs3cEwq#4 zd2en;&MT-{*M>PhSi9d4v7zQKSYE>Cye{I3f+4;5TO;B*>34o~^r&&NEKcJ}^KN-T zraGoxym;ra#`*7ifl79nR1k(4wjZG<>%I5s*RL#LiLk@CR5x+Hu!UDo+5#iL8{?2) zXh>`nlA#=w6q_mlDfrW1_jb(> znV^`Nsd-7Ib^yt86DvL1?P*u;@Zz`BOe4+vRCE)m#c{1c`6%seNeI?zf)$4!x~2;a zOC5aO?~35ehB995g%76vQ||b0886=N{#j^2pAR_Vg@py`w4O5}F#bpo>2z8H@>x=0 zS?(|prQ*4}P-Fewpf5tcpTEbqapqq*Cos^e%9vtA@+t#Ibx%!O%4t-Urm9X6V`A5@#nOxCA z@KGqz?x$VM;3Zc5@nHEZ#?8NgXoBDsAm9efV1M4fpK-S>2?5J6Xfg85<(`b9mn zx#)cub>+zm!J#r>myp0uhIQ{LyTk zNv-!T!UOnVlQ9_)N#>WE`h%1n&)l$@dhjPv<5ZMSkULH{F?h6H0Jt<7<_WUKd^c} zY6qLcN}3Kt=U;wf`IH{u5N~gr{~-a-;_pe&Un<74(Qqs54t}GhXf$PfnOV8y94I{K zunIeCtbX~%4?y!3RM=qc`v_k75849?=pfMgs)p|`wE3kyB+6W zd;l1}`iqG5%^=AufJif)(Mu>I4MF5!14IbebahU#fdu)wNZ+OJ_X~9b4>A{%+6F>= z>1jEZ>4PpX7_52%AyTn?n@<=mlupt>?!3D3kIr6F7ql0`*t-%ZYZmMAK(iFI?5Zc^ z$Txr@($eG8b^Ij4Fv>N3MwVB=&e1BW_l$Sl;J2T1+NjD)f#!JhU8IqMpch*ZN_^V{ z0J8sy;|L@8kA3?j7qmddM+F#uod^dE+GlnED(bB_gKPeqPtyavD%KK-G|6+9B~AJ= z2$iGt1<|<>DgQNYbFR})FfYD{MD+yy>zRj8nXCblu`0D{2bi4Pi99h!(S+swfUbR2 zoAroENCa_yl$dkD2F7C3+oJPYu+2(+@m>@w=*7O;>eKGCWG(@bGYK-#v;aT*d7G9@ z^-$G=pf5497t$4yDOuG)FJGy_?-DN%Id0B6QO<3?{_@EQ1e@?JIH)%{&e??(LNB>1 zKFIH8kBJqBc?*&Hfn6bk;E0sxX)IPhfOsdAR)u5PN8LobD$y};JQ`SBBe%=-jdV_q?fN%|ewzV=%?w1ASJYX~gU zo|D5sFR!Yo`U+XL8FkO#k$el^ky&%>`B0|1pGW(DSIEZ;5^7CvgcI5V5`t@1mj&yU zI_3-Tr&e<9Y@A+;IN4P+cK7leila?DlY^~V`JX?vXVSOJpm|7hPZ-8eCeZ6K>_*~a zWnrhsIeVgd(9at8AVdb|UWi+WR|tDl%9C$Dsw1}0{xml2SV!s-o6is?U$9ndx_pm`e%dOqm5q-^CY{}liY1HBDumQP1|3gSBsfn zItV}e4q~;55VMN+YWxtq(N~x71#PKpgqOK}UT7o~mzmK=6|!6r)qOh(e;nHf7YKGu zR2SiLAk9E3=WFgMyl5NU$IX%G(pb#tquk{}e^jD^Aydh&hgv+&Vp-iA7P!k~g(3Ac zU4gFPonC!km(kzv8H_-pwxZMQ+O%)phe-ScDm{S(1AtDj0_U_iY(_y1Sf^4vNJi?P zrx_~UyofCIdV#h;6|Yq$l5dVrGM*Wi~Q z`UV_ZlmN=>KM-z>)iA=$oH%&O^jQ|L0D}hIRppEwbw!!zh)rMN`~ZId+~ctpUvS{y zX;JPLq8XgAHF@4@&o)^w*8WzXA!w*3xh>OvgMo~QFhfCZts$)OOz3JQkAtmncZbi? 
z3#5?L00DV&UGgbdJpcs8;=3}^*dOy*^{N7GOIG}geMd6E8>EtOYpF%cM!58fZU1k) zyluRrt!`s%nHTIa+Tf|&HcKzf>Web*8T|x6pq;FEhfXnNTdRF}rZqzuwY`1l?0WAW z1alkmx|*N;U7Q@iU27p~BDzL=kNCxXThfdp$N1;39Z6@@dYm-{DaxPPGTD#<&3SO1 z9Gu?%3c#3oahetH8Ww)Na4A}c=h->>f^zQPZcQ)(lWy5LFku-f*q1iB6k)o6gDqzXj zOB?W0xSzMgjUXrOk^xHRyGv_WpU(}Bns6~?f)!%L3)VxZ-JaCSa}MXcZQMd5-qV+A z@5OnUdrR@5i~Xq|^(U?;^n0MBaC@+ZNu+xfv6-*@9JlJ{#eH`awH-yU8Z~@Q_$K%1 z3!-X%29sFUhXdiTyU{MQ1)(}f!||shlxr+zkZYBp>U+8*y(K`+-}zZ}RGMsRJb(kR zW9Pk}mozE&K6vi=*D0WD5fzS5_R8nci6-3k?p*D<h6oaw%`;%S;KRgAj?>2PL zb`7Lv_KRR2gfU0-Cse>=tA?lb_s8eC)~Eux_~3VjxwsE%87y;cez5yTcfEz5xApaY z6Xh4R5YQfbjjnj)bZ6ozqt^$ArxSEgYEqhhLQxjnNa`HCXHvh29t^z@ew6$gHK^WC zbt7x03~!6p`Gef!MK$FR{t#Hm_Hmc~f0hzJH9}}osOL#%S!z#Ho11l9A)uJOkg;k< z*c-9Bv!P48A%^*CDf;=!&c4wn9o81T<&D`?J%2&0mRHPeA6A2U<=EGOzUQyZRCMhz ze3nIloGmV8kz6!AsFH@YW$^<@(oRI4tHGPj_jkPx_A zF!K5Di8~`zV7FO!p=vMS>M(UPDN&!PTJHI?pxH#P^2yMP$+iaVtTBB9v+1^NlfHHqi~T!^oJZDe`W}Ci)6w~7j&6fhmEX75t_KfE|4>)9(XktU z#dlw!4b*dL)RHLPe#0HY7J%RnrY;~=K4J0<8CEO=`I`W!^`999rn#<74@Po?k&~g$ z{Xz4nT4jQUxARetvUSWO#z@+$8Jgah%?Q37Avcq=qfqPSn??m&k3aoDJw!2mzJ_vs zEclSP*JAYE%kZa6-3d^CBYgFyzLqIOX!($lu!R5Ap)-l3QN*--xL*BxdAQgwDg!~QUXtla?;8ru27kJYo$6v}4=am#1zACIpz&uJ& zQxaDd$43)dKbB?pjA7*c<{10CqCfc>?^O*vO@kO;^fOye-42O(1vup+X&-`dX@ba8 z{5yZVxgD9B0_(|-;x%gfj#RHIFhNvHWb^B3y+V8PjtxT}877&o-f?(*o)^w=e=b4} z1>1jOLpJ_4jqU4u!r&=w?YQFs(cs4cCLlTGePGf+lj`}88vOIJr|Tx<`1F#z%n{c( zcuuRuI&PV7_NPU^F8FdY?525T9tM{y8_E4d+_^LpL4%2LXH zyr5-mvbd^nt+maG92NIel;hd`a!i&zM{EKL6x_{FfgFKlnm&8;_495H@(XJPzVvTW zO^@_I2{?UxkKma1AX5;Q_HgNhGhOJy?3`!MgYSY@o%dq+G$u34p<$(Q1eawgr5JBh zrB&D5dQ0<{vY67m#=S4v-x%i#nKmSfX3RHkdo5C}_;m#8xX_+a%bYzwyJtO^*%7`{ zxVYekzLT*%RO5z_g6mIsPH2)}mUeJ73453ZWOL2jNn9N&Isi#Q5(@VyLSlZZb#e`~ zra&oZ+dWnM-RljlHcs(-??(+&w7N1{j_{?0@^JkX1TFH>8jzn+$Ih@6@)2 z?1ZSQ&~j8af%E*m7XAxRILu94QieOW{|nf%G3xn|qV?K>pJ(YL9X&Rt%xKy@wNdcJ z`jSAfB*%#LQ5$5w$2^jXS6nP?K4{R%%$Y|*;;>#>;bl9SG;0`2RrG?TCG5)k(ATB? zur7APcL@XK-yMg2>&Eu2QG3TM6Qq`KTf;uJ&e=1IvpJ4Yh%pGU3hEV^bJjkF+>ewm@KV{+DqSz zNGonrco@dyqhHLy+`?G37~Aqx^r{|@jvxS`?n2VQohid!&A&C>yx6@jcw>OGE8)-D z0-0Xr(0WIt&hmnQ4CGEFp= zA`!}ga8x^P?X{kfO!R1RI0r#{*e?J1w^l6#`>y9!Vn*(g>6R%_xpe4h2HL%i=y4mw zmXic5l{?o!9u04&q}Tr6!hFr{sM*~o=9e)8*Mk)ol_b|I_}O6?#X{K8nkc%L@-oL3 z?j+(GK&wS?0XF4Tc^QjHnlj_gu6_By#v|FJN-L{msbAcrcaH)bReo~>F4Av#w@28; zxy&Hw*U{i!{W5M!@hf3mDli!ztuC`1<;J#RHgNX$Pn?Z!8+6t)6qB$@-#DGI%Q0Q> z*mQFy_g6C@bJzw1i+es}N=GM$_><+{X&oPtMEeDClT`awmy2E?ldi0D_iarQ#W@S&g6t zh41)73E;`<`_WC5BD(e;JNLJ12;wjiygv?vN{ZdF)ZN=odR^Y#pZ+KrLQlW8WB$}^ z)m;Vbd)+X~Q4XLf@aQNCesMv0dV#K>E7(iW`u@z+KIcU*; ze1G;+4N;WWrO>_VmF;gQoHDwsv2t8|;(aPP5WROW3y3>`s4!0e<2f}yp~;k=^;f2! 
zq_}-b?P30~SjZI7oA*N{132nAPsHhZ+>QJ0MJpUPhq>4apjRT^Ao%-J<8u+_r@ASQ z7LyD~i!zrY>(lzut{CL)%Aj9THwpIsZ z&NSDe*O=5#YO82VocGyBfUzM>Ks*dmaV$F-Ze6yXd-y%aQNc6rJNL=A0PlcPrnLLD zbrbI-R@jw4l-G6<)(;KXY%0E!TUf2h4=glh%_3!|uFdEJ<w@+ox&(?7{z!OVD@RFx)yR^;r`I*jsD^XaokBVI6LMGx*lA4&p z?WNSooW;A_a z@$Z7WcjgYCt9@WT4!|b%IRxjr$j^anIm6$B-R7AORvQ;jULkGIZk1dLCcZwDO|Cs| zzR#hv(fI+h)${ANgFeUTMs2_BuAfz!P!f7I2V0&QImrslC49nB6!F610??dNlYCQ( zylLkoEcXR-PSqSnwtwN*v`fa7WaVGf;XrgHp_%=lL5;%8FM*%5b7ebM7aYa@3eZ&3 zZL=ZLX^-wJ%HL80B{)NnBj@@2IGAd2n&Jv932BG_VrOh$7U^n=gm~k)huvO&bgmrO z$9}8++n6|ii@A@0l5~*U%V8P)Ls|jj!^~z3wSd!0A{vT!wNtz8e}~7g1j z5`HF^k4Jol4AB8v)t24n>IJ^qf6xIe@Y(fmbOlZ3`zDP8t-Ea$8MQ0aI)$v7SCx2l zLW6D%FXlO2(7Jcg$(rX$t?Bpy?e~3!Zwk-#J^y80DA1arRW#h;TFt#NDKr^%@khrp zF)x}q+#U7Q*1UGNR3@vMVQs#n$m6lZ_Q^bET#NH4yXPpm_GezZ^XB^2QA5VyS%$%EV$F7 zNSlur%NyhXqf}f*Cq3m!C?|;<{aEP*hfkb{ z$6_F;(BIvqJ{jLdZoz}liywi-Ef;Zk4_dd(PZTbvtGCJ0;Bw+szNWPpdvXL%XAXi^$*!|&`$XxA zAaE0z`-=`KqD1<#BS5(Qcu{A27??MVACmOTLzRhwS*iYMPa zQW(bjo>uR~*%ePIlCNpsVe}d29NP)?LYqJ1WjOd!CA;kNL&YlR0MS-bN|7Ybx4ZN#5$P_bG8Gk*ynL1xW{J z!W;*Jl6U$G{szGhqL%TT-O^8WR|egLlwAauxf9 zTPr&t7gbj(+~>3ZhN}7fpmXAqt1*-PRQ{e0n(yT`+-$B@V)F)i4UVvpZQs7x*G9cJ)1c|@VhBpMOnPh0*F1+=DSELY-L;@SNcY*{{U;kZCq+b@<%4sx7s$9 zd?sO#@Z2M8_ThK(&K!AJvY_vL=Y$@|nMBW|{}Luitul3v+lsQkX#T6fsr4yis}5r9 zBMFhCUzx@h0DcQzkKzB>vDcvrMGF%F5xwCg6AG))7NsfFxNZ`%Qjk92U9ep(ldrW)ym3`pw z3laDKNB9Y>QS8*fCvu+0{g zz7FIUMRyf=-aPp>SW3u8(S1EH)ZjCYG`gCoaeOs9m(Zi!;9oVqo%pL3Xu6{f&l6ST{;WXM?vY)Mw|II< z3)~>y<;%OM*eHE4&TK*osgRtcn!=+r)1H>PahF-$K+4QU{x9%MNxz$n*&_RyHHh

`iS zH^BNBxomW|n<|dSOW`4eWp=k@zl`w&nR=Hwnv*72N4h&e@1*S}VROVwUi^D*GhP3-9x}>oYrPEec{F zm!Se8Bs%gB9Mjy7)@4d1w^cF_a~?hdTsU$~@}sq}ZUN@^L_JJauep?P#gl)S?DTS* zG;dYOVEaZs)AB##1yHZx;mOLIn$}{gJ{zaMbyk`2^T^+S z#v#^}y?hC|axS+Nt?W}?tyqrpvsw-pkf$g6o#CZax0J5ieu$LmZL|HQeIwp?DWcC$ z!$A2YwW&zzi}D|qy{WU>shK9GCxk&NLTtatZclp77fUy28Zf_U);N-&V=R1R!6pyU zFORox?d1Mac{y!1_v$_0m@`-&cuvBY99@KOq&LXQBsxzp!k$f|Zedhu%TLM(m-Gm< zw(@kBw50b;bb{Kn=KG3XPsM&D<0EXtRc5JLKgY%jV*1Oo9i*1)9xZWYE<|K$$W$a%%bAHXuGLi~?9Hg4azsTJR~cm#KW+#z@*HAujvkj$}f zc04KXO(;t^IhfA)ZaR;N&pVi=Dn}4S890?$-9utXL!uCw2LO_0(MN-+6n8! zcZShu|G}aDcIyMK^wWjLf9mpJC`*DyzzHrJ`sc4DgtY`ozjm6k9S}b@AG?IV=3rh( zl@LiAv@V$9Of}kjTuePbCZl5c;DjV9b`;~NHF1vr^VpX@p9SUHTe1!Nav&u8dad8R zWPGk<5M{>SU|4Ab}67IoEgV}C|wfF+?<~bkZxt2Pf8>{E}1j-uq^CRWZpOAJM=b$6H zXafWNrv>Gvnzm{~vbFQSNnLkeH*66DzUk1-CiMWnPsXX$L>po*)w%tm`dcy;jH#;@Vw`& zn0t8c_qeBxFeyHFZp1gnPA5kQB+fCx;+7&z9;B_bDB3m5N2C{)>3GH5&`mxFF1-dh zZvV&E#o-7`|4Vs+C#xC${K$u4QQ4J0=DLwqwc%fS1k68rC;f1VLH2G4k9zePMzP>2 zS})n~Md3(+L54lQvykEH7b1ES9C+Np4!BrB+OwIqaxH0&OyD4vpPs@>pW@}2>)*r* zhn?VE@f-V2Yg1QT(!$mp8vDeU_w+RrQ|)L&gGCqik48;zCIvwP0>jxteKI6m@a=wt zwEkX)WM#JWdq`r*Ukw7@Y4P6_^rw73?Ny%N_n8Y%sT(NE1qZeDDBTF2c_}x1ezJ@{ zUDDzmmeXA?!<4Dv4f>kC0--^AwAs?$MCFCR7fjCC{L9zPy@_FW?zm0PSB?eP zOfcy*Q?PN7h22Y$Ag*DJ(^a7?PM@VMBGgpp6k+u4FZ2eS2r!u;j zr3gH*cTB1W!Q<YO(D`pBbmrCz8g7H)Em?5ys6_Q$#}E{8wiI|YE_Oqq4k<;XjO^!%1IQa^8Y zw_lg{&pHLenzgc-Nz?HXd3d*xYG8IA^IF`tE|Q_Bn`9UkG`+0-ga>u% z!sU-?6oN$~Q)gA!xrQjob1gJs=scRpHXAYc5aFcYnHOQfBW^Xz_q1O4&@iC4-s^J5 ze#+%oc>5bW7E|$&A$)K=e-Y!P3BoK5PLv9^i7BJhdj&6&j@+RRT&Z$o9C<%Uv0q4$ znC5DQk+kloHk#=@C~{YI=l8y`*Ed+EOpcsj*1C*x8`P=+OR-&9s7$VZpl_t&j@s!K{4o|Fez z14Z6r=lo}k5W??VS{3Q9lj9|_S0c<7et2$#K$S5eBD>J}6&ig1M>k(ay$^A!GTCIE zr|pjNaqHB=Ju~c=B6dMa6r{@}Ww0WWWfql3y^~@~I+ z+8W!RKTwIDY8aN$(aM!``!=KzL31yJ*CV5#Eu#t2&r(Kq*bpq<`7>$Nld-+RvZg>x zZWGap?nYzL+_(SQT;B5i1)^oX`}^Ow1~mrVeC+RI)P8Jt4R@>QS>L?J%bDUIwpR?y z5;Jl*v!*K#dHsnDT1Y**T`d}%!3o}s(C>+1ssc~ctBYDMLukDV#$lgyC2BT$^`m;y zE<+&&7kfT&HgumB`EpZffQF~YA81X^6Zwuk+xVChw3g=zveb2up@Wj|UM0(TJX5~2 zA94TyE!^E{k)C0-@2DOcRJT30$QN#NlA`#;7L=d0=T~X-tg)V}Yp3Sb^6yvGtXNI> zuDqRaB*Tp+J?uufIr-R$^WNR-krLSlNH&P{gB{nzGk9nt`gaTJ{BAJTBYjp1(09^x z747+pY>?mN6C~~~>Twphr_-m4fzkGgwyUvj81Lrop}5dp?H_z9RpRPB`yOK6vzdmi zo>rINp{w1yKv?Ne8MFWMYLm&Pt2P|Gtme<%lDkU+I$ zls#FimZ8FrZ?x!RQ>zb6-AawEFz)!u%dN!+C%$?3m(=gSo`v*PG){)DzV2BMxgQ!& zJQ`Xn?qr}_l(MN&coK~OG851H2_CLQIr4qQ!4;Z*MEb|k^o`F1gnHuE-q2RY8FluQ zQDTkVlEIlb%cIyD+cdC1y6!9B!z~nTaiPMnV|IP7Bz zrPk?C_0&Bjv@;eMU}L&CiM3pb@FXtA%0Iw%Pf+&u%2(v}yH@pED|-xDOuzwyYodFzsnBS%>0F^LRo#K(<8M9ZM-(1h7}flcEf=@^ z9Bg_?=5u%EKK|K-&|gW$@W!!cBOMce-(A^aEQN+=FP)IEF{!uF$knkAmkkeTV6x>n z?$r$MEyAhyDN&b8=(6@fqz0e6SWW0CR3K!!2i7JA&NNPtNS-zL9_j~} zghqaai_1vb3IHSR0$-d1f0JnE+HOY|PVzn=?_B-Aa(ee0H`~6b7<&I~sPmc-cprI((M~&gZS?3eyMKfHlH}ZZ z>Wt6VI$F2WIoB2l@(BX1cRmSa)a39h?CN&(zdGi=;K%%xgi;m+Xi_cz0J;vgC znfR;u;!~t-a7l{z1hdUJPT9ri5G5QZEGAkmD6=ckI~MtPXffI<_)|!SR3*PdY9ar{ z#7p?;kgcW=tAz+A6-6HLv8s>`X(Qc^iBXll^J_LPK&Sjast!cCj6ImZ@MG-NQd$RY!tG(|cD}ivE&;I-en+8T4 zBctTT!L5SBi32u3A)j^Aq%Z?6w9ws z)ydZ=Km1G7wxvidf6-#33)h0MrE6>w+rJ%d|5-_`cCRYovmZTMUh`^#{4#lS7od|8 zP6G&6MTcakY@Bi*cT>h6wh-_GeQfQW1F){~@(?tlC)i`vDHHE06ccG%JTGkNG-+N@ z>i3azYuC7B5~kan-R|8|lCmL!ax9MK5X*gNXQJDbi!55{v2>C8UQXVKzV|zl*~~?e zSW#=T7)>ue2CECK&y_nEM438+hhlFH7AD4|Fs0#tU+>=wH;@@L|H>~uS}rBN-|(Yy z>DhBq0w;CTF---VB6UtuikKoH5<%T^$%>05Cny&n#6D$)1u2D$1$#7A+ZDuaZ;$OZ z+beFy&HPzH+CND11m9l7q|}4nQR|Y}$|c)<+r=45^Xo1ozwAP^>JpsitYxv4(q52} zsH}-dZQj)(zW*-cZ1!+fFT-fRr9qtge{X6^GalrlhDZ4}QAK$*)%+nMkEUi;z?58c zzhEBex{i;^R!9#%pmSX|^GpvWZB^;E-=qB=J7QfD47XEj`hCFY0vm_Lqii=y2JOup 
z-vpPeiZU{*{9@u(vAq*CUtvM2nv97&=`fF+z>P45X5R`MhD`qd`umf_SxD2W1QJ0e zGADSy1ezg1-o1Y=ZvKO|!!nW#8)GbhjlopPo?_%YQ@TuT>+^&Vh9r^Ajg2NIm?fjE zN~P}jZj%Sxrk~aW+h`SqIVAsDBTvNb^SA}CdcwW75i{FBRV|>hRT9@M1h7&d7nKw8 zK170bp0o6iTvuQ|s>lQt;TVDm^s2oos-qO(i(XC8%y4XKuMPYkSM}!ZpJim4>!@mQ zskh_D67NG(7va;ciFlL66;>{?s=xU-xv3k(cpbVvW@YTA$B0?71P1~XmGl07oZr(q zZ+K8Y?aVgNPbpBHVLJ~+Ie!dVYbAjICt4st~%EHp^7f%SVm<(dBPD@bX5aub7vHS4^@r zuId*Vk7!S0@}#q@;;8GtrQa-GvviJDtnB8FirvrGEJ}V)cK&UE0HH^z#zqrB*t^?4 zhg_MHwTqu)QIe>(47y@jNlBbzl?EGig$j8xBxD>AoM^ui%o*C${mn_&|fd}mHGF21WgJ38N2$ADlCguSyAIYy&n>6WFxI?K$dllTQX8y#fVXj#=xV^??PD&lyQedC)_^190Wi zgp@bXd<8RDfzJ=bTyocj90V<=gg!Dr(WevtEY?=Dq$}Lwk~+;Y~{`yElx zZ_}&~Q)S+#_R>8(NbTt%erF9jQ4W%+6u|+aTn@w=62jTtkPxh!<}p*7D(D^z`_HBl zpe`KXz;GiWOdAQ%$2V3|pFk?_w;-p*o$CY{_?4=@Y?_jSSyY1jc4`U=3RpygECc30 zA2QCdl9oKpnVjg@w?;*MX9v6%a_Uq$hJu8=eSH%Ugya~}5ExoQLBLJ1_w502keT+r zaOqzZc5e=zT�IO)0d2xmADe(17l~9L5rOM_8$ed6cuOXE(dniXPQ*qvIBKzpoS% z+qcL@`+h^8s)~1F4c^LXq>KTUAs)z~h!c`i=zv3e5(vdkcw?xGISOB|QMsUr+a!UN z61Y)TO#?FzvW^Z|d5+5zI@{d<9B-b|F_s@nV7jU05|>QM#jh zcYr=Kfp5(2g>2HyDZ$MH!^^3usqiPifsJKu?teapysv^|NWb4nw+U0TU6HYaGjziD z?$2|UkaXCw4k-wUcys6Zr{e$($pXGkx^OL~QaT?%p;ekr?Zf*5I{u~JO!|*1^lJH>VWCS3JWAs-RY{_1P6UF2t5-|3<1s* z8vuZcJ|i>CGidJFX%psr`te_{Y29rj#A6-B(66lXlY4(;+kL5QLzwntaef58-oS() zDBf;SGeDIyXtmxp`X?myYj@p(AoD@)ul%+?6bD_xt)s|IVBJsDcwcpp)c2k69ugx~ zL9z8LZJ_-k7os8A{muD_0V*#~3` zG5-gUJ}UL{t97VH`xKO<4jkw}8DJDJ1(pSy%orKr$-n+$awz%ZscSe+Zor|zL+tcf z2wFC?O*w#*S{0|EIiY`=U$NY*EM~{hpWdyc`J#QAZ(j~bLgxE*sg-?7cG!KI`*0f@ zagn217kv)4HGV8{|@VE6#(QTbPTz5u=3?39x#(_%eiN_DyAoLq4BV2u+fy&{1qN42?f z1|gul>xS9i!ap{vt!Gg!!W>Xa*j`QF&%N(&r&S=~`WQq3&+N-~E3KdBKdX5`lQ#4n zVHPl2ArU{`Y?5-XHBGYeB4HThja`K70Ca3ub6M;R1Xowy4L&=1Jo0%*78TQpX=C~6 z`|=CB(qOoe2KX2e!u z?bfR9S-d>5FHF2oWG6bzj>rI6pHZ3h+YY%$QG(4$H>npTjOdETz1`4j#Uh!LqL~?F#vSZ8ap*4c$5=tK=!WcSz90di-04_>O8!IxW(%D(Kz<7MYhhkJa9{G zc-Kf1O$}l`2QNPW1uZlTTY1U`JAS}`=V>|hPk5)dOgSKF}L4zttDNg8v|MIpg&E^h}32pjLN%9KTmip--FG8!Z=Jf_&5FFwKYqNe&YcrS#_tK1^#s7 z$K#;f+-Sn@3X zvuPm*rocMT=Bv@i7bPm$B7rq6X*ezNo_vbGmG>3LEfhk6PwF%gY$Y?bw_|S7+0u1J zx38kCX>6ZKDk30lTMIpJ2IL9gyD5fVZD>YDL-?t%=(iPh68_JQ-q?Q7lH+VH$ut@m z`uxqCS032!EY;IfOljL%BVDf@*ljD;#K01sNwwFJ3w;|TCG2knzbB7%or8L|eueNL zDVjWR*)tNSc)?5exQ+#t8Bdj#$B`q48`red?@&E_p_&PMqh^KOy%zFHH^mnRPRZx> zMN72&F{@h=>aA5cBVxc%P2|uk?=Yk{9Uhn>?~`(8I!nf%wl^m^qVD%TOe`k$3$U`E z;B7G4>`F>(b+yn;i%uw#6V$7=^_nm)E)RxLYhez?M+1~&9#}j#?p8UcS-kJ>6&KXG z`zWX>!}LYh2VP`wHh zUVpgvAkR+%0a+O^Bf&%d;f!_GA8l0^?09BtviIbv1!ovAG$CO{g%!AJ4ub!s!Ne>9 z1EKo3Ib2hv9uCb;a}RC4`7%pa;*QKQE_iF{jrXqtBwI$IvHsU=l{(-Ui-!*Zr_}n> zXfln#k(=>pV7_oEgd$ob4Yw7Z4+6;=SL_dg5BCm5i=sh!w$^{1apL$+Z^(IVZO%$@ zBXfipLYV2Qw(=_KHl|5@=AHxb#=h8_GaJ_)%U(2xjCS(9x7-)*rx{j#F!DNfm=SyJ zJME@uT<^U&%8TA-ls~MF$MWGWyNG{Cpmw9yAfqa?+A1c6Yh1)Fa6`Xm#NFDZ-9;*; zv;Vvs(sXm)D5RS1hB5V+5qcw`a%sf^|9Pr7ZBtg9rS8nPjuUnU#au8$I@bt)Dw!s^ zQiC&dbT(mY3@@5hqLNaQhGW&=tx7oPC=7mS2REUD&I zAALosF-#Z?wmKuzKWkW@I~P-;s}Lr4$2}_BqxiIEw1L9|rV5VPMwd`sHrQ31tqP85 z^P?b6m9DFxxGo?Gz9L8E#!!v)4htZvO69W#@xZ(#k7*_cKsj@*;* zI-ES*w~~*YyZrHeP~%Q7Y2~esm?t=5t17v(;8<599R8O6=YH>7KYd0%cD#(7N=_ud z3?3N+CR%*0jpA{sg}|Gtg-&IIj&H>qHy}j2+)Cv&+V%v4!^9?y#9YKdM8>AsPOmh! 
z3-<)y-nNW1-zu{Fe>f~a!GvMNVAsn|g<~Ngu*?gSAlER=@Mq<=% z)Z%Gi>iIc|*4xjfopIGjKcxZiyH+D6n&$^+^KT6IsID%t_UY+PrXTn$uJ^0Fx~8JV zZMi6TEa~R$MvY`U%{x*~x|oPpAHK$*EV`?Ldm)zakpI33hb@OT%UThG!o357^H0Tw z210t6c}4Hii_8cu)Q_q6~h_fbiw}}41Y!a7Utdrqr;+>1ggI2 z-llvB3iZ#i(ZD~u^5sk@#Y|G)&m1w18IaKITQc(hL$a~9Tjd93Wj05&8ePL+wuF8XJWS0pM)P3vixeOH zYyT6)u**_8tPfe-S)^ICf4P}1L<#%Bi{x?Ai-FG)kkYc0>)74!^e`48r-l}PHT;f0 zM#9{TfOJgue#6myx$(6PyE_w6H=H9}e8_Qo%}t6Q^+?FQ?Y8nFC4=oG)`7#|tsUhh zX{aU~p30oOnHBc)C0TcO_xW>NFg9>HFH3$(9VP#l>Nv1@k4F_CI!k}k|A{W{l{3!wiSBdPs-=!&wT1BE*)@t6@yF=KXmgt z!+v;~^*5;~MBQM8w)^ENrB2cd$0qjxgu@0?GQR9U??QY2uxXkIYM})t$e(Q5Skt*a z%-p$rP^6JM`}ViiUQfdB@6H{WqG6Y!4X59mVEgx~-9c~>3x;uN2*3{b%1F)y_8aaO z-U3(Wo#MMfyLZ&}50FOEAzBFBK1J6stX}Xcln1RH&A;}YFwp`?*xY;8V8s3%OzCIx z{H@F@{+R46!k?JeRIsQQ;h6omA&#rYH8zpcx9=~0=6u0Re|7i$o7`*to?^58Tfc*I zshNssVjDt|8K}JP6{r4lzAkG&&phH+ET(wKRno7N=5$6_r`up@g;RvZb=8tdrrAJ^ zjvFp{fH=euFmWmjo@ax7zf^CNZB~CY1Akxrr#qMFL)ExMH3HOr(o^x5o)@7q8&DI- zdiC{+iqhDET8~<8!&moDoq=IL1n;TZ`davc_u`e~p%K3nH7>%}OdFd@EziiI9+t+w zq~$XuFSU5CVAWkrC}EL_pjh>DqM&y zlm5HXY?lDu0ME%2o$ULbjJj!uM)>37&ZP6!N)^&(6*ut@*L*2b?q|kDq3?ysNs)fk zT)8$%s?pLSr3^B6)@`w?`}&cR2{zO3=(X;^$}cKg6xMhC_P6}J>$BpCWz6C(+NX7bzV620cRWHj%ccDYa1k8@06_=qdicvJkXP1wHo1KFWjWnUGc|Bx#+ z)Ac*%^iGlS-HjbBy_@t4niF@v!T#~@_o+bVm<7-uQ##13JRBipWKO#9PFUX*JbxjXv$ZQU}Gju{qHGVHoJ#SGnKEtr(AJ1Cv z7at(Qdh3g3(-Zr6iQ`n64C;}_O*_%-beEz99W+cdPaW6mtDI8QJZGEYLVIeu@8gAu zChtOb0j2l%hryiB4c^7LhqQALX4*Ox92WR6p5@bYjBeq2gkP9?rAjDkX2n`6aC^@9 zQL$19Jk0l_ra5Il5tp6bUIoJ6P5o3QE}NzG*2Iv8cRwR)uM%H5H#w>~Zfw?_uZylv zt{>k0)_b5rI{UsTf}Uceh0dN!6KLS0aPD>a*7Z3~vNZt8P$M^*&$4bNYi_Q;D^Rwa zAt`ve&$mS?WZPVcj78Y?uak9%tC_#IQG-SV{UI)PeiU2HlW}IsPQww(vP?v`!Xv@; zInOzLKg+$&#)z*re5hI7ke7*BXQO$A>xNo`29^4mf4{Vket7K7>XuS^w7$VKKfa#L zouUBJz8S&}ig+)-R1!#TPe7mkU?p_oL7$<5?YiB&*v%i#lRDfpVDJn>2qVrBFQ6Rx zUND^4G~}5GKoC#ueDVJfkNHfyLy#`8%C75oxt+^qg`V~6%X3dZ7+`*`j~Efc@O}7M zEPZ4XGfuziw;tSQ)JHGi%9e1i8>4VvpR=vz*CapS3mgmjbB3GOIi~mZZ+fQ%(UkFQ8UFNq^Upq(qN!W(Zn9BCt0SoNwUDgE zL~+Xpvg?IV>TbsNvnsq@DzhD?{+}3wEGtBpsS@y1t{sh4(}L!_gnZ zM7L2_a`jcVNm@r*;iT5oE16{89&MIp zF%i~yP)FbdxJ#~ZPKkJlEch=88DY)lYC*I?e+;HJjmFgw?MoBpt;|G}rxG6GWmo}S zR(nMjFBaqzaN18;)jf|VC>a$C1gi!F#&YWu;hu)J#=l#RagP6jVAR**Hq#BD!f z3kfU5bK-m55twxMdHTzVdYO}IP4L)UOjXkr-nEx;C@}wNlhJsMSq%7sHr&r%bYyUk zmiI%Q%k=xixW>_5(Et5AXULf4VvO4C{UO+q5(6uZ{Xni-+6{{`)$a0x+ZJhGA|e#q zZj$wNEwj|KhG2YU1Jc}6R;WOV0_Ze7ZO!yVJ7)DAaIOi5oDdzGE=KT8Z}WXa`X)X) zj-Ci|wz@qg_2w;04$gT*ZCI+D-)BBkJ>-pA9?ap`JDCFdM`%5@K1;)vt3CM{hO3kd zxLcEV#(119K|J^+&W1c6wU7L!!Zm2UvB|Y!s{+hI(6NWL^psL?l(jIRA$0`k`dP^= z>l31UPomAxivB8rlm6}|`O9skY0#>^%~Y<9@aocQZK_XiAO1NE9Z1fXC4^7*Ey!)e z6`9ON(TZ0~gwu1^Rt6JSaECtUE2@nPpC`B%6jwWlmW~7T%j&L8th$J{BW4JTwe7A?cF(Y{PmFQ69QD!R3-=8*3Uw$w)fsU0_&851PlpO= z?MyUXqBC{JkE;0(U|4%7d4-RiULu?i?`tuu^+*zIg@UDs&YN5OQ>?ycR4t#a8=@?%qv9bXyM-$}L*tT3&03t3qo`0FXp?#+Vid2QXe0T@+XcP@-n*VN!uyFaq+?{>3+;1TadDiux`CabiEiND4Y zOxj9ybg|C{C%bhqvCn%jlV5t|t^GvHR*%6mcIzt-=b3D){K=8($E~C+<7la9A98n| zC`?TNt>Id~ihi9&Jy;f!H70t=L435EzqyL8-{F>|660hT@#aIc-hxKIFbTv$iG{o9 zWPvvIr!08gqQa%t=r+*ihRzC-%CGsxvihW){k-TyN;XYu3PvmEt{f6&?PBNV68N5g z|D{*{!oY%f3;v zSBL7N4%~H`KXS$B8Q4?ij!E)^7Y?L)MPD|KLx-g(-`c ze&V_ZTB$>YR}LKbRQY~>UAHgG2X%nSExQ!2WWKTUhbI?@+^suQ$8<<-Ujqp!Y+Qs% zxa!5B$qK9S?H@-;&Wl8|Lr=ZEleQ#sqpjm_|M z1$vSj!<~XYr_Ko7Nj9qaLDg{IYB$2AeBj1=!tcO91tr{<&%d|N5C7Bp`gNwwL`|HO^o6SqXTq$3kICa3cElRu=E5+_r!7nBfj+gx8 zFoSNgAnyA%z1eKEC4{M z%~cxl6F`wl0v*c_zmgNopI*C`lUi4V-7QKI8ma+(<`(nrr;-o>5f3^@o;$t(nPIQU zK~T`rZpe&Nx_>cSTs_ngvBuTqOc>cw1GR1ry=rWX@I1hfi1r{Skr)u0wW=qq$P$ASJ<8O!o zwhaQ#E&zCoAvBlp0G-MfNI)|f`N~C)2+e`U{RlDI(@f&u?#pUbgP~OsO-RFo>yUk2 
z6h8`L(e8#}b-58`zb*y#xTR~7L0c_cmOab5eK56&xCEpk+uDrPgQm7I(7YHV7xB+*FU&U+43c@wYnLCpM**8+PLR8#^^a$G z>Dd4@X+nPhhbt0hL>Xv%~Tcr+m&f?+zOOVE~q zXg~~+TH+Za+W`T0dkQUr3K$Oknv_XROwB1@EZxDhR&?kc{Sbc(V*|~j!9*>qlJGuINnXCy&>>+iJQgpBMl z<`Z8b(uH|kuFfM*W&ha;2lk@|@;qtF49AU&Gtr0-#uKYQYM{`;iddbWKb5zJ`61}v zdr8`BNg8I*^%(x|>j_me<-7S^Q-EKNG086!NYGq8K!V}c3TG!MkIhb0UP>_k7GwqS zbRIPW{t#LF8Ge2X9LY*)jC{vEbQynyd2(=1=M@#o$e5ex{RH#B3Ui}vy`fT{acI(S z3MHy}tX)#}fAo|*;Ty+@e_v{0R~o}m0bu{lU^_EyQsi>kObqA>1Vdjru;sRwK#;$` z+`Ep%|NEe|_)ymTy0J?WYiuX@U)-M%y2E$$+ME`?AKE-jEWGdj<$mWLGm_?IhQ@Qu z4{6cUNc&jsu>jYv2V($!6^PN6kM~Vz+h!KrdumDI`;2ee)l7uT=bfhom+B51{(E1edy@D>EtkccDbS++y`i zUJU%Z$x1%HshkA8oFsiNWIWNxvoxX=ue838i9Vur(>Ywj>t8nh#64h)fIYl8R&4kr zpO?GPQP7T$aTz$0Bf$5lAk5Yr>@_xEbk50iuNTvFnc@3K$CPSYis)_Y=F zz4H3(DvI9@N>-n#9uO&vAo%s}3~hel&Ec9!85VNi3VoE*P@aJx zhf4xQ%x|a$cL7Q?-2roEvx~BvlP~qa4HCS_X9} zr^J}r@?B?mn5X~wROgA?=}fEy8n+8ry_8SV(!6|0ON7zoV;M>VL5wBB6tOVfv2Z%p zUJxmgv(9@mn z{G_|Fx$>rjBphlx@)Fyvuc%#K!KJJ`(Id6?VU9&YKtVwg9}I5BMh8T81;5udP#uE` z8DQ<4z=v+7uD@#DH}b|RmHO(K47o&|gJ`C$DXq`CV*R#9ia>1SK?yPYxNUiTtzrw% zHo7Z^;{vXu{RZM}2Bm$Y?hg%r_H_ZDDyjT$!FTODP}u$zeY>+UiF`|aa<^%WInLCp z2sw3FqXLE(;Ed&Mu#;$ivq_9z$K1!vD9DWvYwBFrv)iH#K3}ZblH#u9Gadk6S_#2^ z`pg`0l3XdMFl%1s3xPo-Yv24!SpL8Mp8W;)iIU#8`!-Wmd^!vxmb3{d=qb9NE`(p} zkd(s@hUl6HnfWf2Jj9qf<8Y}2z{IV%*lkf6qO^-8>dv|{PH6J~+4~&~pZzmQ{JYs3 zxc_SpFd%0Al$pP(U2cAp2L#Zapi8nS^oZ=foK^${$bd`2#J`5 z?FMhK_b)_6N6xV@knk5&ktW32N-}ajVXE;JGrv@RJ*$O<1qo0t;?;Mi)dI9;TR&Jr zHwOBrwf}E{fP3rZ_3uw#TmKW>M(mjjc{~4l#RL_TP(%s#!mz>plE_W2#0jzRFKFXv zHa1ePw`$bN!)`K{{QNjb0`!0~VY)#K>$yyw7~wI0Whi4a2E@CG76C^B-uP%kKL9pz z!Roecb}Sjiegnjc*sjpY`8}Ysje$U$GrK35%^9FXN>cUfxMF}FAVJMO#Je_7LR^3p zTTuzR|G8|lnEz@EuZ{Zcq&Z?m4k`Ltj!g#oh0LDr`=yEAn-)>uC~d0ZktQp92;v){ zz+bK;#705W%$aqNiYf(7#hKNY|1ttU-H@9K`>WxC4pLkW4en`Pz1KW|1c#h0nG7*2 zIo82Ae8Y`Z)h}-Xly%ybnwa2y8!%B9k9o??EWL;U)!)@AOUw%0mh zPFhH7sNLxOG7}`sPr!?9fq|pK4G>ei4KS1UwaP~vQVNl-O+^q2pR{Oor~svLU>9auI`=RhYGsM-JwZyD%^{_0$SeWuvAeYnSQiwim?1Xc*NW|6WZT_*|? 
z@av7b&F^4w$OK)V=2i1weE-~8*(By-iaK53>fqeRyv@&<(r&C<<{MsTV22c(`?Jl_ zqA2Df%E26y;4z#tW~b9VQvEk4(NCAeMo|7kgs^ukaDQL80Am$+*Hrm|z-ucy1$3*c z3;BSr?F`GWHgj%?hL1p5T05!3?iLm%(1rj6&8hi=-i885xuC+yMJT;Sm%vyygZ9+ikEVor6SE;CXsN4Ms6&FqD}0@7h%d5~xI)p(DpOIM`f( zkfn62l3BjpE)0ASnLTb^HUP5^5|5+79J@Nm=%YuS92_nr4v<8=w^ z392l*DWQLRXV=!UKZ(c?e6Vi?fF#-i9ke*#Q{U5;5vVkx zbH;stj8&Qc6?1nvzH^Y`JGUv+f;`0G%T9&=xC+7|ur^U*gEcdo5|WHP4rz!joERhc zQzH_Otwd>vhP9r+m%A*{9d+>+pGSLmV>x}JA?Zh0oL%&jTEVqOH5Ut+mh;EPATaLk zQmIh~Fuwey|A(XnEaaJ5KR?;#YNNqIi2MGB1_>qh5sq_jt1W@mQu328stJN^@Nr0< z%>(Il&@+mv7zbJwGcDDrONt5#ZLTX&9yX1u6{R;JlE?Q?lH=AN$cf?4AGRya+fd^V zYEW24GWnbD0`LKu1t%D6uNI%;L_EZ_GD6!3^ljZvwKV`uoM6u`us2A`+XrCXDG zb`m)rCt*=4xUdcQCT+wUX(M+kE!}rRVFr^xfJuoi|Ad+6yh;2(xmB^^@z&t10=>fA zCr!F?s7!>z$vFc2)dBdmS60!sZU|9|?^cw(L)sNe1(gBO%&yDSru;eSM^s}*h|9FX z`Vkm4|L`g7gJa|2wU0)*pE{!x-ZJ}~F_tV`1xLnQIv*1dy9DyY;j7YL%gP&oW^2;_LR3Ce}Ry0fC5IF}6Cht=oJa^dv@e%h)=xGZ-?0*Vmr*&*ZbYr9EE#kj?G zZH9%9na))JD9ho9=hYkBMY_4tfp6I9Gu%gY6K2&0X~EyyLI;|s#g7wk9a3=zQ?uHaU~LxK z?;|IrWzI~^?TL#x*1WNtv!8j#o&L#C=|1RdTxazUWV-y>u<1(%x6x-n4o%-Hq9A5@ zXUL$Z5jb|(&tIZ8A%BDYZpa0D5_{p}JQ(I(6Se>5?jq=b=h;gdYg>iNzWo;6extFm zCyRAiQ_<}->k0kMKRCTKzoR>^t;l-Ht8KkLeoIV>kcH@iyDx4jm@Lu@PkJSn&#{&p z3xUXS{24S&qn}&#x`wC>R^MdrJu(|rV|q*kM>=6k#LCM$ZKmCVp|p)tuA}`x1Gis& z&FDgRoYE>vVY#gDx@xm$y0SFfsa07jFV{k?|J75&p5q&x8=m4B!cpNmr-_M=ay>QtbcEKnHgCm!i1Qer!;nSuaNWs9!$8&A^uZ3DR#FjpoLaOZPN5rTiM*lWY+} zKNyfS#}M^M*m((f%+xLM6H@QoTb}{VU9@Xjt8v)<5HK;uX)!*c42n3cM zHlxmhe`lmpAs`1%$B9dAQHv1KRkH)UNwb+*OjhQkL z&+MZ#&-7uB>fB8~hGZqa$1?4d-J-Mp=f3U2_e72xhwD~O?nQnp;3hYC8w?(Ttir(> zUoSazuBoPrxl-}jk{VLuD)wdNotRFM!{kXx0;d)S( zp%;DvM<-jT>Ztt-1kfLUks5(r2H&A&T<4=o^i!;a=G8Tyhxq<;htU za@Et2{$j~fh7(`dcka%%0!SXM!(isYVA=> zF=zu6j*}yo+ZL@h`O+2`+Vpt7O-K*KJQ@4$X1seXcED}owg$@^hBbf}amJMIBy|U6 zBOMV_{wDMbUHd#-d9bllHDMBm)kACPMnZ9;uN>9yFCeUCO@cRxDo%?iU0xx>kNLS~ zGrg8m_ZiCXDaL1)8@Knxs$}J!T1ipzm-Q2O*3;irZd}mi`g}Vru(EuS@z?Z_^uWVq z&Fbz2wWNV`>DMmY7uEzjva0!T0XMLKyX4@So&R%dqkx~VplgM!=#;;_WorLi&TF|D zKa~)U{Y;|^~M|)K zF-&E!#(Pg<{p0hvp3j^*XKWctU$Z85ggJhds=4T`)R@@YWN^LIWz6wrX#KOEvWB03 zZez8h4ZI!Pr04F8Vlq)!o+_ZmXO62pZJT?)KU$Q|NaM}j&^IBeV=A=69qwfbtz?DF zu`0u(Z>F+}(NN>Ty<-@cR?ej2=eh?xKH^l5m_`bok1L`_2i2A&re5SkY-Z4yD}A6e zxHD=hF+Tg9Sw!2`O>Fr z;--9?h;)RSo6V}gP!ySiTI2v;1^blsEHSG!yJM}O&17<-nN8*X)nmtAxhU4jlQQ{W zT1-Q?Wwn?w*d9AX5z;A zyG}eY+BgYjwj?&+I);EI1I)TDK|r>>|?Eg^qiwYj)lsjX{F-s4jK##}o* zbmwGW2E|)Fz81*K_kcX$v2yCo4b&-%l`6u3=LPh5pgn;%+h{5|?Cj&$bNq(nvMu_J zD>Nvr^ZS?lNxU?7;qfs07t@09O5ehHmNtMY+eAJ6BH32q{b(TFCEw+7f{>N=q;Adk*bJrW94%P)Ys%ZR zQ+lc$wRY69y!whOwB%%RV&Lakq#hsj>Zf-@aY57kpMzCv!$=SK|LcqOp{ep7?1JCam+_oUVpzBH8ELb-J!VtY#ZAL z(={PC-S?(LSI$SkmZ>{wJFe@Gk=`V4i%Ofbe@~~Ho+mG$vJ0T zSW-7NuaIOzxb>F~7qm#4(u5zt%NISP7j?HYKqz0qstUVEDah!hm$-2Wf%lTEkW~GkKh0+Mtc9s*- zBA}gKip#W&W6*Co8S9C1sXl9P>wMi$zn`v;SPEFW`!Oy}X9$_qsr8X?J-JH+F5ur1 zuNZ&Y{F+a><#hNP1(C#_!QnPKA6p(#fazIm6Vk&d`Dyz3-V2ci~R=|fc;<@dtR;fKd>uA4vn?O0QQ`+rQmWmJ^k z_x`VvN~3gxgoq4Cw=~k-jYxOR00YR-jS|w`Fod*pN=w(!B|~?^|9*eIzX$)d?gu=W zHEYe`-23da_qAUaho1?!wow^J@@0Do6MSk#fu7jH4+k%fmv|d!|fLI5VnDRv^)P*N%LkkZUTM1&+Y1DsP8qDx9)Z}cJT=s7kiL4|x zFL4zpLt?dj8r!Ueka7zLdt%=8n|hh6R5=xcPjk*uuG2od%?nXh@Ol%d4&BGHm`_#b z!t%3jdwPGVYcp)`RK5=a$_Zogc_KF)WYkS)l=iP9A3xojsDH>9F_F33YRA?9(Po5j z^^Lk)Z&>DGIPrYTW=ltyJvB8KYF|k5dyh=gtEc@8v}~f@#lEb?tBH)jT1*y9^;dyE z=XB=+mokd)9jXngQP}J|95wOtfBzO@Z`fE|L2xyu9~tK}F4IttiBSCF`t|$QuOqG_ zp&;(H;Ft%p@D2qkPa%qI)EqHk)9!q}wW0=n=J!@}1CT%d9mq_6c{fXsIS{Fd|YUNY?; z!y$7@3mlhk8ZGac?}eg+_Pkoe!7huPD!92$*Q z`{No7WR_)!*}k;%*ctpajjz(JI;X}R29CtB5V#?h>Lk^C8|VG;*Z21#Hp!iJCcLH{ 
[GIT binary patch data omitted]

diff --git a/Health/media/container-insights-health/health-view-kube-infra-01.png b/Health/media/container-insights-health/health-view-kube-infra-01.png
deleted file mode 100644
index 6d631792e95233837e8582e30b8216483977ab7c..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 132299
[GIT binary patch data omitted]
zpa8q?VYLMbxf$^bG_IeY@U1NFSQcH9D$SyM{00X-2cq|g$I^laDQt&JP2ZnOhkJj$A8|1fGB-}wHT;t@7`ftU|RoY zcnjh_0~*y!D(^F&9qomdxOM(>au!yk9?X~iK{Z@WJUT8sL+^WIxbZj$HK(yyltur{ z6h%EDBO|^ef*x?_ydy;|w=>C`U7;iJ{1wMT3~*%`lgI85U;( z*WUT*f0qb*88ofJld z022Zs&VU@LDGNG(wRnL%Djh`;oJyKtizUm<$KH@eDE2f^#3(cFjwegXbyA;r^tt#b zey(j^qKw<>xrl=J@`3QXm(RCzB$I4!oXeO2MSYxal2Rgt3Xir!f>)*|tTWMqd4Z2a zR&`;DD=N6F&bWCg#2}S}Wg&$$vWwTMeWji`w4x$@B{VVNa@F2-W_3f%sNdF|v)=B( z(`mLrBwm_4FJ0c0iTVT-R>T)mpiwr0GF*AndykGya-A%VkPhz$nKYbH~!d9^?pn6&;!)Xxu$*pv!gUzKR(wru* ziw!Xw*$wa{fx?nZtmxw-rN8%Pry1UH?o0SOZ$Vt4k=b^ezHDK7^0JK4=gwE`dg8Z# z?I<3)+v~6b0j7)YN1I zS0TxV7`j0GlM|JsT;jE--#f)Sv6a2O5!kqAR*-YMT=D-hZpcLCEs@$QyiB}Zue1L8 z9w)*{dA+n&+vxwgB5mMOe};XZin|q#CyFn)xKTl2(%P zs+{^SqgA>6A7PdsT6O|e62p|}IXRW&QrTIbdAjD~SvmxS>Q6;#`{auX3JTFjOHIm{ zL@dT)hDths4huLs1HeIn0?LyS`=*s*S~Vc@#%coC$#^Y>iL58{L(BnIMYY!R^$>+& zB}~b#KL3ZV5ZUJtVx)_yTm6^+x!V7?pWOfFkNUqCv~-0>iZGz%dk#Z>R=9BcfcyGn zQ2n3JD!DH1KS&cP`r*wu5LR+asH8s}A`D1LlDPxND}5gx&NQn`Mu=KXtv~~##Xg^H za$VSeFWD6qH)_AivI%B&| zmk6-0J5kV)tP&(kHsIxQ2L;bRfno2_wGr|7Nw$E5EEDM(bc`eH-^>KLR`2`@?1D4R z?zrwGcO0k*ZqJYSyrJggA|7_ehyP3gS;y2b4YqWCTTSCQ;26_2w?Hs2Cnn&y$_~Fd#j^v(MV9b?*}p8VhzQnP8Xm_Bl>&x7S+X3g z+|T*2+tC4ebIJ|Xh3z4xPPyH$9~nHosE4v-%^x5dwWhmUU8=0~;^N|Gid%s9E9pBL zp4@Zs-8Ye$on4`)w--szdIGvVvYn(Xj$xNg4sd~z(Wt%NN0M?;&nJoMH94dJ+P=?# z>Vxs^+qVE~@>%W^STVB+T%#6VR6a{-(ChzT1@`?E1A0&{)vv zbozsUwrK%}WvdQK0N93egkRPbzq8n2TY(l*8xb81Y*ktv5}O17Ewx^x)z=>ZGvbQO zBOnMgx!5-L9ZF;rP~1QT7RB{K z_mjSGz4>@|BHn^l8A8yn_6VF_Ij)wsG!$xdaHVDXs)Vl5}`GSVS+&cEF z$9Qmu6E0dDp>m7M%)3c*0ANf{^WHr^w2kWoY+I$nWbzPM=4A&U%)clukatg zrrDkhgN^n*cXb@6EYZ}T(|U%j<54wx>kE4Q+@qrz~l90d9*NaDRObkSG$cU%mazq?BbP zp=IEnqVeNw5YWO6?4U>$sTN2N{1xoEp^p=#1ISScz~1!2z461YA<)k5nJ(yEMCtm4;+9Q%8+4~xHi>Cr6QF3B6BD(w0VA@tgOrGNxDY^LX=1P&wAOdz0=wL@ zI}^E%huku;l$sGAS{P&E<6r8XC`&xl1AeagVwqRUVpqxLqiK_?e7i}wuJW~(6P(ZB zr#VVq^Y9MA>$zVVBROTjcaQe)9(by$4}1^gM=Hb4Rd+rGZh87gulE`p*H(3Nl^LU1 zR0$Yn(Uh@6IOX78dGZPg4=Cp}97cQA?=)tS^`2)D5= zK{vs?^jwAV!m4qF189sJ0Z;JUF=p%LR=2CY#%#+?;EGcLk>a9HUs+JCOlu08R%2v* zJ`1U!WufsO@~P6vmPsD>qxnf(>V2Gde(aVdNSszoXuRQt$nU6`sv-i_@j?7S;I#|d+`seoemjaIQ&Ae4Xy+4 zxeqN4Ti)+GITcZwER2DZagfk&U>CdKXKy2B2YQ#`V4L6*dZQBh?lK97uJ#AYHyXbf z0GL?A#s}?Sf~656 zQ?NJ?r(6sv-fItR_Q0MS+&J)(Uy7n+!GRs7CF15mwCXsxlce}|Nnf` zw?Dp}cK}984~cU6F22 zf=#B@U!iXleqYtr~|Dd2VoQ_ZYM)=5F8Dj9E2`JQwq8EG62I}7iMXp z46_^FOf*jH^W9+Vrp)DVrMA|F&H`4F-6D13hAq;?xw|gT_=MPtii~eh zLS1e<2rU0M=B0-Bfq!~?;v8RIY zQ~HeJAkj^ae~;f@2~56RqNiFU)z|dsIl5-w{?v8~v5ou4ZNZ7Sq1Jb?Ir$$bjlmol zboT{}s`3?(>g2o(-7lzW{)b87(?8#*kH^HscTTQij$UB1)L3ymolY8P5@`?ZCHvH= z2qQcgIXxPnHv%xtnW4Sg-G6hN6uz|!Nkw)V;)UOc?x=8+Q!?%XGNO9IPFA#JG;Hh9 zkqvQlS=;I27%E?tXzsJTb!Um>GC>g2JRdA`O?3txkZRK^dvP^0w`1LR02JwRTI|m^ zz-VmTd^J}>omTdNis!_IYPX1%IeFB!s$+HseE17rQ8gJ`+Uv$7`^QH`4D`d( z&L_xmd%7r?$n6gO(9O3UKJ;06Imy&)Gb*Fn;o_a%qrLfGh89|x3n$6ia5yI^xV14vd0bhk@l z^z2a_Ap>qhV=pHX6EqOU2D^hradNE{oD635@7U$FPZfwJbRRM1z10+7rv+4og(~Dh zRa$^Oz9YWf^Vp3wtpE`7CVhO|YulS!`6Y**fnMrWGbAD@@|(UVYj0q`b6;K+Q3$OU z?`A>_b&Z+C^-g)`%a?NP$v0$`S^D9)X2{kpjHW|`Jsb~>SSUSv!N$tsIve3`-!`j{ z_#B)?ck{o+%iLW9OXm4aeQ}le<+M%4+l+dwQ(!mS5Z!Yv8Z=He;C*ycDHkFWc%P4~ znxjt|NPUXhBf86rvGM_?7D}mXI4Ci96I2r9$|^Hux=d?ee8h+|Odc|tf_F1^E1P~! 
zYDNiTd=m&ap>gjPU+fgL!z7C;rGZMm0%llQ)y=5CBh;^r{~%16ObRoiQ6<2=&QIuZ z#Dg*ZkWV_-#PEl1pF4R@NAN7feFz*U8KI#@MHOxo&cRLXj&|E)kU<|URzj3Fc- zymwZ^&clt6$~fnpNImD2&Am<_O-twK<+`_Spq+a2NZF_BioN=(2U#dAzG_`I5qSUG zYEf263c}m4FaIhvb842SHH4adGob;o3FT!#G6Z#*-E)&MI%$R49!WM9 z*#;9s)d9fLS$^{&k{rT*d`{EajwrUi64`E(&EH=OX4x(XoHVkmq%Zx()(?{`PeCHv zh=bpo?qAU6`Z?!P#&IkABqPDj-jKdu;Hk-UQ+XAXg7v<}m=k&P?dDJ5$Te8KsaUM8 z06;F%0lPp!C>BOixJhJsLaW{KLb7_W>J4dReithnnH%poW-jFr#zo~OVz9?MPE-ao z_?~eG{N?h#ld7ExnF{jlN=tMNu1P7KGVZl&d6Xif1hX3VUingkttE41CoruU=$K3v z4HO(t;bvxf!cPogn+L{5Tk4IEcKu$wD(|fmJ2f8a3!u=zSERe~73wp>Gu3XY_wIcJ z{|YhJLR9oNh@7sbm;!5v=#8L8r%|f_4eK`4HX5+(M7mrH0e5aKfRDz-B!^q_*(=25 zJ^9%ia&6^y5+d}~UH!3g-Jp?>383KfRjUe-=vyIC__|xioFZmVrqf|{FpX^yVaZ{C zcAPCa%6C$ZnkkIKBr{ky|53lM*a^a6Y#SPM;tivAJWxte`%chOHgchb9be_Jz=Ah( zNGqt}tFNnhVD%(ExkUlnN#%fQ7tkbATf|ouG?o`E=;&}S0c|?GG@{>q{N*t8eJ|8A>DSAIUqeE2 zF}|;MzuH)|5KZL{!9MP77I|V=h2pp|<5pd?HJJI^+#{V&jnshgH>OR=gVK;t=-*MD`=KWMc??g{SQerL zTVP`OT_iD@k(3c>=dJbr`yRCoWa)-h>P}M)ZGB|zOzqZ5pByLH{iIKFL7fjG0Z;c` zTmysni63{4bD2*8pIn9X$NRrtRr#qyd1z7GcYd++p!Gp0E1Q@DifYS;5Bpd%+Q2yDBj3E-Z!bGduM`?=l zp0erU4=s7~PrdcIWary6ll{4>T)Q=hShb8ffDwPjks&uGS^~v|>v^mgfbyjGtVNYo zc#CpmGV@c_<>}#OB-?Y9wQ1QU3YhFNpG6JsO%_-HwcS7tU@W>$0GuMtz_W<*C~am6 zdV%2#dRud|EomKhtCT+2zoS!f#GOhA1zhfd=gMAt`ciDVXnI*`%0`Ywa~Y0Qeb zL+~<{)wg7@giRuaX){w>s$1SFRAqsrVXVq8&0Lk__bD|Oni<|*2Sxhmk9zy~Wqvs4 zerHkVrS779Wv*WbWw^*|_`mC+CalhsJiRbfHNyRlnx;$r`RV0*@_jAfmq|*=H+KUj zZqt;#9(_D6oShFBj6@mkUjakt|!F{1G07c>XdhsqF#UA5CBiesG~*qd&+_R=@R+X zcJCsfI@%ZwNTD&vfkN~H&bBE4Prq&h3hF-V^$d0tt>&oUHl`KEq9=9^V$?6y0huv& zOh7@aCI@>*`6zmUb%N7Rvy#)T;i%OaT;Adk<5o3oz+{(YOdKnzgE=(k4oU6!KPY>v zs3^ZbURcQikyg4pht8pqRzRe?1f)Y?=x*sw5fFn0>28J;l2#}-Y_!5FSq0A2(C|ky=0-C<|^_H5!}}f-(ML!KRdxb(7y`bJ?IdUiLvOTR_ zG?FfIvXKcbbqVSVZXo?}`YUxRt(DXMb8P|9yz>}J1v$d;p%6jS(Bo&$gk@zdl*llE^0A(r#oINt3QyaC~5)L zF?kh(5Id22kkaEf`r^~JLRNB%;)xX-`%h=T4cbW`htl)xd97=E-3IS(&AhcAWV;6oF4XR@5C>i(KaH>jo^-3>hCbNl%5NqN9`Y{QfCEM4epDyT+XzE*Vdj z8L}Ds9ESo6y6i*eal=X&nFYwK4t#UtDyCOsZ5sLy;Hke3vLyUlVW5$Da$>B?QaJ+# zuUd#B3N!~0Bt6hg{^ol|sB)AycrNm1PHEZ;zTT|_jE z_Rtt^IhHDw2z3>=$!@FE#M)y{K^Z|_x$`JytNM+Yr;#Wg>@H+ z7o;!b_7mA3%Oi=MuKb}>y!fezM@srG1!eO&0B`#+e_)4fZw#hUG07H4+Raq;`1P5^ z9Yy1cg762MrPdbM-{oMfgy}Z_Hce{WuwxWidHKPIZGZVCINh^cg%2oyZt$2&Kb}9v z&&CJJ3=~obMw{BX9YlSff!B?@?-Lj>^6hoD;!S)H6Ardzw3QLK--l7Y*FF@ zdL`c>B}@xt=IS_Qd&KZ$CEMkTR%vF7f7u}_G%OfSx{jRatxdSW^LB{X+Zgai6*e+J zI%}8l@$bivVmC)1!>&J>B}r4^uc%4;BYi4xa;pK6JDMAgBrE(-;pCMGs5bjcXU7nS zQr+raQnar`JK;RcIu#$qwAGWNqrp^2-n=}bY$09v7eDU3TTxYN72T^32E{s9_f z+c_#I7)As&GH?Mjp0LY$JgAx(3_|To4PsJ-90Nr6M^1q>^D|0Pg;yOP_v+EhR2#Fq zLcEeAuMe6=DTDh$xcCW~3AqlQm7{!AAmq?Hv#x)BDGC2bU3vHb>Re+iuZa zr#^vi)h{!n0O*+Vm2!u|8Oh_tGpi0QRuSR5V{ZeqYy-_dtY3}mThDanWft|1bBAU{ zmG7o+!fWyt7sFU9MUuOtbJ#i@aYpzt(V*+t>j+lNP5cY5u%noji^FA(V9V|cyo>z> z_r$s_;)*LXW67WWNy2gSx+kx^7u$!Ci!?%CC5q8E-M$2O+c7~ZOIC8f#z2@W zbc~Pix#D)+@-rK?s#2I*d0NRlzo8L{uCY7$qQg@7iW`Q7!TJC`ob}qHFaT5yu?3hv zl_&09TQ-sC{HCpNKGLv!33xZu+8ZvSK+x%odl5qY$`dz;D~LCUdyAS+6nDPbN)e47HM&L$)Xfx5s@{Y0hiCx|Zt+@|41_u9Ps7Njg&?Voe}j zzC<1+6q5l%74u3q!^8y+nBje~HRpanj!>f}JS2cLbK_CP;jgFxDMTeE4JJ{(_ES>6 z3WR)v%SOo->I;k_FqlQBfxBo=`7DOEJxbjOAD>`v@Q8Met~4ltUqkEapz3fPVWy2> z;~jd$UlQn+KHu>QA4;gzAEx|l&`5TLi&{Mu`sh?h8p`tM^5q-eFpeW|Q2B zL%id;|G3LhmInDz#5|}h(4uCdVAO0%DNyKRTc$JMwd$aV4$T{bGQTd2Fw{zeV}z(T za6!XQ7vsf^9*yfXdsd!@s4+^mW^W-EF%*3)QOV>R@bY9vZl+`AW)@jb!e!d@2E_FQ zE7jsPIw5XZ-$}zW);IBh6V=^#%n$@;D{*+ydOYdGu@i6_JgD>kEoY5y4ru2b<`}h~ z)w5Rgsb=h1TFgD2vyd!!oXFpO=vS8TJuJpvK&y7C)v&g9iM45VKXHll?ro{;P=k*1 zE#}O)Y4l!$P2AbH31#=iDvibO$GKvcAKp3k*NWE82otu$qWzv;55!`NTG)n{x+vx< 
z7aHYyRc%~woUB<&%$h%PM76Zp}!kyHT?`dSxQAnACSFYVM{Yhb2#5ZD!WhChQUT4 z{8`AcNb|p)?*c>Url}Zia-o}^ zQ|o1se~C$t_+j_qR9Z79ulsO(-MbdcE&qr8B__lg5*^Zl(zj%+5R!4nFLGDGJ}=P2 z`n3FH{r2`=6&NOd4$Q=243;pXb`I>J%cGW~g6 zra^}4kr6n>{HoDc&%cRU=0NHNg}Ml-aftVz|ZeB>WX=bLe&_r((tGVknK&=>%C z#nZ&;3rM!I;G)RI*~Lzp;FLAe|Cj00cHuqaXVEmTaX?8{wV?@nz{rrCw?cxs1}>!I zpm8rF5*MrU5;_K3D=D#3OM5uif3sx=X8!)HCMjZcQak^;I@-VZ0b*3|yy1O5T)#a0 zgrQ4$_E~{=(;n|t!`(5-!U|{4_k{*fxwe#%QFZNR`Meh=fG#paPi;dXAKh@1~eLG{vN)l}5`MT}w zj4A1o#~XOc3x~;CWqua?Frm2XHUdV>)owTahlMt@pXEu&rcN{7bwRCiTf^nRMX)Ws z{PQ+JWT~_Y7IF3*GPBy5Ji6-sHhp=;d(=4H4HO3Y7R-EsJ+@!#5>8O7Ow|$jzoYmE z_gOQ^niu4K;`y6;#Rlk_jUvb31Y+%6Hd7W)0_|QdB=#jmg#scgxuU;ua&<=Yt z#ds}eE)tkDIX|twU?r@7y+gQ5UAgHk;F_~d)|#>)KdFBxr$n8jahr{RFPd{~`o)rq zFsmmd$hROZi}g{EHIMFJCh|A+r?2=Asv7&gO^i(~mw+2D`fb~g@Y;RE+L_Wf@^4Kl z(W&axyQBHamvlW{#ncv#qxj}cVMx}Ygzr`5+4U7V2LE~H8!yHEwcuj2rQF+-1uHQN zlcl_v&XZ!FYdB5@k6+C#N5iQaC*y|EY7ItoKXRU1j>?_8CSq&c_ zUj?0jV7l=rA}%fs44K-`ISq)XUH{q5)+jE*ww*$btuQjSahzse3} zS4HdCQ0vhpvPwm*15%C*+*s8h<)qUpGAS`_he|Ckrg}5o(S)ux3Heua^xi0X%{Y+# zy*?fsNW1*Rk*}MEW+htP-@3pXClVY52UryioaD#)yQJOv7 zR6l$WXMM4qCF%f{ivna8NVN_3>ZdCMsGBj?3Ui+>^Z@5-wCU8X(%}Yni>~-Uz4dxxKyZb9DK$ zEOm7jUS8OjmN!_}cFT}v{Qg6C&(*|grhwyTEQR2kP5$e*s#)}+wGNf}P83v)`cY}@ z92$%<)d;peaAnblwxx-^0NOWZ0G}j9L`1ab@>&6fA<&hI03?)7UnHOz)OHG0y#EJ4 ziJ6*J1E?Q90Q6jY5&a;x%N`EI8O}k=8NVSb@82`=2yr#Yi_iY=uQ%h-%64zQ6`!gQ%*{CNxRskuNQlr`!w0q(1Mc!+1?|a@R z$7RQ0I})dI@5{Z~6lsyq%=^CULn&Q%me>QU{%Psn`k5x07tPJ+PayH!QbKts&u#OU+tdJgDmmv7)mh_ z%?X&{pI^LL>MTjT+&Pcuactn|Q3S^zT&#cR_jhFdV)+OX)UR}4|I1l|D*Dmj?B6=u z_NgIsAb~f{0;V{G&-2MxCnlwK8t;cdn8Pw!7a9%ye8;kVA*T^{Pa>hvCZ$0ZG$Z(+7V; z_sJ>B&lN*iH!Oai^o+OcM&mms2jjYq2!pW5t(w7=g?rW{oaB@#1t z1Ho@(ECIJy@Y(I1azmjM!GIaU>g>pQd}|I`}`J@aaGz^-bEBvc3G}31(#awe8%?Z4*?5JC{teD>5G!SuEIzjL5IKA zN#SL$tK4*F>X(=;IGgoa5__%zR*+Huz|8^vAr%@9Hbj#<|tK)uolvS3+x4 zi@PUhyPK+Z(uLg&AckVsEy?y9jPBW!U-89d*_^t(M5(zNWvQY1a?>e=v7l$sm%!aLl+SgwcOt(+FJY05LaQb<1mY5t{uAz+)WMF8;;>OO7rw*Y zu)i2ywG@w@z!o@s?A@qN>i6ZgO8-9=p??Lxby$uaVj$*n)s zf?wuLSuB+q^1yFo8$KU1xG1)SF}4T*VI$j~Z-LV~o7z}RUQ=8c9Bdj;TxT)mT&N+u zTPC16m&b0J>uNa|r$q88p<>Z*U9tTyE6!P7OzB|FG$#FIp87Q zTws{fu(2O7TlSX)22TGGf4y;3j%`q#iuOA+PJ1M|gN*!7xa{2Qr$Fd94i~lUN3h6^upW7%V*|*q^Z5VI8=sQtZ!+1N^&)zoN zVwieJ44sLo7)59-ODsV?nl=nywvP}5fRxLVqSr-U9NaFIdlJie6YXcMmeE8?KUW#- zK3!j2PjDF4Ht)^0zQkXZb=ewb3XOi2rjRshIOj3&`MvGBx9h0jqF3>}-!6q*`XSW0 zGP26z=W7kdl|(VC^gRjL=@jCX`T)Z#3;?v)a_9>vMyPUK3DMUr-3MS% zofX&E&FC?zX4`Hlc-p^9hU;vyC(>DAXN}6zjfJOYdFAVSWQe&o&YzE&f970FU`_&+ z+vvk$9Pz4)zJ?Cg9^o}j`<%KpCS|e|!gk||@j<}BbtfIV5zufivw3$y#Lf(Kh80RN z@@*9se6MdGyF&E^%F+=0@01Ez%DdiX9Wmt5Bee5;SPnhBbtlUVSiRj3lDilwq~kuk zVkA+?xe&NKGrwQqaqq?@5j&%bzDMaW)N^<}xpcq7UShWO?Z7#;s;aKCM~NQ5U3 z5M7evC;nvCY}gkAdrW%yZ*0Vnalr1BG@p$tp8()nvC^yuDDk;31Q{u{zozO*K#a!ho?T zX%qAU*roh9<4FVCt!i{6D(ain7SB2`R80=5UVG%y(x))cm%m~CrT6KB$&>uIA=4C< z>a97GdbLz~Y*$rVNqKJ<*sBF@Y>ewY473%ITCrX?jGceJ7f!8PUrwKgpUj?eg_skV zu5xKS2hq~@gdsYj!kO2rm;Di4o1(A92@PTe&)H`gWQ)Y@?;%|E4!I@$e+j*hFwM6X+=lR6ExHFnp1dR4n*%-AAWG^i2R zpKe;fU@LGh9MDHQneFYA_cO*o_%A-gIAl$JXzEptas0l%0EwklVZ*4w>naua_L&dS z_Ux?;{NP8OWxK>g;ft)S4?G0sz2AuoZs?~Y_cDf=I2pB~h*6_VyXm)SfUEo0u#hb)Om68zreCSbo;xA^uei7_5cyZ)~SE4b<_b#+FWS;C8eubxnpq!$dN|mYjC)gyl>RgqRUx5eAMIDbI0H zkIPLns$e)msLGVn2`O=;D^IpYp`aojyRhBF((A1ZfT-<_FswRHrAV167LVuzsY1pI z9u&xnL=|}*A7JQgbn(B$V?)6erVJVZ?faj*zUnU)%#Px50>#(18h)Ne4|jMt>o{pO|O0MBieyz;-H#Q|XEs)Fe#jq10+uRAkoiot~iu=lQwF`3@q{ ze{0~cGjHiptM4xMOzVDt!S5ZftKCaui;le)iNkw&;^z4|63)Qt;9e$ELh|I#+Mlx1 zXZ;+ZQAA1b2`h!;)zCuejl@J;D#w2V%wOj|=9U;#O=Diobf!aE-`wq82S4RSB7D-^ 
zhn<(ONuUDn>0<)LaAYydxy;&vWo%Egj2ObPDPUfs$p2QNZ_S?iDT8@xCtAr_P_7Fu~6oiq{dd zY5m@YM#)S@Uc->K4f-vqzQ@9OQg=RJr_3!h%C*E1m}3%#1HT(BiYwq2t6` z8TKG(4wy=&}!iDZc_HZ=SbaV@`N;!=Q8tm zVI;n^Usq?B{Xg~3C5OH>Ny#k6>a%stdS#BYl6NFL3Yh|JO7EuZsiy?K(T$3q#Q(DO z2_+%8&5b|4$4bI3%^Z0DbXPHpZD$VS#Qw0kFHA@5bSJCIbU~@1S*OaaRXm2Fv?VOVGOhQRWud$L!N_`qnH>4jy&h0uvUc z(2i$1`yJ>dK*->Tx$)VTT;}3_f?{@-hrZ{9@Z#k- ziMx0(U@l?)SzkM7I;>rMbn;4pFki<=%`06saNKQ>qi))lnT}Z%=tD~R-u>#|JO-~- zOQRQJpKs6tRK;3&LD1H}op%iMQ7$y)g7Z$n=f6JA$E%Lhly~5Yyoey$PGMH3t>%IwkEnzRrTs&D)FHg)EyJY9~?;jX$+|av6?~8l3CY ze<&-?BEUfxb0A)$<(IJ_HF=n2%j8UzV`b`hIc>}inT34fkN%N}PKup^g>SayghG`@ zSlB0P-uY>dr#?)F?ufR2^(XbyWQd*6XYZP}%DBiq?#>IlQJ6XJ{?A!RP zFRGOEopjcTvFxQ+dqo#dvqM%*1J$?+-_^=>>QnuVUDe_GBX^tuW9sN-p=FPgfYso- z&IO#o0T=)E&|O~j+~5J6J~c`Q7UL7MGlpv-%kY$9PYDbgk$N;s`>n(D(|(+axZTR( zLN7#+@2?IrD2%PtvGFA--`7LqOhMH15&%f!+^QX9)B5|&XgKVTt(=Jw>(~ZyY(U0* zAdit4S?KCTcg5NsZ#{S+5Bj2ThUku3*=U7T!-%9))tu75>`TI@>zxa3N1CxwZh#SV z%0aXq8}6FwUluwZB6)a%xo4FHr(-tlFqPNoILZa)P0%({ezr_;%*rPz;z_5F zucA--qwJuAU(%OE=HsmC)!FkJ{sGCu{u$%~()5F6pZFfx{Cq}R!Fzlean&L<)3kBX z=*vJagGhOT-YK}NdqiX<7{op$$-LR+cR6J^=jN}43p~aq!`wd;?)b8uhK|W?8;!ih zsggPT(3`1V;XNMjRyIYob7kM64LMze6dOG9UvL`xY>&QuIWbjbe_B=$^~ecpyTatz zmAQY&0Qz>%k+7FUC zY$r=drr*0j?9FKTgZOi?I(>Dn{s5b1T@cqIMyynW91hsaFTfFLfnKWd_?MJgFQ7kYp^biRj|u;!&%ft)y!_W}vasacHRd75ti7A1^XyfsXswn|?&0!a4A)%4)!#FrIO| zRtv{QbHfs2I9b!|Lp#DEFxiOnYco?_p6|h$*vPHe%<%#8fkz$mZ~c8X!88#TlJB-l zQQ?c-tl{*;;c`OHSTWN~G?+|`XtQaPUa1H^X=cyo$K;12&U1^09oxqki|C5`9&MFN zw@>eKd>3z-$3JI(NDs7LaTzeOh>YXFC1IE&P&k4d1yxiJVJe(DKW{m;vE@73rd_0 zlgUoi$%lH{m1SC4Tt{9Ie<}gg1;w~+#IKo0f%s|;I%NX10(t_6S@w4asngvG1KHeJ z>mz!vqqfS}bftN8dOG!*@sw_hjR@6VVet#aH2Bx$ycWt$sUl837K)eidNw`477M=< zIShL+t_b}hYo>V(B%T12Gq5vI!3^ud7}A+l7idmu%J4Sb!b?9qsE@`wSa4>vl;qiN zfh@FqGs|aI;_AQ*L)&@aJQahe2R@o^JqOyO=oRs16l{hyGcC>lZ;!l*o>bFiZ?8?2 zLk%F%KnEfKuw}D2#vB{)P3DyuR8voW@_*&0xM4rAotO%{!o>2cvH}!ms#RJKe{vQx z>T|M*f2F|$2QOHdU8BWihCjqlCkFM_c=&%el}V=rak-9qP<{@6Oiq~mU!Y93 zX5iuBso2`%1!BsX#`m@+gE&)EV8h%Dlwl{}5uKYjPC!9nf4BzHH|qBNwnlOSjbkgl zFCEC{{sL)O|G)EHWbe4XtcbaaMcNl_$VSK69dcpx05NQz`9rd0>A^JOp&Zb`rsT?0tY`mt*2j!00hI0BlXLVq^kl5WcI?0G#j0-Xqze zPl)trm_Q^)g*?V*+o`hg&o9Ylnmim$;;2x#LfYV`o1nrLT4gan3u@p~RWe5IymBFT zph%=jGeEmk@7eS&Q4NsCzWDy=AuK@yOH@TT0zuoWDv_HwINz}Q~|%*+7k$bX={PSyvs`UgIitGs-UXa-k{ z*77N)VX`jRyZ#qX!sIJ;XU$7PLj&5m!$B~_063`*Ey%zRAp9y{O3`z#vae+IgB6al znB@id(5SHrlp(H!Xd^v{QRDGTagD>~(^$=S4}#v^mHLfT{SxYfoozs~B!k*38JEo= zn`i!xhCp4X2;LSQh*GJA)A=GWLm7pQ{BPfXAA18pq?65I?|D}lKVz?LhVK>0?7HKd zG{PI$=8va3CIYrB0UA2Vyn^C?0f76tO}xXWmd!R3j3h!%A8*Bk)mYU+Y`%Na1c4@y zk6=s5p$lc+ioaA!VO2CUGczeq)Up$%E0sGc7e4W9oE{oc&K`HrOR zANmXtyxBwTK?TyLkRnWsHO_>Ieg%{_rA`u7BjjwvH0$l4-Mr*CxiT$g0CFIzX$S+& zV^fsxoRcQ8eLOMLtL>yS0QH=yb2K)IGk$ahpxAwnk^zYAT3=k<)FmcVCoIqVK=?uG z`xNg)S^;BH8%1zj0b9CJ{CbaaN9uhw|!=ZmIosJ`bn8>nW8?8YK`W&0)o~U9A~yK6O*72+$-B8 zlTIER_;cTx432_a9$Ju{fMLggLz&+AeiyCw<{`qP*7jktY!O6sc6cO{ruhNDu0($2 zL6SPR*nB*12;WH2Qg>R0fAnSA`dKi%_O(WNm}Fx(i_$*Oi^(mjYE!UW9_dBcR$2`R zmEi<+!`L^p0BE9Q(JGn#=>s)vd;?zIvJLq>Vk)FY6$Q?wUt7ZOzWfv8LRzgDC@JIZ z=W2DKQA99??9o$%^I|ib6%nFwsk7{4=L_5Vcaj{-h1?B%0xG5ql}r3*WDKqXacObI zFlIw}BTim{3!Ll+wL-RIc4ZJ~f}t&bLGFbCI zJ(E0W*rL=0eV}5L9k?{VF?MwL@Rln>%q->OyY%)#aM?3t?nf7O+ildFoo@nd8#N@~ z5ZbV?NJ$aO;&uErH#Zfmg-nMA(lXP0r2a@7#Edk)u&_Web#=ETdO6b2Jl{c)i�` zsGe`)7Znw4y1vVWV4RX%DdyaSw62 zz|Ilz9jI!=858E%C-dy&p7C+yGCM=wFI}9I|Uj)8o>%Xs8cPro34<`^*K?T z>)gj8Yn2JAB*4Qnt&=)?$cC8|LO}H5xKp{w4F?6n>IgUr0~0R6!anB;HEOPZ$QyHq zdztW=;ij@`BBJOKGzCYLh(K3Bx!2d&&rOn!hoGxtOMd6)xGcj8unjcnZeMStje*$G zs-9g-+C8%5G3$nq$p!a!ng&AnUDh>)h-@D%T=CQ_=ERJ49x7Ilq?$gI;v>$P#!lbua;sfCX8P6Sz5cS6qj$f;w; 
z*<=pTKZBcYX+%RC=~S*2vVnjgsGJvSY&6)kj%1fpjyvI+Wd<{t2ox?saT3yZ|;B!WadoJp_h&Nlmf+ndgpH_g9{sU{(7H@CbD1>+k zpynVvI*NP#rtPkqcuw3L>uSqK+(6=eVFBPZTOr+ z)9~d(v+q)z*hRYIpRxQ4LE_*EkKdZwSE=l}tGr=NqYwrCHgY-GRTQWT=9Qip(U5qs zv3P&zAkJz%I&#wI#2#q^Ia0M{35;9fV6?iQ{bg%d=U~`GvHQX4Tf(2~@qIl{7^nlf2Jo@R ze2b*t2c60-`3&bavd6o-WBI|O5d3?sZS#&U@}xPQ)M7V@n{LIFoEj?%aDLe6fsapasqjBan=WhB@h z*tNFFfNQP%RgOpoLeweGm7VU}jdF4cm)$X2tL?wY!(OS?CQF504fN_<``M?2OXtYl zbuOwqzPXxTs6ZfB--4iSpcUdGq=0)u_(8EWgK(oBfn^c`L|##UA;2H?@R_x9jNZ!uAEFEhlS`I z*XzL;*2j!z2x3-&vmW9;Gc3Ege@ahSuL@GK)EJ5GQWl+s{7+sP6Y4EsaL~XawX{C| z&eeJY^(2guvYY5Ckc5xDJ`C0JoWvs~faH>y!{(mrpwozSXA@i?#gxeWg-~Lkm(FKX zS*GGSP?&1g;CstxT34ctZ0~$8uWh67%!lYYQ^bAvnO+Oc#bty^B|q7Sm5;V(_86hS z!JV2&;ojP6XOr4;^=PdTB2Zd`Y1L?Hq}9iQD+3_D0?AR6PIA7TUF$8Ao@mxDx6*40 z>ue@^+b)fRMS=yxzMGBtYdm!g}@67A*u5*5lrNO>%cT*6n40%6g#EOa$iK-fDl89$CWi>(` zAZeG>3q;Ex!cs=TmvKL#mLtS9j-!dHoCiV{G9=kA&*i>8$-xZ4eTGFV{cReji^RDT zc_zoo#OnQF!x0aCIkAdSM4Hw8nY1(4_#a8~h$jioD5)eHo;+Q{Ps{?-7z)m)>$&78 zh|kE`a62pJ2SY13jE3x#Rs4VuqqI_f5##&{oZgav{CNyK853%0?te(TPb>qzI8Du_ zL^dt5Gx>wE#vOF1SZ7v0`qZ?cZRaASKq|&reEq(>-I)@;jyI(p>gWa;uD0i)k*K; zs)_k)_{~pimQ5|}cr>s+!$5=rIN;%w#@qI#59eJycH|-@k**tqFIx zbf0FA>2=$iw3TXfoQ8VGh`-JV+`>7G+9S_iFK@rinQ6|$T8wK(=Rr38Vzpx(9QEm@ zI=i<0NP18hhu3I1qlXP^*~07*QXP67PA@(h5n4m=ia2*n%cs!&Y&@kwo*XMhA16k;J?hu|E52#Klg$0X4jIRRWi&z=wlIyw2ss6V(G{BM00ZWW~W-53|2F5 z+hSi+=pQ5x@oWiNM2aZXvMgoL!(_%RiTx&y#&3__=O3qwgDHuS|?`k^&>kv!`ibfYoyz^o>z)%|SH04Sa7j;Ae&( zi-=kvuwTMFSe13nVn~6#QtL3!vR0Yrk_pO^S2E4Z{k}`3 zU2HgH>{0FuKBszd-}a`!#sOYhTvA^hdBb&*P2YN%(upr=o@j6?ua#CY-8@<* zH{?U-6FG4tG5c&HI5jk|0%zA?(eM_VaAL@6-A3{cZ^L=>)J*PLgB3lT+7aeKL7ug? z5{X@dssL@d0rJI*M9aVq8B`fe>{@U_hX#GJ;+jd4=JN= zEEk)^<{Be$Az0daZ{fmYJi}a#up#=t#%A(gRv+i`yO^ID& zVe{O7xl|^0v6P6hbH-*;`5+%?`1(mAYpkndOoiZos$$~^k>K+2CXZIenf!?*Yy2*E zeh+dgm!n2K*YA`f{+x^sGmBD>_l%^$Y)|<>R5zp_8{5uD(uQKSh8lI@gdQ&a_MPC|&_IDD56^L-e?s%2u= zsM5X^Al3G%=;=W;m!-GtlTN%inyE~CV%mQoh_7Y^gyWD|66k0dNbHE2nkI`~hJ9ya z>PqpIE3C`4U6DQ)g2qsEk_l6deDIFnf>gvdtH;&H5jQ_pPhb3q7xDUl;y_=P=qWPH z6}Q=wVR|Wa1HLP<+hAp&LwSv~i#(__;J2DoV0l@|QA^;F*kE}Q{#-!%Q~599Eu}~S z0ru5T3bNeWvnSeLwaapR>eioa^RLmk8ei}Id2P^Rn4o*#U#$H_W7J>`Uq(cO3S!lB zJ;XJD-TaN+yihvVb$;rABt|~#mbfMu?>_tS!7}kLe>hRF1s%1u*`5_(7+lhYW(M*Ep^Q9bURml7N=R$64}kaBhJyn4hAiP2`- zZU2P235nB2UPrpDW`t=cT+-Sc8LW!Qbt8>>z;36Jo0&*zP@ycy@kc&Lut`|-0Kw?@ zA*{JlX2gtCDk@r-FpdMIQ}Qg*v>LB5kg|Yh!ps;h~B*r)W`^1?qsz{Mu%9A8%k=F-JIPV6v0!x z5yj&^^?d$GxXZH0v6&0aD`H=xu1Ec8mj?X>b+6Q8Lk69E6_a+Qp3rTJs7DYNK@kf^Af9A9haN%uOE#gEL@0DI>(A{RP!Y6 z#FeA_UM`dfIYVO%g@K-ox zY+H%ync2{dXazSDHtYE2*N* zC0ThG{USPfX+)kZaaCZ)MchviM2B0uHj93sJiE6mhJ!_u>K$HkE-l`lC0O{h9(z5` zb#tEg3H(ztPh+3%DF3=A_73G#z{z@JJk?0w%1)8;#Ks4&M0GKwar)AjlhI1gS85*p zyjzDYaXMS)(mbQ?i(uqH%^j`mQ{LmC(MN!d!|VxT+@Lc$3Qwzr{{{!91Arp8Rcl~flge{sS~Rpzl-+EN4>EMAiC zv5e}3&=zWbfOA|Ovi1BY^(Q!xQ7za+2yn_HBV~J_hlvG+UWXmx9TOb}^6ouO7!{tV z@3}f{Uj2T<*E|;AE9|{;BP>Wtd!$0m7%(Oi(qb#SP7-Wgu<~f7$2X8DEQ}t3O%s<+ zC-%=`j0y+JL!E4$+$CBL7CMZlxG-ZLtCvN{PH@kC6oM}cbDyHC*wT|I+4rS^O?LNHzRj&EX;{-_U=hy~)!mo3gcrhE~n3R)J zt;*{kVTqUL$0bmDmVWhMM?dt!Iy^&kt?byF2*(?}?Ln<*xon)VEIw%1m7-W_g}?+IGdUfZ zzROF(^Vsjn2NzBamfElT**F8PhN7NqoDGoaDU7h`O~pc9{8+EOwO^x2&?TXCC^d8of>5! 
z=lMI1qlZ@tqN2^gxkawrPpX*kMr*%TQ$)0%C&x!~{`ZICk#||I(!Q(sPKOl_H#d;g zt4_6ur%TgAZ=%+_m3X8U#9QnNgIK8O(So~mad6jU--v_0A@q+`z>#f5RrrhGk!~Q&kkqFrA7|;2)yn$EWUT^X({r}MjYb0W#>xAbDCRL zRz1Z`683$i9vvjCF|Zd#-;ygn{+Mi9`T=$7R%Vh~nG@bqo#kcrvpdcnyq2x$PZ13x z5^rU4369{LiQa#t>;eS}p|e&B(I<=VpapWCe;ncceT;}z*khYw7H}-w+4>aL> z>9}*1QfTFCvmn(|O-^O_L4qQDpF|DgqDMM!k)pgoj3x?8oFlgkzBc3NLD^-G=5M2V z(dSCuk&{4Sb9a`S^zGcG>|6e4hAHV(gLkKn4n5BBqt5fRAw_sl-0XNDSJt5KL`>|? zj|fjeA-n3X&oJ$W_1c#^5TBCo{yg{MIZh9(hhhx+hzJeEa6)btl$ zs0fwP1Sp%nVGQ72wQX%CM^ScXQM|Wiv($kQF;~fONPEZ-5;#M4hu3boxDKEtchiQ9 zNZ+}LEpRLZ+pto+(wN}qF@@dNSmilUvlbsBv{^dD8bdR~yH)4rGk_FwqvN3EUI@_P zB#}R~IQM++botnq<&~?{)|-nc_SRO85bJQ#P@c(iMr6Cja?;PGVa~D%3q( zdyisDcU-XRQ*}4#4yTwKy%~jEESS@t(|sfZ8d6^HgXe7LB+2F?#781vlyj1?;b&B= zb`N8#-2=}&osOw|kB~c;!mpdITI!-LVejZ=LV@P)j#n-_Ugo-N&Sg~0SyPHqt2r7` z#98f`FCToi?cV#|)qD3=-RJ7==kWRHK=@4&@K5G9#DDC6qux7}*@SojEB=+@phf}) z%m4EmlMz?(Z~O1{m5Tm4X zu@YWC(gInwOTiVUK*%y40&^KHiaH#+;PD*;jRZLSzUf%V;qTs(I!VO(u)7$U^6PP>q?j;_?ScgxVG$u0!zPO%z8PoWEcrUDUXR`Gx1h)xw z(d(IVIbDA@y+5S3Vu&N8)xfzwFd;mDke=H7=CNvHYv4hf4bp385xB6wyH*hDFLYJ$}zs}KTXV0iv)xw^YN7PR(z!PjeG zZ=DRhv3midP_(Wn+1}C7GuINf3=I7*c_+z4XXfVSgqniEJ?eJ#X+Wp&ugh`=`Pv7# z$P>mqqE>D>v!&Xb34Oxf%+k<#4-GoM2rpWX4}Up2IOe}$T3xyX;Af>mu;Rd{m7#wx z?VJisi`2jf?Yg}8*1qUnoBtK}@>a$*2WVq^L`=R~M{f#MBfF?iz);C%eXPjKhqJ7# zOafRYF)0Ps|0V;|YaYqx{fRtU@e}imX(G?wG1O`(^69;?*!vpvH?R&f&-Auyy(LNv zjMUTcDHPN@Ii)mR^CRM~& z*?yH;hW*ZRzdD@-bA(#wwYZL#m^+W|x1J;``m}|YfK^~L|1^f|z3u(y%xKyqE1S-h z?TtRtyRD0|ichci=*3^>5m}(saQbwTGW}?q?$d)qxgHKfZB&w0$98hfRG{8(vDhK( zb%hd{KFQEMT*Tk6j+*n$A{iLwDv3V{-e0A_wH_e-`pjbPu0r=ws91{CK+Wa4#7cZ>ru z8L8qzNcLACb451r;8ByiVT+c>lC)PBA`!bRc*7`s&(z^>G!Xjq#8-xL>d+hlOpF>BK`c~~Wm3+RMb zHnqUF756NvnkFM0C0h|+0`L(@K_Z^!$fc=S;7hmw2W>H^ zSwPopY!i`DAF~JS8)k{`Zz6<@E4q*H59$8c=7D5}y%mj@SfcEHGl00v0-@MOJxt`# z6f?K00F2iZd9IF(ypyzz-wq*T-(ga*#!5TSY)IzxUXt?M_s5&YU9i#7ElP^YWWjj7 z*zpbBJ$rQ8_cR}s6HGI%ABDuyXvkQnVV)&=&# zXgJRlBk(*MocEYJv7vN-_d5W{GH@g$Bqtd0P4oBSWI2^5TTEsHC%+eu9(x5mpF8ycbI&6H zMNc3G--x#<()qBPc}?0J%J$TS9I1fRs}0X^Aig{WJ=>MV3Bc6zOBdoDn;hWR zvLB&nCdR+PhfmuZXD$RE{b&7`mQX{NHv8#}7}o>RE=L16T_$vIq)hlJ@X;s}zC<&T zgjSKzdxT-y-$u#7Th`y%7w?wP%lEeCfGTlYdI&X8SwA z8awV5rKi zDK@c;3Jp;V|0222?905vxma`qSdjfFer9qfMZhv`o~?jp0Uw{z5ksH+xwy91mj|UN zKkX``$PTMtu6IUxI|9c%_P!`KKY#F`D1YU^2;YxHf!s0&0Xmw87#ryv+N}jZe@qVL z0~l>m(T=F4SwBHnq(*(KdXd^b5m02Kk7bA#ejkD9b_%didjyhtkb5m@-p>S5B$wDA z*VKtb8PIw_Kb;Rs9DKahi^;F~L7$c8ZTc>OV&*_pmR&XP4Qt+DS;iI4?WH(Ho)juk zwac$;JGkAg!LszI1zrXi%(ZpgeR3xH7WzrHYWPM?y8D3a{Q^Cvvn0py;@_@VV8!Bg z6Lni*rC@cTr9|#V9tSqoLG@NlIbCjIgFG3Gb#~rZai6Wy&z0yiX)(w~S+VJQ5Rt0ChsKqLx^yxE~7ZmrDg z&=pY7RvJpgq99~uV<;W`LrqY^sb-ns)6?<-dK$BlsVtlNZm5iYg)CININc|DfvbgL zVPFw#PO12k&jUPoCE7re)y7^J_H->tv?5AMu_{-+uyT7Mklt0N3^%}0)pNe7)B{GM zxP@w6o9`j7z3hy)d14AJwoENYR_o(GQAiM&5u8qvw7Sh0nynIrbYI!-egcUDpR5~T zgH{R*sHL^07D@V7A&iqP?r%NnbHcDSei3_V9zM2nyPdgu#`~|_*}3p*GH#R>ju0A^ zd?@bX6D4Orz@>$fnyy3($f~^~yA@Hpu6|cgF~VK5w1nm7Cjf5VzSj2*u{%^im!Ui5 zLu9H5WkuUq&Jk(KH8phQYDC9R&WLf#AxC{gp=OF`iwkgXy~sF1MZ5Ix_ja_%fss(g zgi2nugxgsyV=`l!BKEtpT(LZ4I|o_2=lih8_-AD!taEuTTvHYtxU5Mt83FtBjprvx zt7QKzJL?E$8)Kd9`j*NE(&)EZ;^wtRz^M zofr^qZclkYdYBO?mtTADy$QE(940nkjAqudHGft&$T!<`LWtKS03aK$*;5lj86s;5zeCpcWppqmW18Vhl)rE)sP`J z_I?Ou>pM7k^q4n-+AJy{zaw%{>=#{5=o^bu%N>d4?oNA^z7c#hR)Wep)XCL!L`Vz% z(YCrUEu%M0)Y&{fh2BTm zZrE4z38Scn@sa??fDOp2@{$te!{v!efdhE8!3(BBgV%I>vgGx!2EU`qfUX^(EJ_UW zbZD2g3%UaO-C@zTbc5oV9Y-^MB^LoVuc8@~d0gwPJ7&TkKdBb=>f+Vx3t3ed`#fJY zD15FbWp%y9?v2)ae-Zkt=1H~ObkNj3urWgE2DQ|Q2Nh3?xezEC@YXy7XIB=putGD| zM+y`(28cSQkOXh)IC(_ai*)Ykv@oj)rnCCGkbfnXF4OSXtzC`Hk1h7=+K7Hl&3*a 
zx{b@eBNNkfvp9xDD2Pg#Ss}hO?6?*FjA94^fp`z3Z(7IMD{0E?fSvbcE&5qU?tt;ukJ~LPP_MY8;ER z`Pjq7<-36pdbOoQ(aItK;|1stoM=Yvw7Q)oYt>&{N={>M+qs{KS1&!IfnR|Lp=o^j zJ`P6Z2oy$~(Tm!G`q?2@Q$GwVI4BiicA&_wPlhPS(+HAawv zwuct$#Pd|@f=pHagPfQb87-yini)L9HwDsnhg{lrW(s-5QY0yi`PK$os<$BI8irr9 z%2h0ej$2pVFfRK>AO5mXNv4oxPWCV8?)HpRWTm)%_S4lP=2#(Mf26C*bc8zeyiPWHho)WlP5HJ>oIlLR`PK>aSt7{NIr!rr)RI3=-r!_ z1~J&rtFy+SKJ2;ivAH`NnyL{*UCh+9=BMHy%0(m6uWOjH!Cy#0$?32U&iUW43!1}y zH_@+YFHb-TzDkV6!ffC)_m@v$B zCc)@uNHKaOWt_+!4u4_4LJ7w(j3H($<5yf!#GF77~7XL+IwiFbJb2xcta zzu~b%7G>k}10cAo)sUw=-vVQ8R%2EjX(450hiO!_(v7Ux;!0! zPT8f(2^t==sk)rt1ei>3Lct)kBPyG@IaSpMj+>5yc$a6xlpwfb#Su7j!_P$B;m8hK z%6nPjcIUeOjX-O0f;Sz=5*IqxmEdz>L26H|S*PE8b|~HXCP;*SrDqgakz(;EURaMe zxXx!@ThlMF2B}WZ2S{Y}=gJyNw@Nb2eXXBoB`;Hqm6&gTHY&+<26g!*FH_sD{%bQ| zA74;uYVs~`_1Y1FKPQRA$c0j zrQ^(i!>8a+jRV&)bAU+&0I_Fv3D^M}f#uKNcR%bch%umIm3Lc;+Q!!q-$9wxw1h(O z;XSLBuzO5gi}rvVb_O;Nm%Lr>U2NXRK!SpmRBiQMerWf`bSsR>tFzN=_OzmqJsE1yIrAywmw(=aX4lAjXBcxfW zs=e^lO&^ikX>}$l8Po*VYuwjm82&x^4U&bO&CPa1^mrA-62-K^(cUmxrkRs_KL?0g zMn0$z*`rHLss@@OCnnbTGf}O~kJ>|=CrVqk7BM0iHmi2=Ye2y8LQQQV{@9&TpR$NT zq*@vOB(V3$1T~$PEUAzsosN#q<9ZBSocsF|Ra7ZuE+#;%T_VP_D_G^)uB->f3>_+o zCV*)OKq7wQZTrEqH`PG#L>*XyTaP29e!H|$-R>s|d6<8P-u^sONHYMmcpFKqgoTbg^y*V;{W%gk|Ty$L0!@F z>myWXInWK^GBMES6PW{Tl&bM&K~@QhHJ&JdlE1`~Kjmg~hQNKY*{d6w8J~H)y3l27 zE-fr3vQf*_8Kh8sw`fheC0ki&NdW0uU?;%M#j^ov+F!YDH595ENWnUruG60?yK+1y z9uK?Zn?6hD$Ikt!4nyF(ARw|eE$^0)3?w$M{!W@mfNEw+c~*v&QC}@lxy`}M+SmDJ zge+@vAkQoODL^{uC-jptWQx-tA6*UbY&Wovo?%+HvvjF~#3G6Y;cJ(?pQfO_1ZZ1m zEfS+3zg-P>wSdm%Z%sj`Yy4T_V*Z*b6F&HzC!iC0FnHo$Wc&@sgq%{sme1b=j$!*u zhVx((z`K8Y$s^yKVD5eGy49Hw~wF3@zWz~?D=u?uJ+;in*l z;{`Tu*tu^Ugi9<+st!kV!+Cy3^h4doL0t1J{_ztCuFj%lPG?i(pe;IN8yqwYD8jjv zQntapzF7~Wt8@TcizT)NzU;+Z9i$*qK~Y zy!^=y3c=Y(k>}>7?DGQl{79HxSGbg6NnAH>FyE@ZmP&zV+YIA^V+#9QEylhj-3#tV zr+?UgfyU)1g~HV(aLQ=}>Vn@#VA6Y>X5EiZM^@_s;?Zey%{LSF-DRIj(DAT^6c~aL z6TClovJp6gyTbD%0AI-sUM((%8GFgv43tOMtF2CZjz6^fB(M9*UmXw@`9+HD7?wafbI0^Oj~X_gz&9K=#{WL#U_G7jqd` zE=NYzKKy(w)!@^z*Lr(HD^{_n0d;4mHl4DbrQ$wat%kjv84+Wr$|Z13SXjh8u6D7U z^3{(O6<$Kq!!GALqFAN~@5Xv^qFnkt%Zo0}Fq~zU5*2^k8_}P|Wp{2e<<1nmXGf7Y zp8gROk4RRqEsmuK7uS4uz%1qX>&Hrld+Yl-UhQVUIeX}z`L3njFse73HD8*6&dazYo_@y)~R78#ZmGVeHZ zuLCppYkrfbJ~n>%$FtVSbVhETkCK{VlP$2F%XDfKbTc>7f46PTt09RTB`CBO4X2~q{a!4 zLkkQS62d~}_kmiskZA^90ske!L9J@~q_a$$$Z$D4pgU`v)0>l9tdvNrxpjz-ZG^aT zH#J(ms=1yx6c%Ht)%_Es$G=DeeF}pKXaL{ps4ASridcq>vPHSONt(t?Z1sj@>l26x zcLljhr!z67CXePv>82U`KXc5rHXvDG$i!U67T4ls%1BJy$&YOjCvV}}CxO4L+*bS&1fNNf#fU-VFwfQE2`zUDem!kNF6Ayihr4mWW3VJ z+-W(t^B%qpuQ3~pBubzb-Ub;05pGS`S)E<;1~ynM zpGdkZ&+|P}yk^g3IrTMT`Oz`cYuT<&KaZQM6fM$ht9J3?Y!I7h{!Wtt2=5jN`Zc3- zqXgkFhLL<*)%RW#6ML#Au`_mOBREi=k_(*ck$;mGRif^V!@i+1KI-c2m&}_mul=0V z6>a!2_8BxUzNbnP5y?nAV81h9Gk4B!y&3ZQe)DuM)3cXb15X|}CiN!}T5=;F2MU*0 z2By{Gq~>J%FSK|_29?Ono@A~S@3yw=McW$A?UAUYvzgr<@yfiLo znz&p65+P~%;Q>23h~R*HQC49`=pGs5D7)B2$?*07iie19qybh0+Ueo38L>M*8k^CwATwRef%een+VN zf6CwPr`^f;YNQ?NqaOKgQ%l50H@@TIgzasA>j!SkJ54(wx1nukkx}!GNXt5zP@yY{-IpC|L=wH7b?f5#7Dua~Pk}GX2-r0B zHB(6c@i!lM%SQ%DyD6j1YZV^+=G)Rddei5-b9V*f!qZ_LxcU%+h<)oc6& z6@jC;O~>p!&vzc_^F2fi$b`pFS|y(tnAK-X!AF+DZ7RMXOOZ>GwXg_kF3}K+>&OjD zO7V6f8vn~$_Z-i353zm$7D|JW*454lvfj9AFAVoU!kATW@^F#_VQ%`4uwqiD;(nxs z2VXo@IuUkTm=5ENdu^sG0w6~P|m0&qs zMe7O83FpIj3BI6W%-VSH+#GXt+70YorajsM5J@6y06%k`ZeIUW#L_J5x*6qcxb113hkNzLvHaAL2sA z+B>0=p=`me2XYDC;0k6+n97_A7{LfQY7`>>QW%N7To!DKRMOa7@zm%zJ zf9{LDdHS!u+tzc6RdG#7Oly9R}TOp`7<8X6-3QB|>)~QrE@rp(ci}ZpIOHL0XU9vTp^~G3~0= zQjaVjM-j58_LnF4xg;@C+S&g2%FYuRsU%#@AGSG>wnNz)JZmMPQ& z`Kl_KFh-n?uPL3;xc2*f36skdvZ)7y=~`E1f{LX9GR@dc6X3O~%rQ~MF_B=etjNk5 
z_BE%U)l_#NVee4nNY(g9TeCYCW#3PC=L{X1-sJN{XR@XqnzQ>x-C1?))Q3o1!C;`N z!5`Va(a+}b51x_z5uZvVZTiGfJac9Ir@VpLM{|Dp=Z`-$F5Jq?m)n_s9h92u>bLwo z+NigdNYG)HqFtl0)Sos3YO|%pBiE0vuKtL+<+I#7o2QJ@Ik7$kX2^?>sa<(Rouywl zGnN~5&yF7;WThAyz?kRp6TkI0L^QlP8Jbkskb;oU$gcRq#+JI6=9QYc4_pzXka6Xu zW{SRsJGmr-w1JB^3KE#P%A_jhH`VufzU)rm0+-$@tI@CTc$L%ief*3n^oih6e#1`* z8xHOwD&ZVtAV!4QyEHAFaNZ{vE)mJaGGHwfGx(<*GZ?grf zfk_+-^TZ1m{cm$$A)*eHGSa&{bI!gCgcCDo_c{dta1HHvLUE0;SwI^Dxi-=Bqhs4| z9&YAJ&`-D0E-uU7 zADgLdxm@bz-j^BA{m~B?&!eHC1Sn zZlsWG>OR8;majJ!iJCV~I%-}v(-v%Sw|wgB$)_M-U=;7PP(R(6^30d?_8CdRb}@g~ zz{fyYeNVQJN+MK&e(O}k1Ibwr*dFw}y?(u`C;N@sW9{qkvU>&1Cph2FPu=dm*5dsI ziOntJzSSp{;$-xfE1dgI@B3uqKJ)?4^#q@|ov4LRo&wLEUs}DrvVA>wk7P`oO!XJj zcJK#&p~RyoKh5V1r}u`S%gn4|3*>z{yM#Qm2S~Sz^Bs2NhWxZUd4kjo^$K~F`t|t} z<^8#AV&>>o=~lP;^m7ddPGcE%uB0d*#25d%?t?XDAy_34DQ&}Fkuj4slM9Z_Ef!#U zqlWHXTW;!jK~A~61odjC2qEXy!U%DAA~RDzl#>{b5y~$J@`n!hyA1q_DHNDQivq7p z^poa6h7|cHcUI)hw?0kj{(0S`cZX3xM4LPCx`?xu)>YBr1}inEnt z%{c;YwS?eQ^1q&p9I*|*XM5@n3X@q?lFr#HMUUGT_AG4GVRBza$Wwy~&i@FQF)H%jjJr9e zZ67nt zFzaAuWtCS)3&PfyE?!9M+`kj5j$TSuEZDB=x`c6L6tN(hc`aTTdZs9@+XL(<9_m=rEI1cBeXJ%h0Zn5LHt+ z_xuC27VNa$@9JQLd;!IY+nyOJ4R(7uJkcVJA5F1GjgZO97w8S5_=o1+iRbn%ws>(b zJB*m+jFJCJffUE6$v5v$^|wBK!y7l8*FKLzzqJWj2+e+}yIdmhQ`dai-0kJNTlUZ8 zpGtm_&iEky4n~Ql5I5*k1U%1ul2(Whgk1Br6$i#!+CwtqKO(L#F(F2)NCK3`s**|; zck>flb25Cu>xmoJ)2%0Snl{!C)%iVE4OHIk|83<_g_raxU63_k)#NDO1fxC(9e-6+ zdruV551~T-kX3f@QIMQL+yKi*|$MVS-M=Q&aC1(((z%gP{zemGVRhvN1 z>OpuE+7KY~IPai?M*9`Q28@uXgik=Q ztqcC0Q(PQnW9}=nEJR~zLKnKxuQ-cjfpn3*7-WR5yEg9y4;wHK_|37M2SfSzTHTqP z(qTj0(rvPPRGcF%bS*nS_g{1N_Z-kIiV`U#nnzfA6A8H>G#jmCc0fmGIF(?w>%)U8 zmYZovU%*=+M_8KiIK}%><^nJ>TyBb>e&DxKW_84Yo3Ajj4*EgR)1@31s1lM0iGqHe z?T(VN%JZ6x!2Nl>$ctFo*MuXl^BXU{_*v*onvrvIH&*F@yR;-ctdvVL*vjNkb~4Jz zp-s#9q%ZO7Mr>6}xWcdzp(ufB@ZwkMS0NV4OJWA*R0vpJNE*pt=-zcPUoJEHdiYXw z=?%Hm@;EvH*5pPy47ZlAlYl$f5mA5?l*3_>lZJIXn(~lmb|!umjC~ubEnhyu)7;jC z^l)3gDQe;5gy+*Y?GKAm?jc7bn9kVc(|abylG3@8?D$l3e?8v3?)~#Z8;7<={3r)4 z3b)hq{Ox}s!@}_W30P zCF6~EM%JStKXEN7@e)C2c7HSfUE}b7Dga(Bz_WYXOe}a`pN2$7{e!i2bm-Y%xklz= zf5-!A-16nExBTJi1B4^;G+BeTg&3E0YE~IeF!f79qlx8%z6OAg?VS5hYtZA$8vmP@ zC$&*TrA9q8?3`khOOf0_lf8%0Y{u7%f-)iD#L_z%g{8I<|QDEqLzt(%(Db0DR@-DD(6W-n3 zO-f0z9`phBEnanB-1b)uqhn+D2BY@@YrKL#eG&by=O}VzCHt$AoN?<#CI#~-5zCK7 z+PLFxHX#Ekc5Z{)Onfz%=57u?3Fj$dJo+E>`GH#OMG~*>5@6aA0jm9Xg-I9kB?82LFh^1N{EZYbhRNsbe)2LJmJjsIEBNx4xmO4s+Q{#vW$a^6@T?1{#p zkn54*=)Usr5jqW5H<#@;2$1l|J5ER%{sQ&47h3sFZ^*^E@hQoG<4L~ppx`*IKqBbU z-#)lJGc|zB3JDze#_Z=Tm@G8_rvBBkjdY!fKs7-O*Kj7k!(6%rX47EMz{#*9QF;ls zvD;aD z1tzaO`WunjEo}tAZTsuVEZ`S@4+`T4w!h=uBhR**%!?aOU!JkZi~(;x0|7?<_~Smg zswRN3GW=7Vq_AqJ-p6bt%s8oKMlF$3^a^f%R^qX_`Z&c78Pv%%#lUjfLYmc=Pw$kX zv)+_q@O%bYc{h=BZw#4r@yg`bChwI3 zGE)1T>R$f5S(m<;sK{l4_u{Vy;1gblGUcf(L$?Po{v*+oWX$Q^xla>S&RO5vKmd!Q zHr(<-R36@xB!;K=%5iM)+%!}g9pzfQ;SNT&x*+?hD9YkJyMph8r}V6Uxq|cYM){yM zh}*&eS^tDPHeEIn#-llLkj}9XzCU@?!Dy~WvEEA5CVK?X0rmtpQ!osERw&T>Htc`h z!>^Wf6W^dH&0ZZ_zjBX#_-g895T|p+JK_ARDN+poX;*y?N){BU)(SM4()HC}cMw|7 zt7v4tq3N&A-M;i-1&mK>pyp^==1^$X(x>5VXT8@1R zq>2GNt6e()a?k-?E*n4`ydWgz0^P9BNx)T=_fAS_&I-srDD7$h5#bx+P>Qv1)y>(a zSzpnKc!XOqBwu<4)mHG$c98w-eFaqhi#Z5T{VtkAz_kZ3pC_)JK$S@Hs!f(wwWAT6 z=~oPx*^Xj0|9cU5`qyf^tc$&zTN8A2|IeXH!22UQl9W7dmr|q7=`SHu<*~*MxY_;8 zI{y^Ja7EJ(fq-v#?v-?4o!fHdZdd8I;>}D(bCmXzKYDyErTK#@_!%%@4!kA-mx-rZ zlYuVg*O)KHiqfq$6=Z3?jC*N3zBU+q#o_`F5(87l?B7kGv*(5nWhP=70IRe@ksSgh z4iDReULRlqN+fQOx%sULtWrQ`KP(F+h6)NBS0sYS;7K1ctTOwXda*&d<{>R9*#v@C zZWlib+0Y(A1b)VXcXgZgT`%&w1b}ZDb{7mx?|{6ZqE**yGuBAx4YDpyz1RZ~55;W(AyOIzn2t$#fOIirgv$R`gQv!sucGU z=3kx-`l*Wu%?$a96gaU8WDQj7(k3nP{+NZaK7bpB-Zd@T;r&EWekB_LxR;Dg+Z}N0 
zn;_r$j8_pNS!jqd<3P~-Fa3G+&EW(Bf@V3@SLxz|hh{_>9#00%DW3=WwN!gWcz_^} zJmnBjAmC+WWAe@7$FV!)#9XSglq0*X5*QD%&PHkTw*&+~fhXnqTjldBmU?)rN2(V& zbcjrr5-8-F2y60zK13EOvGms6eUBQ=wk~n?0%+;~_lYz&3Nj$-Iru_w_p%t;vU0bx z>!8tm#-#I$zj^A`>0QMvpC%Zm&RE+#Z^uOTyb4L)fUF~cZD=|j0D-j(l9#_(DgtY_ zRG=>TwG8iqccLVB-0DwO-b(=L0-pv#&kV-bnVVlQTpPC{h^UY&;zK&#zzgj%-%5k3 zQ`(ATVt|~!Sx|LGY!xPnQX}MQilW?Jy{p_riiKUltsMlSIyC2;`9oKYc7ad2u_#5N zv}zB8adCQgVJ^UR;ff(t=H<0B&h#hP)`sBOr8@0b+)Od#&W&InKKdX1-;&H9qg75D z9AgUnZ`9`~j6WCmtdOsc;@iOT>xI{|I7Q~Pzi8*i6y=*{zq{luBqdN;r3(t7?n56- zInPWFl&3@4$CW~dcw0l^jfa4TsLCi4&)n66t}IeoW1OT}g?6UmgfwO;RZCaRl~-Yz ziI0nnpAuzOKzO+oMAM#mz1>VUYzn+sG(Kqn9dZTSuolN;w1;`%iOrNxhKsNO@Im}e z)JOh;aNzgy?8S3#OpJSW*dg#f6$F#8Z)`QP&h-jQM}Qlt+SAL!YN8iVzL!xbHHdq;i%thYz*-rK5!e*voOl$?$9%JyI4 z$Nz5Av8Nk_xiu00=hj%&A>y%Vyt@JUdDfSsiH zjD*qDqC}zSjPjE)uzZ_x8}J1~BiIvzrYnJqA<8HVyEI@4dN^+(AK?Y;f0n?g)T)j; zhEde%%xNc0e?i!!vSMC;Uk9Jeu&K9c(QS%bq;e#T@X|=H7Ry(bNdVhSF9>VjgqC=u zKF_)P(fIp)Rg19c+N3EEc5yIcQ*2aw8h!yXJD1|bld#4-U*d13huUd zaJ0Ab&9QM|5Mzs>zhP2ZiuhIC^h5BdFgx&e3mFdbG-ogjoZFiS=94JJi`>ldQ<_)A z%haiVfBLHVnqDNEKFq1TetkJIur*mr-y3L#Jb z00C6cC;VG!r7FPN?<|CN{51LR;f*>BT-!=8`gb0Znajv24`Q0yj;Wbnv@x4tWEA#kyR}Jj zBxp&K&RG03;^(4nLB(S?Q$Ev)B_bnHwoUB>ltH`(`vmE+mEhSchCbH|iRmo&DcZum zj~vx$^t^)i_{sS{6uZt2hw5j5#&XlKZa3i(%@Ln??-%S$?f{JDJHj(@^HZ+g4~?-` ziZ9ABXWii(0zo?r#7QT=FW;#?TA;1E<_Z zumW2L%z^x8-$M62DK)EIni{wZH@dXh}@M6lyzHT z&6;O@o=uwJ%^lM@n5B#&nHh^*>)mu2!@$cE+bM$YapiaUrQ>^cLcd5xy!p=I4;*a| zDRS^m!kJ7D-gtGl%5T>{z?UvsqZX8I3zoao!>)0b6er)i*+MdK*LZn$mVSGT-hII_ zdq&uk&^`Q|d%ldq5VlsYqSJTY&l7zh8i(uY6UQ0{Y~CANMH(96kw`gM@>}m`PY9 z*OJ)1bh3~}TKFx>@8fu4viy16vv^KUuz*2gNG5qZLFZ|Y@|9{unr3ri0*nd}gdwehxx}Wc7l}wIOR-_Y06{Vle5Xyc4asCM_GWrXgQOasvl?eg zU@4(QdW_b@U-HB42hIp*W}`&;G~~3N6**hW|vO*8t{Q~0QaRY zEhNlJ=q063b8w+X*1#xy6U{=PDZdc5TeY@y_qYq(`Wtebj#v`%cBYMUUgD)+_9d?s z{0^TW?K?3{m2K7Xjf}0zP%7Otk#DUb^>w1?1w#wj`6qz4F5@`LzQq_ukTH`#pnsE9i7@2m6uJFz2?<3H#xC_%H1BiN*UbqKv})V$0^A z30P^C(uC2vh(0LU{e>6W|67Ul+~F|&St3nSs~^mo{}C7;{Wixqjg1oO_93iWMsQ%t{2krf({gr#A_gZ6{>Lk!@N! 
z>g5*C>CJpB`9sjR0$Z67eyttB6;On%@bxGX#)1%SWf`%0!M5HJ+4nmF5iz{}VWP<+ z|Mj{gmbE~FPu=g2|3;}%w;Z3+`ecQ=Im04G?~>}u|{ z{5c0@&x&@=xOq*_a3stxTCT65-GTtM1Q~ZHBDr@&-sB#74oq3U<-Dbyi$cmd@*MuY zXs?`Zo2iX^(xRrS<@VYld*Y8(b?xtehf=~ZEm4Jo;d zjbQ}v$%?JLG}vn>2^@#_on$ME7`xw27D1a68b7qZ`C(7mlFehab8a35H_DqgHWM#c zl{T#Na+DvujkYU`+bgh8pU?jI=yU2O&qDo;rMKtOcn&O1YPVJFxu_WgO}JLhaUnV} z)=~*~UV0TEqnKf%6S;HKI7F5PrwL)=F(J@od*PZHlSE(5zv_;TOpxo+wngvDemsLf zPNfLUs_>=yVO;BIXM=h`zR3N*kQ1VIF)L>6utbe5t*xgB492&BmBuQ*)SwrLQ1F=v z*Om=jIPuQx>m0xEezBrC|9%>#BrhsfcpC@&A1@5Uk^h%wppGnSoi5*(bsjawCIO-q zr5m%rRp+@gn~32q*@X$XBLsT0{-@73V&nXS@5J|V5qCcEe8hG@O~PSVUczmSANM_{ zYB~3XcQ3`_yIz_;KW)?d)_;=cJBl4+5`7ibwcD9qKRrFSG zdV}A)nFKErf`4pW*NZkKZwlx2Dx8c(8gN2c&(ojNDrnaoIT$Nbdt+wQuBJ zJ9c3RUdD4SwnKgaufYKpO$(5tZ07wJ_CaFrPN!KOn;4sg~&OY3+_7CFrH~tWhD;<=;eXq8P zkHKz7888>h!mmrkKu@^ZX<7Q(2W+~kea;=Ouif<9eVcp%aM^<#KoNTf2CkScjH!`*F3L4-wqPwQry8hOI_#-5VEBfKo9%ZJ zpTi#QjJM^x?NDo0Ha0eos|IBA#uv=gCZ^ly{E?I`H*R8tKxXc4IW+x2%k=Z^pE|^7 z$5B6p?nBL~i$BP1z5V^&)7HyfKpN&m>P$!3fsw7$7i1>LOfsYoNGmkhbdgG5zpH;r zPv}ASApyCr^8ar7DtY8TKFc~MlZc2RXqKF)QdCJ4P(IruwS_}xi(qVtd-OQp@VF3f9BS`(iG3XmlyBFhJx~-5xJ3y)J%u`nwM0I8NYkNq~C9xS5GI$;~7)E+PklSzg{)B8lO=5~q zv2xCmj&s5;g6lWJnEkRFM^Rd9yPEW*;d|eG5{XfC(tLvL`lU18`pbVPSf}T|^sUau zfTG?r&B(FsHMQ3gib02e+naijIU+rpyT`@3{(tJ((d0KP&3o*k&OnLbeUGczc%jnd z5|X|BUw@H|aA2{&S2t)J!xtz@8Xf5spVdkVY>TIlYKUB!;;bS6K|03e)Rr zM_@l(BOT1|1%mNk<$ko_CmZ50j}+;S#TDqGt8mQYU{+e5d_I>RA!NhpeYOLsZcPDaf=aCH>t}p%zUPt+Pw|V8U2#_QFFKYRN z#?n}Sf;b(~O12Uc&#DI!->SpUA)(_Y?{0a zxjZ!XJ?fj(yaicmS>9LRS;bp)4&4RqC2KtQaW=!+FMIF=U?^;H01ewo&Gie?MzB4p z>1PA4B1shWbZ7!)v_jwwAv-eyMj{(t2R7GegJX2iIFN?${=xdmrkVEv=>$NI=IgJX z1>$g5UiyK8ejyk}6|5lrTS!7dVJqW~`Q-h0VZ;mXcLBw_gqYzUkQ49ZTmP{R_!dOn z{+jinl^v3<;SUq=;KTO8u0}1%o=scq$%j|sW}Odw#DP`zZ5o7kZVUbjgar4A6Y(X0 zU1P`q2Zf$VO_n!#V>s*4UTaxMtw>AOA$$3g>`_QD($Wc@(by-ilO>S?md_}dsK*g` zt5+>M($h)6V0(VDIp)~MwjS{VdsiXA+H$o&=JF`d24n4&+ICc3mN$cAGXGoa5MbCtCO8-pWzV_s@2YaX>219Ay#olW7 zYn+sYffsD0c2>N{ID=?&?#SPH>@hW zE9yZfZ$Cn&^bC|F?OCjPABBo`qZj-N2#9m6a=-agE+aLhc$0pN!!sMVF~`OzVNg_&`nIhei-B+nHi)Lh|+#@u$-Gv4Z~rWQvbL zyq`X0r4`90cm^j0PU;sU5cDm2PGihul3x6Nk(DA4A$X7tY~H;jq^}Ic zC9}Zu2&VXiQh~oiNZ@5gsDeoutCZ2$9Pvfgs}8A?I48Xe7ZrQGtMCz7F5_xICb@pC zgx$fUQoYW(E_f(JM__OAXFcm25I*c5;cqIvK_s!neW%#$tpfS5@6Pw`)%DI3Id8AJ zanbrutR=o@GuCIYEUN%M*E32{X<0>H?*5TFEhH=fww749#uyPE9{;TW147PsOlc4c zHE0r!>x`?Yp!-nXZRHj0AkNlutK8tR1ND*HLM&!%`~VLHEgr|sm_v_V+hXpVN$P%1 z=0?+lPl1Qt#_m6A43fP*HfJw@8AfLK9+ujm|bk-|#!plVJ8Hdz58u z&_nR?jYULduEz6>X4}SC7lWeUow!z)N~aIG<0$wrT7qwh$-kbcK`!9dUY8XIRWDKU zHW~pBi$aDpv9ag8`#A7oz$4*-on{onBM}^857*FM1)^v`z|Ztzs`!9ZxRW1Y%vy+R z7lV|4<0QT;*=AsR9w#BCx*2Wo7mf~KlXMQ1Bpj9Z7bZCyh|>C|&*xLtMuKiTL>NEEJ;>a13RAx87z2b(O0Rg|?Xuy{f21F9ITixOHE ztr2a`X)(kw38sX3ZQ)Coz)o7RhiF48x)0HBYFE@Q?fN;iMivz*Y-MF~mYiS(r#g@h z8$a0kRDX3ZCShphspun4%;@Rhx#RH^nIfqWqZ<{CQE(1r1vaCCI{gUn(`sc@dh|dT z$>Avh?ekMH!unuyWqrI*`cOf15qY8I*QC2%q{h*b)VLUa6gK(&HVS4x-p4sddv*O~ zDa3H@z9U@-UNAeP$3|8wHP$sS!$78#{9-9!F&wuS1=f4=@2q<5}_2G)WH0P;SJoDG0Jl^(bs$+3MX8Rumpsj zI$XcIksQf!SrtKZRnjZc4bo6AR^E2Lg(G?2^Lfw7AD+~Btq-`LQ5Ph=io&d+&!OoG z&3*fWCb*#J;X~6Npy+L=p&73(#_}(v~c7R-)UC7XLsBoy0P0&xk*y_ z-4#As`&czh!m=GVdTX*-sHHagOAhgYO&$v@nGd&L7~JSRv%50JY}WXICkIp>Yv4mi zvkdH*Ox*>Yb##)xK7l)dBp+PSf^nx(cwdy`(fN^{g;Ue!a;uG7e=>b2-Iv1lDbR(- zw7vE8IUV8ri#)rgRLUTGRx~lvEQSTXvwV$Fq#)-HCc|)4b$%Rf4j(zOK7JMtofQ}w zVRC)w4r=HWLSh7eN1R4lW&iGaM%ti)X&eFSU1dW9?9gKG=j!P4nCL%Xo7vVeEdp2; zdwi-}Mos{e!b-MBe(nPSvF zxoQ{-Wi-*2&OK9!7Jprb)Z5MnMZYOzNCH0OBa4cEeo#c)$fBduSKW9)c&GX9_?RI) z(QZpZ2YkI~d>5qQ;i%ECaCL)8xbBlNK5Zb+;I@9Tt2G_ot@6n8u6*ES8qa^~0}?qw 
z;;*0!_O5Kkgw;A2kL###z{5-Okn%OaqhMo3>mR!S3Cs-iiL&ehk#sP}at2*i#OUk@ zG#(ngdem9^AwT+O^xr&vUzfQwz!-$`25OMNG5*ta9vt)JT@O&gy?3uPA`KejX>a~^796xIV?+zZrqrnhte zmFm{izKxS8s<+Wzm!Cz&tHWEM)^x{d5TeN_UpB0Kp9jYg$vlYx1Bd)^p^9KaE>}*b zkbf)(y2B)LRLRIZpkkxhNQ}+HAa|U4S^@Z@uZZyy0FCBV^~(~}lwIDs6cuCU&8KDm zO+d*F5rWOGOX|x)DfuL1=*BuMi5%z|>hgLdYg4Is=afJX(=P6Y_4-$Qpp%yh`5*oB z{vlZRGld13nd$on%cT<$UB}v|D($o<)1Bld+__RGnB(LoWn_j8FHgVm5SNyb``^pp zLif#`PF$7uay(_k%ocne}Jfa4z+?9z|?E zc> zJBZ63t-i2}#5J2ab+V=l$fbq1_Kd3?bUQ8Gz)D+`TF(=0$HE)Ik8}NKS!%fv^T#d}L7x=qpf43H?VeZKbJrX%1*>ms!~tC=q6t+&?Q!>05TbEZZFJV|sX zj;T;*1;&b|`-?yPs68R_zU-n-#qgb+_>pReOSYeZMEyQV9TN;-sPYv@9!kS^nq_n z!{+nbc-3Z;(zeqOnQh?_F@#Kvl(65iD(pICpa1ImXa7D}v8rPjrM@*C4RQ;q*!x_5 zr84b`{glmS+#Pn!VO4nJ&U#Pl%C(aC*z(^ow+rptiZ#fPU(cCyM9HGhpl$hjKU$R4 z&ByYy(ScOYsicm%xG(~$6s1*`%**8G&|L2}i!E$}D$Al%S<9z6kF?>rv@~a4^CLym z&#|BJ2eJy+wNuYE=t=nRyn%RMe0Wpru8IuGT?C8YjZ+G>iArdNg5UqWM?-uk9bm+Z`~EqO-reax71Iq-0R{UNU8YII+4 zOFtC$J>Amdho1$m25T^amAF%1os%(m2`#jVF$r}<1eQyKb)Shnm-2|ua%_qJUZpK4?In5gS=?o3`J96MJ>Cz zwtH{9d3z^ub|fr)Z_|E{M>Ou6GR2!=a|-NwJN{4SFePQx+9?Y22+WIjy}Xys7Y+65 zFXg8uTC#>LP6lrG1O~>q5)$~1hh}pJ7sP#jZRygvpAZt=@fn;MvyQwpvT2$XcrIuF!fJ4VNXf>p_w2ZqJ?LUi-w!=DMTAjAD3V^ zF8F zPj&T8i_?0Wv%YVvOpKrE*W#%}&G1+G+o{M^d*^qTTi}-Pd2bWqq8wai&?yK14(EwY^YQXOLxE5r! zf1x~?H;5VV>8@h>H0gGy9L*%s()tI5J_!|%R;it6Q_UT1Q{gU^7VQe zkN;!nPVUPuO)vXYQlD>;Ll@!M>DV8mOdjZ^LUQIujcvD?pG3`jgd1%$PR;)IjGBoP z6(5{Ck&9YSD3c!~=FnA*%si#!NNYOk?vW|XDse4T%4Qbdew-i7?YB*F?(S{!3VB;! zjpB~;@u?G-oLxpci~5Z!UoOv96defl6gnkzCnnSKTlb5Ri8vKjF+MH}sDv*{Y~2p& zxm+xnL!BOyL77y~4*t-2iXDa<&wnlRyP3p>5@Z~Xi(4zw8Ia1l4 z4l<^xh7LKA`FVrMvN;>PG*5{=7RfkD=J_0$bdJxZyAS=NXq1zmKDkPn^?U0_11*C; z8N$Bl(`Ngd`f`=2nK(3W2^v&8%{b@3UUN@{Own}EK(7lq4JxJO@n{$%;rE%f2R~lr z_;14?-v7QZGG46h>6s*8LE(uC2W>L_xBRa?>}HuHac`B{DUO50{w+OmnaCPT#u8Wy zrEq*PutPg<>W3hWi4dHhYf4&AAu&y}on2rBGheA?6dr?!u?}`gTnff938!M;e|-;j zn8>EB<410wOa1jHrL^dmIKj}1 zsrs}j6PV01ZheL~)IR$Cu~+I2*NasA&~4uge*fm1i+NI`XPCX>qGyBGj&M$_qljuK zLY?g0@P&^n6}9)0^GusT2RN^GjI7vQN(ZF22y-Q&#QJCo;PU zJ=Bx2`q|1PA!N?D{2We_docf zvU2$)S>>I2t|Hwwl_Irke3#ppDT=Bj6$$T}V!mKcge#9*!noCFLfoS7o5ZBV^>aR` z<=*z|;~Si@!PoZX3BAFLwj&UmY}1bSO(U5QHR)(6w~HSru>Jg&?~CHcyC)io78Z&B zc228^M4Q&OZD$ANet%WD&J-PHaxX>X%(tu~G!~wix>V0CYzbnGJ)!gQJm3qx@{>1i zWyHYWPg;bNL1mh+{_V2KsSqa==lDpI3)<6RG0)deH6)0n38qqrEYPKyI%m8Xl!r^I zGT+}t&3O%|X_e_d%@FXbw$pE5eoaX#;|d~ie#d^D8hZx@r?Vl^igr$S3KRA?v7^c? 
zPjf3aoyxr0jxFXAgH5fb)pm?R@0cj7nU#=gw>wJPti;hOsDkARkED`|%O_d#qltBbT zlCH_`SnW&Cs;TkCNZb*1X|p_3^gi?r2t9#{1coXF#u3uLFEP#CZlvt)R{3yv&j@xG zS)lNXSMqT;QvJ~vCjR8&^156`^!cB)80aUTC<(Jt8t4l znq+Kok|$p#>@_u-#qSH#-`n@zN|6a1i)ISR9wlR~TJ3$c(kir}^8roy4YPk9GaguF5Dp<&pKE%^3M&Qh(1@w#l{C*JncE?DEPA7WaBjYxMqdh(3?t;`q91T*J& zK8xe0tY&xUMg4^6A7VZ9Ca`GRN1+EAM5XZ0-B-=k%;o5_-TUnjnB)Nz;4fe(2+o~<6) z>h05a<6rXK_I(=w3hal6uB7kZ_}ztewe1%UoM7}QbHYJ?mh@N2=3mUHK?ayJ1}XSF ztbOzlLfr2+8@-=$q@@xSHybx)2qRD+3+jj^<}pxyKk3M5rTi?xGKuhn%g~giZhI;% zJe;uUcAuh;BuAs2>aOjZQI+u|g|kf5-u@fW=tiZ4&={U1^=yY!9BvUk?)Jg}@i_9j z9i^h-6gvnBr~Y(!Ec5v8@msB0FS=Rm?+?Ez%&jAGbN6}V=X}kMy_4qr?&n>$*7cOH zjU19)N4dCO+rCJy-b_xrQsYjtkUPn6A$YN#HT7AFzE*jYM}N@xel8L5B{tD&Sh*&3 zO7RvQk8vC`;x!^|sHUQoRxR^+A_h2q!|e8}c3Tdt)%Rzzy>Mg$P{OwrbhqrW{d@82 zE&WQRU(c1o4E@%}v8f8RdlDW$_0hi?v10@KiTVOX2qqdXKL4aQEwl5({^cNyXkIEF zcyumn60>SzM8};&@YZ`Qe7q_;U3fGF$DZRJ<6gM~^OUJy$aBXTLw~BJjK^%QRH{)z91!WGz4COU3)?s5|q5%G(> ziLcrU`@1)dK!x5P{;_X-KXumXd~~lGy5kWMp_a&mvpJ~NuxNXnNRA$=pfCjEoWIQc zmwgi7Dt4y@^VnrN{+I4OzMf_{etV8spAF#|N|lMaF+vXXKoabTNLyE-(1N1*dxtcV zMQ@E9*b=ot%lkIIT5Tg0R|>j-2UVnKObvX>u9u?3w7ikTT-<$;Y4L-k34@GPR6VW~ zDjR?E^$o-vQ(G5T4BIAoev+53@`>0|9mOagS5cBaL(;+uZ6 zCiRwEsj;8CQPXLv(f4X9X1`pw#?xnza{#u61tAJ|5RQtfmTM8A)esh3$-GtfA^xH= zDNoK((trKz*!Wn2pL!`d=ze+>x(_g+x7jth#-PwJSG}G6JE#x4xl^#58!IR>PYRWP z@VajHBjb8WnrXsQm1l}j2Ilq~mr6#aqI6fq)ZpHS5pgk33&ic3!a~B7hN`XXqC65I zio8=@jtNKb$|12SCJ}anddFEyadzZiZd1;Js-$YrKkEjCKCzbwia~i_ksWZtsOEnp z$^@CoI)H$S6E!mzi~rp9*bBBC!BXU2TJHkgsjs$2yanf{Y8(tqw&{rXY>89hirV2` zf%L0!gLyE|!QJm2iYLIwCLYtCHrN<+{QFy})pdT|SqsU#;oidEgPK_ zyIv`sAI4b<%58i6-COeM;V^!}Vwjr%reXLCR!V>sI zL1m>T#G6rhI7?d}`zg0YY*Tmm^?d!p#8;!mHKrE`)$n(P>8wz_kAp1kBQr91Gb^z#r)TSoxPGcjf?VN?af}Db3PkL;2bmWt#?y6wZ%>4*^ zJw=&BH=+EXogt`$iq#Z%m!U)T;&pk)mLc?_Uzso)t2H&LN3xB zo_EL3_1vlzG}1}e;e(DpKN8Xwyxxc`{Lp28UGgr5U*tHIN6+{Q_s$Ug_Am*TX2SID zV$YLeLw3hSUsm?LUE8JFmDGqP?S}B9=zRGMB8fit@YqOD6Z67~!%XZBiJWmvSUald z!Ae08D@HMO^>|m%%_S~|Z+n!o(;wbNXgSIyRDhBJr6;yE{iDBd zy#^&aRD0hPmn+uAS?UwGsW3#TtCOzc?aby%K#GDH%`SII@Wv^ObDmFXC27TK@Fvsp zq)RT5rSnw)APD+faAF5dLin}85w4(HK9@I}~Qp_G66P@(v@P6f{F1x0N z$+uGp>bLVsuZHmz6b^`rTlO^LRl5vH!}707U8Z98ij<-zKesa%y>`aP!QEE%@(M07 zq8y)N8T#yydHuV5lW_*%Kllc0XEx9cs=RVIGbD|;%ME{tDPT76+oC+gG6Oc@Gf$^*eWQ5-`bLD2mq z&F1_0(}h*!mC5c=pw3rAHHVJ!I=k66)2}+E%t0=$3NRxWQHMDMj8#XUXvUjbcN5 zdH;AzSU^9;R3#?9?d&B>ke2vB@~q$0O{XxK{VCn1sarlSIj_FQ-ZTTo8BJnS)YMRw z%&W5^-C9|!3?bL%+f;>=kH6LO*AmAh?)34gMcIKCC4tOU*QmgHI2gk3vtg{nHz0fv zi1UplGzdxnIaKfM>z@`&NNW^^YnK{)Sc-^<<%bD%52RTja0(*riRAejEmSQ?HJFss zQpz%ghQEfKSoJ49J-+mD97^cHDAT_@O)C-n$I9vXX`->P{S;9pikr z#^lK`20?5=Ka=|!B5pC^Qv2=TpY_ra_rLEH!aoV@-S4+6c#EAEs$Tge?izPUf4=3D ziKUE8{nQsVyq_&SG&g8=Dg8FzXauANZySv2EHXIt+oUA?uia;ACF;gBETjXU7OaWK zy{dZ7Ig%qdG*Q*_HbcYWdmBO=@1o~0oSBJ9pFm+xWoIa~uGFfVr=aK^;@R09EftgX zQQxl5`Z-$M zbSKBMnO?QXr)nJag7CAm)1tX;gifgeLZ3-$JWt6r19C|)178X^_Irafl-baTOEqn} zF=|Bny`o)rILozMr}VW-pmcZ4D{7t;cD)o0i5o>`jVxAAPk&j%`X)n*XnT|5uA*ws zb8cv%KYd3m6$?{TS~9vCh2mP3O@xHJDSDE8C12%tgw5>ao>u$HrPk+Nh@dH07Bp~O zrIb!4Ud3!9zFL*8?fCoc@c}~w*D3k$nzBZ)07-EKKWKP6LWp%e6E3~lW-WgTaotdS za?W3$)zh{&vc66TFNWtr#t(jJSk6tG7P`$~yKLT`XRd}C7SQk(p6x+s>GGGgY8>n# zjrU((1b_YA+}kb=g|4hG1>CwQ-G)vU)IewPOALzJiP?>RoFO`eOZ}DX89utGghXx* zIxvKy`_D}6rm^ZZv~?vLz(7~!^Ucw0#rWp0xb@9k|5T2OrqSesy2-~QNey|p*H$85Vh~x#GXJyLkGLymPoD zU;Qa22lF*laQNyrifLlPZ`D-pKa{)6Q9GRQ~A&(v5W()pl;W?g|UKjla+8wM#cLjfy6c{r7eBZWin zjORP4>@k@6wa?u%5T9J^0uyuJrr!ZHGJI&O3dpCBbYNmG!wE3{qg!FZ1%?G5?;q@d z8Yw&ucCw)afKH76Ux(Xk7HfrMSUfpHNE-j)CydY(E7GAagE}&y&%V-M$~_*rDj3k} z$c-P{4dPPQf7~Dc6QjQ3fsDDuj%<2_ZwRF9gg2e-lcP>$sqM8{`fHBid|a&}IPLrk1;3Mn&}}#Wnz@@~ 
zuNp}@bpJgEp{s zBm8W3_W8(}L$-^zrU$9bGF_Kekq$*fYHQJi_3n1D$PglsBPj8CKd5<#PjTN>NN-^dToyWD!=JlTMW_-Ht zXB{@Tzs~e8AadMqtW575W*X%~{ls75?~BrUdiZxv3Z>Gdop(G4(;?_z#E}_`H9Y!! z`i(A-2$xQ(Yr`?pFHZBWBqwV8SSyN8>+4GF>uPkY$XSiiVbuDpb=Y-poHHXg^grEeoa&vSNq?3muTjL zg=%jewnWP!sLSpifZ4js^NS~E$l*-;J}|W#w>Fl$xDld>WF8h7IU-Ti`$1kaz;cJg z>pLh&wjnixg-S_`Y;0SNV&fvh_J5ww)V@nf<}qEMMpW7Sq|bzp%h)U{EoDLOrG4wh z#{fC_+dzn;9t>dG6Mq&c%pUT$N+geg%9&%&2`=&;tG9D9vv9w&)RmH}r3+UEWG zR`AU&+OMJ5k{fqIGCHYzt+IcoE>aO3-*5O_9pzQ*#$PF^jxy!<)3O>Y(U-hF^S(6Q z3vd~N2pRnBy3O2wp}0v7yGOsfYhmt;3UG$N_r+#f&D+Fo-|Nk?&CPy@r~1-)aUG#9 z48F1b*{g;W%F;Pgi@%Be+7)$;@vDA6=E4!kK@8C_X?OVsy4u>_W6cAYi9C$-!gbV3 z@3=k|M?Wv|DwXU@3QCAhn6I`{v1LSC*7D-vjW_k#dtrNcFxxOj-fM1dK8ZPyPR(x_ z{^AQ(jbXD}iE0LapEvKQ__^^VfG(FEF8MXNZmPeV=6x~oJD}?hbf66i?!cdDozkqlRT!AzF!7qct|Vq`J#5$a}V zR4XX^34T9X9YBDg^LJx7Q2&W9ug<*Ia2(aI&-V6?gkxa`Mkq_N4>TRXtZzA|QT+%u z#<3Qd`FjVC;pHl_U6}xq;UOpuY1UU5;RS+Nj=P{{2|)9%$&h zaO8o5PElu6nFANOp#WiqO#G_;w@JfH5#SclZnZVx`(n1?fy@;UF}O%1ltv@$)yCRRVmV};=4mgM6~;oJ;Sq-t=p zFX&U--jo>|wucZSjAWbZZ`6iKzS7-ph=YXtrRc|G3QSZ2`A<8wDOkKFe1Uv6)+6cW zF`l>!2<{eDbUbp)7%@0WGConW0t88q(T<^jC1m8k?fxu9<>`7?>|+ zxV=74Lz)fh|N59yR|QtZu`tllNqyBRv5>>Kr91oJC;KR{ckkrx`oQP_eFZfErMyPO1ix3>Y!NpaE+&|a zH^As8i;D5ZzfZHEK$HdV>VIe`!V12(;7OK zFcJSXE721u+FU{ubAIqr$Pd_9XA+dBoPVwraOc;`c_*mV{N{y}@Pq=tm-#yReM95dkPM59qiYZM0~2W%N@TKpZ8&TlouZ?7_-ILb`wFbO&n`uVfYu zzY>oB1Hvg4z_BPhinT_GL!na(4qA40qNOE-d=Lt7#gLr>MN@dz`6C-=#xh_L`04F| z2_92bG7&=AHIxLo1wy897BgZlkr1?NqBFU0ePmHg0$D?t<%o1SL&9`ighxqPZm<%% z6kqv@XLgMILbYo+a?DMlT`U)=3tJ2u`Ciw*5bP{DY=$nnwh>xq*Y~O zza@EK4r)f3x*rN7aDqyj>(RVU$GNmgEKyrYKq;h_YXd1?$ib{2?R&s66wVsTM zdce&7tOdBT=tPMbg9*ELn^>bYJ+P(k%Z9YRU@L2JE?3C*e=o%7gC zrD?eN$2T)x?Ev0{6ra=~k{Xer#@{qUW2NntG$H>YaXRHkNla9~CfjWT2%53iIH)n* zsN4m&hyHu#^y7HUBN%o0dtTkV)Q%|ErO7J!D(PyD6VsK9m( zhIL=XF?vTpf!40)^WW76085Z!Dgu8LWjZtuomuBOqU4}TM`BWadcxmR4;4NU$B72$eq5i2=slI==N~HB zcqmDrxgFs+6}-+XbOpXJy5#Lx(fvvjjP8^{9CPhL1smefD?Fh&vpzyJeA>^11mJt9 zaBq54M2xy{ReS+StXJ@Mv0x(XPMZ#Ofq_|cdm0=uNl^K&-O-Od5BkA638(|3*toH) zy~G2|9hhXpF*hq)dB$v_SDsnJCk5d5w9E7k9}JZ#tIKr+9gS7lZ1Nm0PuvexO|#C9 zb=R#!eTuC;Y4}!F0xeS3VkUnTYF$#2D+h478oXu5rnf9a*5g(@QR zE35#N7T!_S6ES&-{x$s|MB7HlAp1}{_>g%UgBsuP@C~(o*v~yG?q`@VX+~O?R2)u#W;CYxwE^EWNAgxjs4tq{u+Z_Ob)IXmIGD4#6mCijk-JRRF znB9T+>&aNunDWyrH-ClW(t=#w`~+Ezh7qUM`Dj<*(_q)M`kmHN8Qf2jb1GcqExoaR z6>u~Lex-@GAh-B4&X!?#5#6@tvzKbi49^>>eIM}io2*qwa;*oTw-xE!;q0~#Z!Nd7 z1slEq-AHd!5INs*XLTwLumMTqC)OE_6sGY3NXk9qf8AX1Q z_Q=2E{WQ^bYMPps8TZOL0q<}gUpnnCeC5g!y!d85s~$rpB?5$rRP+Nfzk`nAR!@os z7Y1Xh%?8F?Sl_1gG7l&&EVXKtJH03|Xig!H9Q0L8c5%Tpr?-r5f4tyN2i^f3qnL`m zPC%DY!T{O+g3vrfr_6jL*)L?i@t88}NKX}vM7p3V4JjqE(8oRRs4iL)kt>M!4?`mdfq=i1 z^iLd5=G(Vopw^fyK$e@~l4+emVR2&lzfNUxlItI2A91}S{Qtd1-ONS-`@azwa1sCi zQ=I>=26`O#_$gd5b)T9icdYeIbF3r#*sYdnb4-VE`y0S}0M6}*A?G_T@SjS~LcSwN zls?=n7O;d=nH~SK)HKA#+bg7APXT&U*Nwg85a61UNjaC@wC@`kG`lu`n-?;DcJPtC zC(WO>;>M%sUzT^$u1GylV1Rju#9zp32+8WXR1SQ+Um;FJjv)>d=1iTe=C>Dk9u!Ku~n545oG zo(oDu;dJfUJ`fQRKj+nx%UzK>c54>C2!*1> zIfay+AFiNhVr*>fmby1(j7&_os|T5U(0)Zw z!$$T48h+e>qS`p1Oem?U{sZ!-K8yXD0GyoEQ;aDU z6u7OEeDB9ujFWF>!GpVHPu2$39Sh49F>r6ewkXGm#&N#+w|X(swzM82w4kUbyF1^a zhqPrwoR&VoBV0Zwy_OR8+)ZK)pXh-nf=ZE60P3Uv64n3SH;7$107gnY|HWHSXq>DRruy{h(;8Bk z4h(FRrRQo%27s)+5HBhIEWqquZXimBXI}GTEac_c8BVjl7(X z5Qu@q^{zk@sH+veF4a8-x|wOKL%@d9*Jy$Eh7%!0VaP4=)5+G3yYr6wp}`r4cz%~)aG~X!KPIh-jF zor5fR2RLV!GiMWY*U-p<70q#pLZ!s6Kx$~O!d+Gm zvH%s^stxLvl}L^?Ab(CoYPCZHf%tLOeQq`cha@$X%g7KKo2utDhB9v5@t8?j5eV(p zku@CZC%6^B71kMQ;Bo`9rRhDe-F7zK;l;xgV`*GW0VYux5R?5fyssG#PZIU~A{!h^ z>YPFTso`izi>!U^DWpR24Jh2X!S;{iitJOYt&Yl_kN+I#DS`IT)3JTbrjfW-I=JBT 
zZ0uh4z-6#qs67Sx*kb~(mDU{o1uoSI1Zd3v5itRog^?*>1K4s9deOWpMg|g~A{TlC zHr!f3L+2Amn>-WeN$`o({6~|kwNZl-M!xhF5&}3&Z#I~AV&+8Y>8M$e?X~3JvJ{B3 zTlL|5=f#vUm>e#qAj$)8v&4f{Z(%a9(ytYi2q6-)f7x3bU^M8I<2v|7)3pWx zz-3Jix=7o2wWz~fW9>C)SSw9xu28puWOuLa23)3TcT%ajT%aay=>G$r@;#Rv0`9q> zLjv47BsB@SMZ&8ARGQiA?AxOcw6r!%?V!MJ36O^G{GNl4k#!oRK=tx^253#&1dTBi zbcZE5g)5SW;W<=v*u@Ifg@c|d3&}L8=kZaILWz?px$&=kiifR#iIR0?S$2e>PzE$d z#=pHE>GlcL`~)hEMg%egu~fgVxM$yUn;o}^4$vz%hS)wF z5ASphM1R!Hn*v7{8xR|7yM0!zoD+{ap4Y8bKSro&O@w#5*~+{+<k%A9`(WUA{Vg7q$aM(TR{5p1hD#y`F)HyJt$VyFG~FF2XYn|3ZJuq<@>hOHzaoMq_j|LyR?T%chSvQW-2xI|2`c<@OYj`w&Fv}BX z!f=69@D!=&K^u|`2Wm~~pH`svl--j(Rl?d+2*HF%IW41O0kpC^w_0j3ugVt_N6{@3 z;z+RP>G08A=T5(eIwimj7y>4h<&MBD=b?r&_ffpJ*7jrKu(LXOg%#1S%_+Wnnu)}e&?CdLr0!DYcqNi3=+&)I@?&>0h+w2Wz0(i1#VM{F^f`c zzI=EY=ql?nIP}Ik)V1HHNrwFrU2B{7-(>qnrQuy_9&t7J&~^WM?W?l4|y~KR3Kg@mj*#C+_Tyo9?r#Pn1^3XI1uxXU-IlnPgY?cDe+5Mg*-EP_40N zVmRY{J$UiWb^dtE8A|UddYAFknPxuSe)WuJR(n?3n1*SEOa!)av0y#eANTs=qo~Il zFs!%l=aN^rVO=#b_9qkc!xbX2(|P9pOhE7-T7S$yOp-QOt_sKkp9DT;){*mgKTsmy z#mOXSCG-|MO!V3L?mr#+laTUN6P=U(!<+w1n5Mz?{Q}A9fQr9HNJh*P$j|Dnk`VVN zNpVRc9;!J>Vjwx=;eEjyP@9QiGKwBZ9(INOW{?+gOVr~hFG|zTp9BRkNn1_6&GvJ5 zx=OX}*@#3TFFza3ir809^I~xH7ljfmdhnC(u)TVwRXCgN!gN3!mv}LGPFtx;=T&J& zA0C5vW0k3L`U}5#+~9dpk+CM3cimFr^o{WMH1hGU zzkEL0vD>p>C#8lQg;^Vtwy?6w?WZ2UO2{sgy*wWd+%Y0WE!G+OpAyy{=FmzfzR=uH zdB%%Dd*=Q2*F87|64-$8P|b@ya)Iq*dwI-SbXL z&2Osl)ih(-V^JC$TDib@FW#}{K)Ku@g~P8tm(Ha#@P6j!(WOBs&;c~>R!$AN*m;DM!DH<*nsiZzi@&jKEMR#=jDv% zgAwXxZVLorm(yu3*8@lXPU1%00h}n^<;ZMpcG2mYebO~Hb|tI(LBNZs zeWQ54Zdml^%D>_2-#Q&JSjI#19|6jXN&+V9_R~arR>Y!L=x)VBM6JRm`Nj8t?=!6>1(a$vsu1^H>yoz{dzfEQh$Zt!;N5x3hW6?1xh>-zM@%wgei_&o~cgYY2g zAIUOux!z4W{kzRxU4z=-WP5V>cRZMuowp1kPT+c)j(S_g*9zBHMQNQBuk+2J1eJ0> zlU-?x#<^*A{w_DHK<2Z&=MQ&1=`@eL)@yV-Tlok$sQ+IE!L@N>o}>!)F9}?s!b<17 z@siA_TT7poawd-;0rF1f{djMS6T2VJj@?h}<^(Rf6-`V`u1QNz$-pGZa z7CS^Abv3J55SVLDxS}U5nKJw17L<_b-HuZ zbqmbbMOKH4vi1??2(sueexjfvOECYQsBDtc859cX;$JhRy#6@n^&p=7U{-(N2GQGQ z3t!_84nZtn4h+h{p<%GU{^J5IV!j>nkI-&Y61>m-{`1^k^J$ft3r1X+b&pJOEDllivA~>FNhvK#ff?! zeTD#cLj#Y+D>c*5S}lOrQ33>;k9l-J-1Xlq>8h?kcWq!%ebv?-5eF$>=cvj`(bH3? zdnrzyWD=+M035?AEc~;Z2Mlpiwar$oH8J*O7_6nd>5ky7mTT7e^5?TH z@Kr&g%pYHS#ijcgyv1`tow3_a>!6kG&yL>H3{Cku?^jj-7N*eP;8C)!l5gO#^QqD? zF7#tz^|y12BX^OjIz5?Ei-^}}Ln(nrhNMRc#E*|0zq^;F6MGLjy$vt|`U9XRX$zQyD$h$Ndx3TZ@&mtoPDRUu2{BQg8U#BDay&*3EdE5z!ofNBwX z=6Iwf`B|vH24;!hd=YFE_=^}6pV(vKHaem9I;Het56&weIkMbF*bYV3S@{%g=_1Ii zA632Cuk?&)WxLZkVEXi10R4b1`?GZi!LQlLXZhjg0Z{)aLuzw>)S)aXCb+mEeV{~x z-MFyLL=xu{?(b)mV71E)%CFA`)5#T2py5A__bAPVcTTYG~s(7UNn}G)P_o{Mrs-H z`dtN-Q%45oQoMU3*s<^oGQdwY@AqNjK+FAZcPX6;d%FQPRaID={tIhJ59Ue2TI@DI z)!M!s86|Ta7bT+dF_PhR`6CIHNvBJT;cT~UuMZ*E?59u>6?E}{yiGgbh3@9HSW1uP zSbhx0VwbhYt5bC~8Hgp8^;B)uZ!;{{P3bE^%n&Jk{oeIB*S;Ku4ez&iUl)9>gq}aw zpw81(YK@f4pMqOY)b1FGE=R0P&Jw?a+KmfoRlhTWb6gsm942~ToNO7XQ(Ff+&G*dO zXPb1P_jyjd;_k?`>Xh4ZAQn`YNKO}%2IEQrks5v%`F?ez_{}{VFlTqo07)wSzx$C` zMBG1k9O)IGH!R7XuAfiFODueu*8N(2Ch*odP^>n7uUzm;nZ`Jcxex6yQA9^{XGPvO*UWtW~QBh^tj|v}Ds6=`* zQIb+jNdjVbPuPCK%J;$Jp^r~08vOZpmY%>>!ug*_jqI?Onsp1W73FigLqo&XqbInw zRPA|A-&9bmXn}j1oq1pP=u44xb;sH5lqZIt_&>qg8WMM{Id0Jzd1E1eRf<+-~p|_L< zAVtFdcB-BhWyj2sK~Xe0jY0&4n6doLrODw9<-l_k@zP`!k}&o^5mE1T3p39A7=-!W zxdoBYoU$Btp8D$XFsfBGg+rhrAhB50O6@d=ZSSdCDD?cLs_~m;7A;Qde%2o#YeVeeU%vQAB0U@yC zl&1f{x6ck1(xD~y97dkB;&m{!MiBSA{2Gi0k<&YxReJH~all-R)KgIJfeoCoj<~xD zo=X6NoDh#hVF*Dnb9#?x_eX1SQO&0Kcy`(giXde&?nhNl<~2Oly~BYqvxxDQvHC)! 
[GIT binary patch data omitted — base85-encoded binary file contents with no human-readable information]
znFWk2f%JWACYD_3e1{x+HsQZ90#dM_Li7GOP|UJp#u^bbp(1|Z@7HLyeOA8$32u9( zf5pZ}QnDC>Wc(WCc%vpGT310}Xu|FTS>7S}JuNLQ&zKRAEx861Yo$O$KJtQ=+Zm|b z%W998|5MO1grOCND?oT*M|WcJdqFf8#S9qc@c?r0b$cB=#&At%(|QHeZ)~6J10nJ# zyGxD1BHl*1*$Z6EN?7$1(Cpv>8b;z0Mv?$dHSSUb-mhL!=mt6rHgJrMORd1Z2>U>j zYU&;x+{2R6rF@;3*1sM$?KE|X3IlG2+MZkVhDGL}Vg&MBeYA0JKs4EhfseC^UAttA zTh=>&Z*6R-sNx~`GZU5p5AuzA{l)xu7Y*KHRsBUnK&e4&yt?$J)a>g(F!xf7A1DMK zAFfULw)V98B$5aVlhVI1HR$)leD?&15Of?;$9Xpsw#C+Ee|0sb3yy5~4iyBf*B+Q( z7=8y>=A#1R@_bMb)GO`Y@Tzlb;f0?w}<&5y} z@3G-T07CI~^86AR$7Z4?=t9;(}*etytY1e88}sTu%(anqE#iA*MNb9WqrsZ{X8|Lp#Xrv=Hwd$Vnf5W$l%>R z&RdHw6GA_DHq&YNgtNb){m4icn)-GFEwuRsQio2!8!ctQy5K}Tgv5MlHAB&%kIlzO^diG!8NX&F(?-{6MG6Lot$+uelRUUR( zg^d!oKQ(MpHolk)UGnJ^9UXC@$_Gg%!}{H{>Q2Da6PS&AFXwBis2Mw5Y01Y0U>9@7 zdSG6tWy#tjE0WE$?eiws3iD~^YlBluZ^Qq>e*!yT)=F#CH-w5a0~^?9>u?@m`_nNU(;SSa*W#`eKw-s+o5Q zAy)A=eACd3T%ySOxGTA%~%#Dx$)^Sv839iS*AZAYUyiH$rv^Mw^HCZJ z9IPP!;lW%Or4T0Hta>ofgVc5oSdI238(72W>;}|9YSwI2MgyZ8sZBPpAq5Sy*3#Kk zs-=r^x~Z3QFT}O9-{#F=bZbrK4qDF3MS)MO@`9!d5hoQo(H`GWq-4Hz+>GxsT~@pgbD!`;G~Xl31)KtxUe)E(71=F$otjLvkt=BFZUgVD?v zoKnH*_FHW)Cx2azY6oFo>~3KKUo9KL7EMW-+`Lg8++#-IOr>*EFIG%2WzaiS%VkV- z=+8Mi^B^3eEzL5eoHiv&;9O9V>bk^>Ms@j2T5?OZrHMbAA8Hf->C$|hj@VyV&T*ZQ z{0huZ3uiC%4o^I8U3|^217|1}xQH~`&?qp7>@R3uoS&WHr6){3nyyTbw$m;wNAqSb z2J^}yw65(^&m6NlvBPXuxF($}=kxz5tH z$;Li>qhip9CfE=hqsT+~NWK*gdHC9wzqRFr3gQsN9fx~d(j0s|w_eUj+e-aP<(}s; zRPm$Uqwv|gJ!zTd^C~+14wcC_PX|TCG2}M=sOl&wm+v6m`<|4tEBo?_KEAiF0;B8} zQNm@epr@7?3IcO;-umr656lbLhx)swFrWJ9bdc8}J@MZLRh^uRR9l3%yWF9m=D3x` zQSBpcETU!!ZP@6d^pv)j9!FF!-Kfx(K+%01dUVqLZ9hu7cgJ&iX7JV zgEc(}o~U5HR;=$BKAGf!T&-YW4tks9-cs^$9Gy3OjijTDPFV8j#AlN~A}1$o($Zt9 zhG!ey9LZ;DHPqDlUY-$U;XU%iyB!&I=*A(Npyr4U-T(G%_=5E4hhkT>12-oM*X*TP zmYB?&Aht3BUnQgCK?R~YLUy7(KZfBmxC>W$j);Z`48(cwoqD}i{7HxCjOOFRqV}=L zLcEV9HRT_VnXddHdMDxZj%5Tf^TVafwv z(!5wa)9J`L7;;L38~Dif6K9p5>R2W5>c6AR(-ue+B?o;anR}&X5yX?CnRE|1Rsh*^ zq%W{)2V>yy2pY3oETL+P-m{w8wld>*MrAO+U>Ys&y-Ku((QRw0q-qshu?TUYId1AxLAplvh~wq#%_N+vF>_=GIkn+X4273ub`0_x40EZ=KW+t~ z6B;;Q@iTapGJG<5K(?=2Q*G{a9=@ROM&&BxCb((E+IKF&5@dqcUFKMz$+JsOjEokJ zAmol|ew%7cM=^5LOy9-d*PtSEWz4GXP_Qvj$#F73bXwH^@zkUJp_lv*y5wnoCQE4F zzIkI!A#eR|>$rPImD^pG_=>h5xsLBy+ghHt|J|k5v6OyakVD_WK%-EhIA{b|8>P6{ zMj}+!ZC3Y4ZRlp{gvkJ>vlH1fQMC}`JWO1Vq@CG#hc9T@X zWndl*Ae^pj^{b^MG6=h*nJMEm``pqO#SE`OTVs;%WGQV)oU7~}N>BbHu}N)aFX5wuPY4*w(^Kh;lCHo=igzRx5pwBL9EKWOiwmi2yu*84N%Rmr zI=C~o^*@v0KEHd1qUf(+M00wvrr$*~!z4)>d?i6K*9x%H7GbBur)DH62~mm|SVAv^ zk?>&9eTaP-*ch+P!d@v?-MB;adSAht<-+O3!&6r~zBG0;e!a=1d~)HSn-h$2CHKK< zgD?!mV(Q6By)=XQMddoa*_Ol{Skn+AqD}Y4Tq!iIRkia#*L5y|9V5H;iaJ zVG5gOt19Hy&u3;LI42HkL0F(6SVx6=AKHPLx7#1xsfN=*y1q#wqnZ zgehmkA}5)7o`Zbvb6Is+6+)t0_1o|PWMs5W#AdYk6s^o6f_v+-%H@x5y|ycgCwVUp zhc`Ix8h!!vN>j~bj07yY;r-u(E7w(!K2qGUJDvq>T|5dTXI+tW)qx1=gg|mqLF^T| zwe&X3jn;)k1!7Eqn=aj2JvnWi+GdSM7sLH%`7BsP6qny;ef^lLtJ(tjH}JeSQCAqF zdFUy_96*X*)y81TGwgoPdXG0w8545a7Q5!2Z&Sft?ex9>h|oaMkbyF62a93IduGa$ zt>-?$Gxf-2@%IgH#poKWkmfQM|ErQAeMgh#{g!uePxf1jh93Ft`*DBb)6ue(cFKB_ zWc|A1!SjXik>xmX=c*M!+1yPU5(+|5edDW371{dk)b37GyFWI_Z%}o0!+tXXWyD+P z&Z2BgYGTk)xJAn0yA}B<#M0E#h(yFtM9*5j1bxMNg;3<02*GXN`kwm!cmq}8yYOQc0k6m<`OMAp2^a8wDs8D&W9h-NcfSpC{US{LY=mj!D?VCFjr8{ z(w?U=_8u&yt-aMmuS-$h+A4{N6n27cCMuGjco0EIM_5x#(oh3`RhD>*W%no)^*Z) zWzoKiW-*@3h`rb{-kQVRbTwHF9Fd|YHBEexk(mK|aQXnAk1ntd=ILVSV!^X0d6Is7 zy#&&_w)QB>!bd6hLxsF=&l0wJ#EG;^C%`Lee#JYvE4=V~i&J8k?m8VTDc0MUKb1Hg z{IDQ^BbZL_qMZrjV{N_lf|9%N#vl_h%L}o!zfxbFm({E%J?IeDthJ*hxE1HMb3_JX z{xAK-V&&v3?-MHvqn705l$VYYPCu@s6>qiy9SN!3r6JgX)_Q*c#?Sqt{zI6hFhw@W z%q|RE4;jRMcF3#YVKX?`H$SQufHax<^ojQDv}Z_>)|X1Q9y4o3nC%epje_by+&iOL 
z0zQe>{%9<`W;(>J!Jk0HIvg8OqB@kD?H}QWp^kvjlJr$#7^jwKm#&nNs8aK=61|p~ z;%o_NuDt%Mhb-pv(jcgAGT?71TD2LLIMJbzQWsR`M1`{vos&vQaQN43c*e#KdEZTX z&=?h1uSBah3-=;dtoehyPfbLbD09MP(gv%Cw}f{Gl~SR2JVV*)F}d1C=iBULvxx_x zN#eEA`G#qcQ2#n@xJToe8*G2+UD-CRFw2{ zDLuwMvQQGwY8}r=>*Z6|3HQv>tV;<*2Hb@|io4Ni% zYW%JIuy5tI)t-w=&)d^!$$N~DD#_RtanoRO!!F9gZr6Cn%Vmi++H-=Y7BiDpfmg;d z&yRH2cNgrw=b-H5XWh%yzE;<68vXQa#3f%HB{}gf2e|>x9~bt^bIHssS8k>nr=-=H zy`o%=A(|*_Hmst_iVq#m5sN)Vg{=@SP%g02pt=;-`5S(;RB>e@(-pH%dRa+u87HD) z?-o=md2jv_XX<*GiIjR=ITT656alj~Azh&CY6eG)L4Z#WLe8hsGQ@=zAq3N6Jhk?T zX>3@DHfSK>)w)K{GDh=u#KGI8ZcxH^i%;8aG>yE4RdBsHIyOBiLw+Fl_7mAlZb#?( zqg%QzcndJ%b)N^6e0ow;e$(055C3Rqlx##^BXDDW+_T6T%@X~7mH^eqRxhermex+1 zz=EnHr$Bj0O}GZY;?aY66x?ZE#8A!2O|1f=i8%@W18O5PNf~85PmHPDAMSw3x z2;jZ+%BGC6f?AFoC~}`~(7L$MdF`eOV+3F4K1zUKLk4%-fj8|11~#FEY))Y);V?;k zopv6L`J8M#(&fB_pzws z=k5wW1^mKuJrs_}czsJ9q4Gl;efx}uI>r8GSQwj};i+cE)%YaL4A@3i?%vEhn^F6K z=GA-@ugi1gSmVm;qv)4Yl?_`OT2~K@fij6-6D)>(MtmHt{b&k`2|hzjYs#z&G9Rgz z%Z!Q1uhux9I?k;zqZE?0G{1iRQRO9jJKDCR@4>LPsti!%{a)*8sw*?032f1unAKw! z+hAWg9(PMwyM=zLbImgBF8r@nu91q0^G_=SCC#LnwptFXQZ8MH6vB&tAkV-~eIQgu z7?qL@dvPo>-^9-_^@xr=9&t6s%J9=+LM39H(e190%mmf!g36@TyfL54tJh{e{yHoVsui6O(%fH&{9rm5pqAJ@&kx>J(AG0KLf963v5 z^TjK}%hBFM10M)B-|GpL2A~WVF<2yIX&`e`BovMc1N}v^3R%x3V^em)X1wj}eUhZcA*+ zyyL0+k38=^<)TO9F}m)&jF-NJeOj?GOu-oToG{2MT%sdI6QaVLlb1-}IVVMX_cSqX zpm9BE7!u*;?DepF*CR-UXL-z04(5B9B!qK5#XiH0Grs7*%V?JUijC>140OW0uopa# z@lv}H0ZUUd>vc{GkUYBTzcXawcy%M(b^p2Y?RNvV`C4NC85V|_c}?vM+UC;q@`HDG zCi)wWNrl`1=P)++w7SkrajbSf^E>6J*ZM2$>l2|^&x7Y?yC>#*`=wnP!GiywEV3)$ zSWrne^8Rn6^&6*HGnf6?P{Q~b>X~^(`I>Rz2LD38f&NEW zMhko&0RWDc0`=qs4Va(*1Hgx;;}07^WZ_+>RkU^f)JQ%ovH%^8sX|Tw75@riK+$a! z;77C-uGtKOvQ8j|tb`v==C%cq6+n~g`1bAF2VVx}$qGE&-Sa`)$3Bpfnt(1KoK38T z-yj>i_=?txQ$D?O?(SHl{S~Au`Uf3BeWq<9hftxw{&Q5W439elaFZei;R6eTxBL z2?=3sfo#yfL_wgmL#TQffb^C>e*7?h&lojwdjf*n*3b zK6G`}5w!kfUVyt|>wjknRymt7m9}-_S+LMsr`LwR1~uCLMwJ(K-qh|8{cno6b@#7W z{=s6k2XAX=(f8w@eh2GjtDePc86di56o?|%D9*SZY#)97iaY#X{Yc;Xv7N*!k0a`BOx( z`3!rz=M^85FVjYC?{RGX;*{Gw^==b z!vPcp|5tl7p9YeIL=>s%dO&edR$zuHiMfTg9<-j2`~LTK9Z1mQvT`5r|Kh*NHssan9;}SSj(=$E)j6_d>Z#`*BC6^ z5`R$oCn05aep<}hJ8RL@MBPoTw@&wy)@#R~$@~*ilkX5hQbLhx*>AiDAK5`LzFn$C zP*ny14HcGQl(=aSo3gJStQB)NG66_i8^LYE>+*}CY8XW1tbstE-F`sC3Jc3E*5>;k ztV@8dEr9Q)Y=}(7iFFl2k)efk=YG{7x%BEu0dIBV-P@%?{rR`!Yx51Ym^wUBJPmoV_w|f8dNwuv->)f2 z21d#s2KuL!yT^Qz5`CHeSi~GtG!fx|%~}Lg3Ol*-bNoq|nqZgQ3cC4eU=%H*p}FC)-K zqQt5!30{%z0s{1S?EpliCuV`dA2SehwhtJl;mf?~Q?(~P{a~d)nYy(wc>}(C!FP`+ zpB5Q1Bo80D6!&!ShxZ|8ECuSNy%IsM>J=38H|g&{w&Dn)@OOL3a8#H}6H)Y1K-}%v zjG;T1RpG(F20M>cxE7$oJK`PhR@DDzQ9 zgSW%azrvWN%>*M2fZYAUAC}<&0%Q7c5yMpf9&V2Ga-DOHEK!)h!(VpqpZlkj_WsOG zE;&i$dR)P=pZA*IY%nF~ZTXkr&*T2P6%VdeeI4(wD35<5)*kqr5`Fjc1KWIjx@K?R z+rIvOpu16AVApHDEqGzRE!3ltSxvPP=+0W6Jf1( zEua14emJedX8$m0o`O!3PcWdd0|b>tA|6S;5G^MJ;fSH3Qc6@wTxg0Oz#z_$=mSco zhAeA6oK@e&EL%e!uz}wxrxG}2M*$l&r`8LC)Xu(%03!jKMPxV2Z<+r>h`zq34ydzN zfr|DZGjRVes4dt=z2~Fj12EqZXkOd^8T&A}oKLa7qzoYD&H^x!Xr^G%0V1F|-%~-= zl_XToNI@5A5WJy&>lDzBfsB_*NFmko9@4>KJ=^V0M3?sn4Jw2MO$xKjCxZoWco6r1 zdua7;HO7&Vw3iu8Z>`s;g6UL+PW@)}hJM}evW()KKW7J&t@F@!zh0W#q+l~_XIXYD zY|Cj%wGV=S&^Qi{Iy>)t7lXZ8jpo)bQDoNL9PcAF5)utf&cMlkF}!=X)*u0cpb+uh zehi5>vR+te$51+mgGPo5$W8R+Ufv9~0oA>4!)stPlvIemmDH3-r|Z`R4y~mHtMLKC z-NoJ}_Pu;=jA60B#+lR%hG!MDv~Q9Mk9|}puxjudCT%3L$#R-9u$%apAoDXpdLHg_ zO3NuTP{i;-Ro2(a3Y4YSB9uH4>PECq(%_S*MJ7Dw;^DmHGl$hU?gqM-4a(u$Ry;3N zZV^p4RqMf`gU|L-X?JPGC<(1pRVC9f`T2%OwSnAcAwhQ5*LW7aL+nwc^()wJhP z#6}5Tm$>}Clj+NkHU7sH=ZaucVxuRFk$4Ca3@4i{NmqrSP?iN@a^}Gbpg81%xuz=>T{0#L3b&&a4tj3x%^F0D zrf-Z`L##@1F@_V>A$NFIF>zqr4*&N+BW!@v(#3Wnd!u7(m&;0j*?Zq!vNw2sFrBel 
zAzj5UD@pyevo9pR0ec)1<*2 zICr;FY7l-N89JS)WMx4}PHEVvX)yZ+ArLYdx`npY6qenrCpvRU*qpAQ=n3*mC7hp@ zgj^kG9ake^Tgv|q2hyYmD$BReaZfPfxLB1AzqXNt-`@B#`w_I(OR>ip@+YhM3!`U~ z<@nHkKNDYG0c#%SB&-wAlj=VEJv9fcc&ZVh=23i`EhhSjD$1stEZuVDN0sc@N5!gq)29vpKIg$Ycrvt@tX+UXsftq`1mNf?43L9Iu!I{$^w>icZ)7z zVu#mSVyFjW+cGf(v$r>nz=T~3!=3FGN5E0kv3M-1xthlKZP|$VORpg zGLfHo^7U`7-GoKl$+wS=v{5~#4;~Rhg|!o4>AGmqtc}r(0yxEi$RwE;9}+R|1p_c} zEbvD&*Xp_jm6#>4a3@gqQd8uOFgHfX&`~T#U+>XdHUN|sLxNG9=%ZRii)j2y*a=FoPU3y;T6 zf0v#e7#>BCcnj5PX4JIZ$6wIos(M4$k{+%&PAY1+jfxj85#)!6|tvBVGA6I z3Zzo<*O26!GdvG8A5!p3m(e9Et~86ZzR&Rv%S^)DMA&Ku;;09lEcv2l9!E4DZp{Xc zGK*89O+V`sjQ-fpmOyG^-odbqA{xIC5mJ&c)Y0@Cw{@7?2k&rCQHmitt&(D{W+s(b zKEkobvb#d+{)gJeENzj-w1bgJ2)|H!#Bt*h`YD?i$0-*&^Ya2hia*FoAdKE{G2t|j4%j@d!>6x4r z1#r|}5cAL$iE^3L%wDBN0XSl=h-DW)cN&qLeoA`95;j}jI@}y0NHHSKy5E>YS#Xg| zTU)OgGZ-<;*j0DgPIdH~KLKAtto*G(@z9|4r(X zLq<~4o4Bj{MjkrRhORoQ{$-T<3Nr*vtaKq78G*lOcUbV$czxNJuURFo>4(L- z&FXG^`SRrZO2aHcMXu?HVA-Zy^Br{?X`>bKoGeImK)rzn7+r4&g4Gxg68l7GVxJmW z)i7NVAeg0AEs$D5-Hzt>ZbH_s&75%+r)z#K(CXIpx26tA*Iit>`QY2uir`4s_=$IQ zUgt^LRe=x*=9V!IV!|C3+vNGEqissc2xfCHiXTU@yqRVH8QIfzFb`EyuQkGX-5X2n zPDn0O4i8Hh<$OM2r5v-VnxUWsKWrQ&_x$C$hf;bZ?bA5U36}&R)Rw}76t9z zm$m%n!nj`X4P5OE7rJW`i-{#7$^ZkB{TBlz%MUGqVuAaw_ z=XE{Ee}KoW-MzRa`*@)=fX?T3*galDTnNg3LRXm@EvIa>c~d^9t%uGJsO zcrWOkzmmqECh4yr;8!h2KWn{B8RU!ebf!rrLMg+vkSG%i3=^U=&RtmG^E~oe1EO=f z1CdxLTNexND>pRhGLA3KM}b z?JB4RJ8Oqn*HqBp<^o#5u3?`QR<+U!Kl*wFibD-nxT&~sWd#@_P3C}fX|v3|902gZ0%6wCWUz{8EaimKw`hU zsMJ74Vt$EYYOw`5XKA>oj^sEqi#o63fACR*YFlq4NTB8yhU+Fp*jjI!RIp9OZ7a@k zdP6=9S#ZV6-p&*_QN397ysI%eysI^x@K%_#$p-z<+ec)nGdxU5?l$AFL3``XdW9Zs zO(}&M2PR<)WfBh+M&bpI+7Fvwg$v0POrK|0^o*m-p1KFRn%zG0BCd6UNGiZQg&s zRVC}^JuSb!YQ1+ud?)xu_8>-&4#NESo#zOPX=2cG**_s8ASfeJ$@Z|ww>w~6H@UII1Tl(w8 zJ6HPfPA1%+EdBLD(CNdQnd7^=o{DE(vKw(yHUdzqyCN0ZgAfwr9#G95G2)~^$zZDn z8)oK-ot;LT`u2B^HGr%`592ctS9w_EB0ypiNkM#hM=yVstNipMa+tVhnB)v`v0|=G z1<$#yKjxp--Q-ngg6ULM8@twosM*f+*_8tcpSi77b|QioeUnoGwUr#syT z@^MMBS}c{9hH@4w>K=U$sM@#`mO4eeXrAD_ut<+x+aWa=ynhGDvXcFu?D5!qq~>lZ zC;o?tDSzC@SS53UIhq&Ti9;zhk5TNqhsqW+K9~_2bDN2tpZHCVS415$(RW~P$(xzu zd-bKqq66>dL3dQlPP{$mxXjCb-NFFAYkyC9Vj4`MiA*Uy?QTEBnx=WmvK9({dobrW z^Nqr_@Mh*p$g|jr5eR!!aTJ0Sr$4h{y$2GJ>@|0yg1S{h5&d-VU58FN_=E^JgLj_TSSfxWv#l7fh`eD=%V{{~e*@6Yv`K{}&1_w(TYv39XTrqiHrJ;>%I-1*-v7B= z>>xVyn#;nKy@(2|wI_BNowd0;JpsyzC%I+Dkgp@0`YVnAma;x+8f(=W4u3)ojjkX+o#tgdFVX3n!Jl)G?B?D#|-iD=&n^s#3aSRs?`49}-+h zJEm0D#|8tN_1LFW^QVy7e1j=`%SwihWSR9%7iBDgDwnWAQ-H`^cqIZAGJQAM+(_gF zOtQu(YOjCiw$;o(neLf6&lMkOS~?bU;C_7Oi={bQLi2qlQjY7PF(W?EkohvBS?k6!gqLMGL3-X-o$OR z^1XI{VRBqJ5Y(1~EbbGJl;>=B^p$rDMWk}2Cn5WEur33Z0Zd&H!=^EULL%y$>DFa{ z_s1dO{XU2M>#c|2;xi`Qu^G54@Y6&bXxaTN(4eT&!oT}^EAIem_THg?uL~~ z{l4gg&0X0=ThTgE{dWvw*-5Y0rZ43nQf8bd27jwl_BgRyi=#vrWzgZ@Wcdm7#XfHU4;ftW9kdSYauY`}_?ARYQmE%|3E z(hnFR0*sMrV;@}8R>F3!n{s_*LO5eu=neCF>0lOcVL_PhS*>)fuLZZvfOwlu%>N~YbS z@G9!rukI>c!FwTmh#{I@>Xg}clc{jdEidGe^cy3=s^mxauJ# z2+;ykysS~KQ?d&-t8!>$qd4RI^dDL;6&;K6r>`v}7bJE>iiTn9yjPE|`axjJ;I$gG zzRM!B3P$&TE*{5pgiuTd^Hu1M122l?ANKucb(CAH8~Y5U`*DEp77v2z?n2W`oF(` z&HrI)D@6lQdhg6cC>OF|(Ncxa@0cl)3d==_;2B zXo%bU$MzVYmbS*?_xHFrjFzsHetFXQ^Gg4dQvc$sbLYBVO>z3(O&0nA{S&Od8vhf{ zQT`)_t|kEF-zKoN&gQRH+UtpSS*_Bcg&69HHVvE1ip1az`V7MWAM90Ov%g}Xy0fNY zTo?fHbvm!Nz@)J|8?scR&tIdaSWjubG~uH?T`OLraarMbk>`TY*zzo3=F&!^6}Jy< z0t}pU|NC|^YR}Hg&hJi?ebo~|PudtGo{4pSdHtVV_0VYd*YrS(*2~!Mugt&RUy8+f zFlK;TeiV#UT;SPzvI_8#@*71peL)vz&o2yzK)}ZU)FyPH9?%Lofr3~gTEQS_1;w#6 zU5X&cnz;!CZop5g4zf=mAkSq!3Sb0(b^UjHHP)b*k;z!futk7$ZwZHx>|+B}+X35R z@W1yGcj&agfVcb|khM=NttU12JU{oz+t 
zhPa#L;TtWXEYdV=xAggggl2g8yZS-$gcy+KyR2}o#>D5XWBzc|{=-h_RlW6c;@=Jpz5LUE1KA)TmNpvWj-MFWK%Vdk_UGFk`FU>`1Qsa& zwcT%t;B}l{9#IIq-g5aR)}yX$bYlPq(lW^%&EAb3VW++lm}hXiN}`PVcA zyh2-aFQCv8E20v@9N_Eu?i2^X=mo%wqwr)I^Z{;mGyy7Q6s8-7Ha_kf(k@IR4# zIzbSxvAePkW`ezq$;oa?=M6ZJQt_b;$Sm% zYGKvBYLKHgSaq23&nEe6<~cVykWfIHY1St%;idoa5E#e8X=K{JJS^hkym3x_^Xt&) zn#G2TSc?8;Z|uziU=Z4E?-aRk7s{{ZiZ6!!>R#`AM0i1Ek-(7DU- z^QFEPuAoCJ?j>@|(^8?Q;?_$;MOlI1O4tX@sE3TX!1*)`P{sN9eb71LudH+hnJs7L z(JAf?mabuk+zeYNH6_s#sxS@m2frNG%=T-Oh3Y^3qo;u6kA8$L>S+tc8uI`{uCs2+czWL`=aHXO|HgVDJN3@=oBVk`S zhEZ;}N0lUoQtmi6E9e^w5pdBIgALWUw_aMUEM$?8+c1`Jz$Hl`f-joHtrxHhF6m&h z7>$huyWlvBJ&zDyUk)Az`B~1v-a1GF%DjUyl=!y&w=`&_E~!~6#Y_*l!qFT9`_0Fg zL>YvLG2YPuSV{6N+CTy842c?`9&0Ln06kTUux6!bi+(^1%?fQ$9FBRqtNhj_9LKNV zA+z=8d{e`hk$7PVZBtk|g7+?#64|`30+tWPDn6?Kk$s^R@vmvGV{K6dZebs+{&KE< z@K!YW?M}TKtH@P8^kQPVMZ^8O^V5fRZ_I+~dAtov$36l_X=B*Lhs8@V#xG8L@W8pF@kajRddXChHwpoF|SH$~S{to*Fk{EbvXVkWkI0 ziej)q3rXy$pArYU)_r(b6NaRVQ>s-yjbSAZ|HJ>kkoKW{=Y=a*F`&T2P|E7taw|Gr}u8{>Pf z&;b;R*;6rgdIA)e1D05s1C(}(bt8CNVs?>(Z|f3fL5j}q3M%k&Q)XR;?DKt+p==~w zl@fWsHe(VlqEab)^Nt3*xOTXV@7tG8pVN#J2)x+Pk0*jooD>5f%j#YoFvomPnFa0C z+r3PKbZN|B1ia4u+81n(Pzg@Zqfw40A=n6z%EG$YYLRXq>^VL#oMf1P5n0U}QPN3b_WC8-cDP8~OCkkskiZj6C2?<@ z&j@hfttP3`Y#A`3_*U7)Q!!Vb-jL#2?SRO-{yZ47(wB$!M4N#&ZR+GDfw*?Sfjbha z3E}1E^V98VcNKrm7p2?%^|08O`k-yRA>NTXqc1a{cb{!6LnFu;xbL|*P2@B$Q@DI@ z?eF#%5=r+B7dH>0Iu2|9`Ez^eK74XOGznKGx=0sei~&lHL(j*{!k=jgmqqwcE~906 z!o{|1UhuM5e3FD3Cpm~O(JZhw3wXmd+9@(V_5gV5XiEcpQb`&kC3^G>8-}@dBEMbs zdxbl=KA*cXPO#JR-6ed5^g~k#or>cHAA}FaV47R-KL%RrhIdqjzPeFfGx`11Ho9xS z<+O#g^-)5dT-#acpQX^+Wr>P3+O;Rt*Eg0@fhLL0A{iP-ap=-#@kFaX7}`c#PR5`9 z?vU6H3!C`(xlXg&TdZ>W-CwpbCbIEoiCl`NjnG6&%jXU)AWGO+r>sva^TQiE$0NA% zVChh-?UR=sCh_jn1y9Zx^4*^nESiEpk7tg7Ab?@#&kyk5Ilp&Y^&T`gD`@(zKl0Y! z%dII@3rrr(7p(s;Wf7hsB|qb@e>ZELWr(ivZfU29_*EG-Xo8-(fViHk>C!iNhtulJ zZOq8ZN`=m7E4t1`N6scWmtG`(wF_2gYwbFbkojj1Z5~vcQNhsv`maf-d_`fbNrB_! zGni%GBbeLy7@p#yU|Nd&C!vDL|2D;4v@6)-vXb27IFcUKH6Ke5s$JY*l zNW~YyRz`ns+CI@rV_;s#c{iQv>i*=+dTB2aFi7CPzu@sv`+x8N=L|~dYiX$yFu~(N zmEt0NrwNRd7#gN7@F27X*klaNYe#o?cSiu$uyaH;PQ8%_&vkE5xHb0AOCQO{W}zGK z(=WlN{pXdm*WNQ9MBH(?*3!IkL2u7uYZX>s{-GnO_pJtHQo&qpFR|F^qJ>V7O2$73 zG$7jg`iTig?1N{&#JYzE^6q}Lfp!mrm(j76hkzu7Qp_rVS^Ibz%qAmXUV8?*yCS(K z;1I7wdQ{f}ETd0AF}a4PeFsGP2eTqi>iB(n$y-ei*2fC~3fo(f)(sKgC2_pu@9B8d zq4-FgO*6hRca{r}=DkkOfa9S+@Qg*rBx~Va!fj3uV)$*tbLw2cmXp|3f+WX4+zoh) zF>$`?0H>N`#kjYL=jTHRbOPmTE_CKut8g&efFhsCU^|YXPSEvr_Ae$uOTvwg1y1gR zU?}nCfEH~i=EfTU7$?*5201#c+Io5kG-nr}TkFl%i)|l3@e)Czq5B}aPA12769xc- z%yXx@t>Ft$rZl`cO~M|ZMp?H4#R;mJKcfo{%DTwteDr-FSH8i|u5>5*>UebY*B|`e zXAQNh2|UNfrN$@UxCb1c9LuoZ{P^6nFj&PCi3uUV#)aW%)FMw*s@(tP-^=?m)(MP* zj*v@pk~Q_+yW^q>BIqNaQv{aKA<|+Yl@yd;Ocn3G9+xH^s!xL|#x} z!(auLkBD1A*q|-gU5*tEvp3g+0Vl8z@Va4;rgz6E6>_|`GCZ9{#gvqt8Q(oJ@OPx%e3uxmjv z$=Ns0WhnUXp!p47(XWBT!;0D!ZWjw(pS~O+R)-&YCwF5-ZJj6ouu>xU`F|AMU?wJ* zwVD3&*Az)XH&wF&MoI2&ag!v(^dH`shVAqsQwGxuQ!zja6x4M!?fod1d%kXZ!C6TP zv_c;>-+>RTgqnfH?UE0-D3wSY9sJVrWhm1?PBavI0bYxLIE$g7VU3cKsb$=&pil!! 
zI0J;Bv2ei4p|k}3T9no{NKiW&Kv6lxvBjn1oyPn=z9>ro^X5%q!Pw+j@Hp<);1>zm z0G$IaK0E=RDc=H7o#f+TF7++2Lj8>jKAa}26NXQfRfA#YJ11Oqg~ zkg)h*TSOZc!L8bM7C=rIclIwAAbqXxS~Js1{geu$tIiu18*0%@@RNZ&S5q=tE=MRv zZpD(lEQ7Wcfr^}EiWA-c3xvBnD+F;8K`aSGhSII`lttL2 z3s+rW^iZ)Pi}z{5Dv=YfU!-U>#((ic)Ts+EJ`QLdt0tu?GErBhe+!LlkjPfFG$|OD zbUCynXA@WNPgOV-eQPtS`yp(cNSEPQE8A9M>zeg4o{Vuvf`NH7A9bno%dY14kV?brOn-AF4NuHZiI zHKYc%h=r&O1I{G?-d)f@QHSXOV5hU&RfjxV7zUDFek?{Tgs01Le-cb3$rv%$)90?O z{&OHh5+kKLN-Ik=Qbd+c9xYWeUDpJbQMj8rsFiHi%+^TcI+vB2z50&^ib7$*e1rT) zvQ)awqUkLk$fyJb%FwFY>qCh?-sQncIE|vyOE&6&{7Y~fIK+=O zdVs+mBlZiYiq(AZAmyUN`zWwW{=igGXUrxsRcz*LGE?4P{dj+Er7m3T7zOka{*cSt z+9up;)NeYHMLpdfa|$lTb4iMo7?1}iM8`{i`kePMs%tY=%3wN5dJuTYt(&816axkf zH2$6yPBk}^E<6B~l!O%CdS|ik0@Hrm#dxl)LeSl6!yQ|)^L=ktk^RDT?$>jA6a&lY zYLp`Pq@a6E+QtAEv-C>FpKkv3ld6prfD<=K``2G0LxWh8^=^&AC7>0#@)ZP0s5^gK zx~7=Ou!w~o2Ek;754(JM>v{C%PTcf#j#scdoau;zq5l{@1s#HCR7W+9(L6+YNNue~ zh3;&F^EEEtq(aCY$lOl+$z3^Rt!C>k>Za2d6pDSeW)|Tkf+g2%9z97-1fDr*x&;p5vvR=7Gn@)Y21dCEY?WKvOq-#XZ zCJJ9+D^7F%@Kz7ddUwJ3*#uEbQFPGrK{ar+GhTAS9!d{6n3`4NeR-$a+iC1csu@oE zuU~JX)C~E5cRqx17d|L>XF;1M>odI-_B8?8!E)J2TyFHn!tPtuqA54OnS=FpJ|a?| zsnKRK`!+)s%f$CU|GEIHh%@(@dT$Vw_D{7VG5`S7+{>4+1p9PS>I3GhW&L_V@W!Ch zZnQiQ>=<^auq89dDENitvSO4XJn1bbhMjiUi!+o~UFu65z!);|jgW&K&+d0-%?V5C z&_mSHp(&m%fHG2FHR%zWb2M>)kwy^Wi=9S zQZOj-xV3ul*<41-%si6wI&Fn%WU^LoC^d4l+3PR5R(g7+yBez90d=al#d#XM2&j_@W>>%#CYa(LyR$Wr0>;2rSqCbSffHZmzKQH^w2I&sR~S z4M)Ogf!XD+;U9zAyCed;{;)9p|Fm~jaZ!HZw+9JH>5>jb=|(!F5fB8Vkp?O0 z4#|;56j4e_KPui_^^n> z{H%yv{Wu|lpMpbAItxQk{0)>3n0Fa9M>Rik0}pd!(Bm1zjZslH)#Mu?OKc?jU457D z@;-Ff7um>^#!knGNxRVLz7#!xT)kW-h4+%GcU;L7w(cJI+z0fskvr zp%1*~^F((odWENR74buYGTwCH-pqV`co!wV$RNNtNI-`XSVXqF73I3-@78M)i?n&4 zF$_H(qVK{Fi#ozzVpdp8e>Xf6v_S8?vT?gUR7__ctDt*PVNP2aEwqO;_|G}ONTXfzNa22J1i`G7iul-dHVOtZx7yr%j z$HYF`153qxJxgC`do@sdFD?jkX+U?@gOGH~-h~!}%J!cfd}blrY+wg0BNLaSq!20M z5$8Z8kGs!*LeR#~>QDB{xH6Z}O

n$nWb=!H+P@_H z`W^`>i{$Ebv{f&%L%M1nSdIgd!PY?e!paY&+5kn4gEE58n~SI8_K0?V#}>S5`fPl`jkzqcsf^~}0vQI|V z4UoXlaVw|tk3>Puh9{2)2lB$cB6C>ebqnZ&Zh9_`vHz$ zw|wLn`SQ#U&#w)5lPMLFrUZ1}_ZcqiF1o``+L+?{~HMc*=X+?^Zonfemv6tYK!JsCDe?vhm{9aAUi{Ly%iC-$t~ zd}J{=7yx`W6dR8^C8FLON9eY`$GhxhZg3Pi)SrodWN|-4jMCyhjCTT54%EDz zA%&{L&-yDZL*C(yq@pz+zae zVr_mj0o($pAtd)3muj9@eo)3-25LbCX_CTQ%9`Y?3YGGFX|=0Hg3QAfDS-nzUM`3M zIp*WgjMK@w;Q9Ny4z`Jel7kbrTy?e6wuv>jJ;?7b-}t`@ev(KsGQQ`=&@=CWZbvOe zB@hIo_lZm3_Dhm$J4qBIep7NN`A|4n*M!c=Bs~AFM>!MW&-ypcD{XmHidm(z$|_Py zO>PS$@*kY)sg_F;VnO)o>eet>WFhP7kbqX)1ES}7C)^&(n{sAI>auta=|%NpY|y(% z2VN$#N)OUq+QHNB??lm$Jozb>fcUz28xORh&Wm6E0e%K|FJ0Vuvq^t7WTo1qZXUnc z%Aa*)VrM^9e9tc0Is>-rikFUCDghNC0mdI2rUd5BULfoH#P?3a((BM3WDx}i#F>ka zGFfVji-eBFT}bF)6(@4`3d`m9EMq=W5yEtP`v~I2;CwIt*$ZXJm?pS4NxHhf?U%?y zoZt7^Y#V$+$^ZeaWwoo5L7yY1Cgkn>7XmwCF)L2e$2NL>MI$4ppudKCD?azb&ZDV1?o__f?>C-34+<@?TUX)|nA?cKU^wjv`=mL4J}3v3 z466L-jn6FqjhE5UQ0z9a*Gg`&fc?7?QBV`xdlervUTKFGlzm>F8nVqIq{7UGm6SCW z`s)SprQ9}0?%NNADB5}58kozShQ*wYAKaNTAf~LzCW1wt zJ1`cPE3gnhtO++;r`GD@KJng_(02Pa24{KPI<8H^&0b5TjZuSj*zR6xT!)0?607Fq zkwJU;*_&VDR64V`F)QFcY`^vyFMB!t6h=^$K64C>Jq;KbTDa8Qnc9|XEIWZC-(2Q+ zyYwrUJag|oBDFO6?SLk%B#&A2Nu|H9Mn%3XsmKIcl{xtrx3O|iS-()n)HvDJ+yBTI zab}z8G~Xw)`&n+o$Y1l@zG93$L&E9t06})yOI<$b*7MMDO$FX0Y&8z@%#aG6IOng( z_(MoIkb%xUS+@mL@K+_|BWA?JIc1+5Rq6dSR%*kv%KDV>S4R)?&Q8ut5q~kZT51)I zwSxDk>B>IomtVN@SZUEVn}KYUT|~=w)kPapxl?|KB_RYk|AthqG zlITxHq$7FQ82oaB6qj;%+?T(Cnfrj1oC1RJy2@9Z=d$ZUb%W43H!E3OCDn!|L3jb3 zxx;(PJc5)Z;T|ZKf-?Y>-svu|X<1&1O8(Jw^8S%I_t!$A7NF8?3y(^b&gpYKK1dc( zag@YbA1*v#&iU|>)3k>ZO9%Z^sj0IekdDsKZ>`R{to@?->eL#JYm56 zA<2W1o)EbWO6q;zeg0otB^WDT?up7%?UjV<+H7wWNwK)g^-&~~eRWAVc@y+JK=}h3 zxw+LY>#wa^e##lD*H@v=ReP_djxv!^Gk42mqK-Q2pZCL>#x0rgAkPr9|p zpi^Sj{ZRS=UQzX#Wd|p6oA;ApCSE+Ee$KVRkh5ocMJCf$eUhtMAOt?fu${F1(q(-j z#kJ#M+lzVdWZ)G#$T-@gh_f3Fl^?$YdGtI#u3e+t-V+jvv&5_92}wJkvXbG7J!`YK zd*XRRg1;R42!n~k62JfP6ZR8`$lt$)KVyc7;LBuPm?YQ{@>9~6Bs0**mZzU;cz-@p zeu6#+JzCbyrB#r#+~G8k$(|zTd9|Q2)@MFG_y&^odyi?6iSBd?ibBr*m!qXtd+?!3 zpHNF+;Ki=m>qowZ^HgL%;g@;)mD8L(@D-UX>(~jd+VQ}Px97#Msn+(#L(jSWSMPLE zTBSb1NanSPKwj<3!;-hXnlACqWbs89Sv(gpB7C~Ez zgJHhJ2)?o0Z+t5-e!ary;{T<()JFso$m&xM9kh-ysdI_r5{2hQm53kZb7()6<1iq` z&QnfAlHiSgnj-SY&v)ELyG%2G;%2hxbuXFpwS1^9FUwQn2vsJ!AHOL94Eeu=18H|w zE!?o$h}+58g|K!c*3Hn4HRL$p<<98soAheSpCN8$!PP0}r{AmVgnxKgdk%bShjc$8 z!h+{>j_8RwXvoHpj-BH-@j{mE@lpZ8a*5FO=c{hM9(+au>p1&Vt83Zuw-!umb|3PUWf?&hqFy95RK#|HzUeZ>IuM$fE^z==)LQV) zWuOzC=j;fm^xY=~vY<=oejGjwY;q7un=WCeD~5|5lwj7T#kcCK{0xZaO)vGY8reW{ zS*O_g((i&w+PxjtngiER?>GB2$?d`QS!~_>`())G)`_C+c_3C}4?j7-p&rc951r54 z?0qN1J{s%G`x{KQ_Tv3-NL3f(C&-_XDSpL%dk*1wuH5dkz9zxgb~GH=bO{3qIK$_nyA2>q*Tl4BN?8+Wgw29O zZcrW4lmZF*2FZk0YK<%W7U>bZm($fK1$jnnXdy*M^^MGrnL|ENSHYzy2aSasBA7rE z_U;QwkK3~%IXO>~cG-vn_l^S>wt0_QP&2bu5MCLSKo3(N0XSkplUor#9Q`5iM$}o*P}CL+BVhmyn~X z!oVgt&;|+dv$|wdI$Yawx~{z!Is2Hb1YT)AL(-Vx68~Ma_6x-}oaawEe5o2rjk3w zB(8@#(_2@M=GMEtXzMl)0(y7$`GO~-P$j)d`U3wcm`H=4wG#15v9hZ>tl)>!#Z`Ce`wVh=6iJt0#b`yXK=qwX@!ZB=`Vrnfn^f1a123-FfPiSkiA`(X2FQ`!aC+H^arvlK{V8 zgA>=EhLFoiv?V*Iuo=B&tPpH@D{ycT?-lx7eRWElGdi|m%&6TYeQET`i+WjuZ zX2%vD+M75-+XEHCBivn7%dnRS|$YCC)Qg`Ls%zu<;JM%8YY?hop+7yt&kg%blYgG>jb?1 zeJb4b?SDPLasz>USYaSAvMoR&nKTStBx=dQHO!{iNpG!oBo#VmEGasT3^<^z4X- zeO=~V&U138uGtk9dG$9IIdt&i+8rn5YLdJ&kaV^R$V0=zbOFv(R3Y3ie>E#dctsPz zIgNRI7?@+8o|>0c9Uj4q#n|cUAL6kJ!ZICQ#3wsktD_AQNko+ZViN^jKcm)nUFfu)KuWR#K9`a5$1&pIpa3vWp5$Q){OuBDzwv$nN+ z>9?D9w*R)1Jo@O}pXwo>4UB7RhTh15Y2>ybuc5d8H!9KIvSu8@mW@^G>B9YB>!bC7 z5ig#Y0bq`-U8-)aRI?EYHY-yRh5s1JnJDX}Nd4@r&;Q5iz&v~AN-rhP!|4k|;xRDa 
z3WEvDJ{K!%Cm6tq5?n@vM{ZOn+j>WX+buQ4Wu8Z5_E2Ack9N~Sz8r`K5|ImOUY$}4te6_7v`oZ0GKs%Ux_98^{LQfc z=OsMIIi%+3G=OGIuE$61mQ)%4AFNc%G^XR zTK6xgiE#*}AyKa$9Z)^Rw2IQIs&?VfElLX*^BxO(Wg$k8^UAv7$x&hcy5?!Mpbi8< z=tGz^DipNkDq`17myM0JWCVGDZ`FS-RklUuecmo()Y%tsark3wV&)7h`*J`3{VMnR zKIX`da1Xxv3~4Tt^PEu#;m_M^;q`JwpFw6~%R0s$kIX7AwvU>C7{Y1zxX?cBWr_rS0uZvW{wHIO^e^0vY;*7Q>`lMRQbmi8$%3WZ$yddw&4{av{Qe^JH z&Mx%++K=c*VAnEJ<26Lu#pI_H|ADU0;Ios-XH}P14QDUeRRrAa<}no>kk0mIuXF`+ zp|I>Ob3doSVA&2h@@i2Qx5WMN;*`vgn{#hqKZjS@^k3D3IqjMK-CPtj_hjXrb;Q;I zD#{NgL|3Q6juTTT9~!Ep426*o&H#@z^4z)&?lwc(&oo;Tmf0A)X(^i3FSCO>5|7MO ze4Z+?eTaG3_1?JI{|`qxpLdhSSx{4{arJ~w=G(V9p|VT^k`7l2qCqhQ1qBUWPt?2K zYCruxKCmU|JiF-zwLTaGc@O z1WUEZMy{6l4xKJGJg`LH*t<>z7RmeR^^5q%^%q2pCyRm>H-*8$6BK!ua~Gr;hr=h^ zw(etBp31q^2Pq9`^kxS2j$S>-G$JnJ`%vp8mJmY!r^ACXW2WJ)jfXGTwY=a4*%H!D zo!6OCjyc?hHEp(6Vt>AAncIsEy;1j^^m`BQ$$nwiaM^*`uQGhVV!ig4LUXs|ieIp=S6#!mZ3M#w2Cry3M&lAPBA4x2RV(jLpXjY-n`~X#e0I>#WckC?> z&n?J_1(IXh%>f6ZEATaDqW}*21!@$xlH|aKc_;CP8z!|nS!!%mm>Kd8^!z~+{e!HF z8=#O*4bNLv>~GJ$SQ5>8S2v%Jrhw%#aXQ>*em5?jriF)9P_Q%0e^WZ(us3=)o+=x< zH!R~AOF3}Tqe)Y@%MR)eQ;58w4CT@mrY^0$4-?W~QE+@FymTn?YTnTg*f`lh^zxj} zgf&z*`@Dc4aChwqSf%?XuGd(pF{w$(-j^a=h~UhPv-UJY>&)eJPp?*`va332&8@?4 zuYUU{N-o3?4{`dNwz#wWp%r2D&g`}1y7bGt*0;Udj1Cu~e9Y zvovFfp`Ee{FUJYrcIKNOY)?p%7t3*nwrWcLh$3Vg*0F^ECQpwTKka>?>VoRyqLsG zC?s!Y;NM>9_}8QPs$hI8l#Qp?8XSZozhsAi2nh1A-df+sqkhc3PrUm>Cju>HsFux% zdN(Ux6CTiIAp_=DYIuhvRC_#`nIEkvI6pvl_UrrS5jQcQ3>B_3N0@vUP4u_0<7g=? zmUmj!F3YA|jCJ{BUdnl+M+0%Pk#)p-esMf^S`ru;gH$W`K}1fPLpu{(_A&d#*}2|3 zZ|8p&IcN+vx6#6YSRTu#F4xR$~Wy6J*QpR;~y8)(_AY`VBUj4Wn) zTXN#i@cIFzXofCeu?(f#ZDjg>pr2dEkyu~s-2K&=P?OFpHO%qy`*wU_ZbE0sO)uzI zr?@pO!#-;3w-Xm<)obMrg5c27bt3j?=i%nTNfO;Q?W=5N+K(;EWA zGVAu!y=``Q5s-(UrhXYNOf|MyoC40WB%Ef`NQTmKArd}TBtE9L`5uTu_|KZ!)V=#Dsi_u(-m}v)*}l1soWfLV zC4T)uPNNZr(4&OfQtxJe5zpUD9@W#UGClCa!fdOp5nVs64st#`<)Py>P$gNI1 zFVQr22CO?+&j4a4OY9x;`0H3^#ipHRK@LKOS9St2levX43fypEbF_2YLi_!9l;l?xvNT>NGKp(3G zK>m^P6~OI5CX=EVid}jpNk`2cbM?Fp>#x(r-NFbN&YC|Kx~ku8_;W0rwYa`?)5|R+ zS|`!&URBN29>o5jQwuV*a{E(e_5&+)esrSL!_vAz@4N1pxLHtMLH@U&&Zd^1=70AF zGfMTiG+qd#^9Pxkwbv2DX-%vo^qn*93?4K%zHZ;%LLv`dINS7)I#%}|iuezq1%z1N zwc9N}-Tw)R+{q387}U&u$~Xz3eJl9YsP>C~v%cPsy$GfFFCRGPLXeRQhJ}m006p*(F!6p)arGMNz3?bI<(6+0%fNxzp0~`BE-U!4*dFp3-hp zWJqE`sH7lkf53;yM*0D_9Xl(bpxcS)=~=@u=boDB;Fe6E>23jv+Oy%Yk_ij_W+!1@ zmA8AJ1*<3FoUJD75#2N*))$30-F}rM_+MWZe_7+WT#Ln1oXCHS1sYs2z;Zi`Yp!so zSWET`84HsF`=c%fzE`1CjG=sTNwOgM<=p%J0dgst@nQ*>&o1v1e0`H)kJV2P%u`Sl z&S<8#VMGo5=iqi}n4D!eor-dH((uM_PfM9**T5K?;8ZRTAEs7i-rwJii&VJuTLitt z^H=d9i*?r}M)6;u0oB<04Mr4~d$GKMBlH%#*>f6xefhY1>zVe0M$n3?AxOjOU(S*p zQ_&ey9}`z)29KIXTxF-FE1{h?ef-G19bSJo0he+6KPC_aX%|{{Dj{zTo6p*4=|d55 z(}5u~>25O`&mzLKtK??s<42&ux=U?iM#!&%t7l^VWat!Y1m~kcc^R$eN=D8;rkqOz z^Jz+?@IIxjf6Ck!<3F#3cem5a|4v$NNVwKaL^E!GN*4`Cr4QMDtl#=9m+%~*bgvHs z*rnZHuwppzE)z&f)=z4oxc<+v zSF(Fjqo%R+hz~D@puHi_yu$J_vc2JdUe-IFi_?l;yj0iFI7!Cu!rd|5C*a79iBA`) zRu*4yNt8JWD`*J7n47h=S=$3hW9Py93+bBrQ21n@P07fR#%}cf#mrlmbQ8FrxWZMT znA<6Qw~anSdq3jfV)p&r>^qwQk$4-fALv9g_Z*1kww_3M*U1A-H+At8={|8>hGy{V7t#PJ3r(KAbzBBR>xsH$wUpc6gDkFqK#R$Fj$ zzBDjBdYR=D6Xxv9CR zsZwvuntrjGFk``@k`~(&R_?J>E>nAOema?_o&PIghtx?I+~H*>AT2#~87y&%Bp1y# zoUXD}|5Q>*6QTiK%_oO9^zNlXz2iSVe%U_PRt)C@4@z?52KR>ax-dkIBbli5$olYS zM|`RwH-t*XLygm#ziTftE#AYW{Y1Nw*pq;U+obmbfmmXn$)HFHRKwX2A3GRYKb^X> z#TwUokt7E!In2^hY>VYqqJ%3U=T#5%r_Eui!IC5io`oE~w~8OX3txE+UOH^5eG%$r z6Rl8=ZT6_!FS`UG3?ri%adsx8=h&8ZVO6V4iAm+acfE3oxrAx+v70wLnpIH}h}Vv@ z!lq?`Sn8fp_dju+zVBKcbuke$|8?EXZaxDs#|C>T@IYSMzZ^U*qab;LcNP5Z1=ZS!E1af45*jMYbjLckO-~M*68RHylf5Oq)_FOX+CJeIix 
zo}#L^$d-dKjXbzI-4_em;*p!<&zeS@I|MrzI$|9cY+PMP9BeW#ba*vqqgBP0f!6dYi zF~4xbpzsGzZS_#=xDlgQMPf|P&dfLN61C;r)uWx=VW)%aX6LI8bf~2BXR5J$e0|l@ z*8vmGuF-peS+#FMfQ;)TwfvALG}@C1*>3*(Y|8EV)Kw)W>q8l=_(aoeVmNEYQN{+f zR_x2+YQ}GpPOU@RE?eZ1V)EQrNCE7Su^E_g;~Ya3{ddG#d|5?cuw{9{)Rsjc^{3k} z7~d#_q-AK+dA+Zn+{eB>NRe{8HTAwJcsX6Xk+qMdOe}R6im2dt7mJ9F{Bc_&Y}n5& z!QLrVs8ed=;;E|eAK z#9hOw&Io3B-8a}oqQUV&v}naT!&T3OwptKW9a_bMwRUjQ{NUz`FVqx8jRGA(awbm8 z$s7?=l_^G5=`?*l5FaAHwtP}k|0sGaP6zoVlM)bG8ReTu1vK+0O`<{)fZJ!E}SbPh_aWMTt z0V^NS5$t!w?emqg-QRf7fIxjcgyR1dAVKNp0Af28=tctMm7wFt?f*Q&UG$1zXmab! zCInspsn;&~v{^bJWSn7<%BIQ^@o4GmW3`5m?bJW&$Ed&OwyB=~Efq2%kS^enP>{+) z|8H23a-wMl2-g+O_xG)fi)mX6RsSn?w5#sw-ZiY=O_X_P@OfOcB92`j?pb)RpiU7pvf$PD-EAAReL90(K!BT_VE?1PMjO>%g0RFHcPMJqtBf z_|pqAEsSvuu)Xa7$O*%Bg@&>Cyg=3HHSN@x$Ha78LCV}&nfIh7aR5Tf`@C~_ug+xUvXzT=?}#Z_YGB?D$6*RU%MA5vhjf& zQP+Ps;U5BTW_FO>XM6e+xDax74m{R?=1cbWcX8y^3({V`|>!WwgxAxzr`%IWb=zNQe6nG=V5dn*9}+ZYwS-T)-x z;}vEJ2{;2NH*+q? zGyOvZLK>({JdmUot{T9gjd2=yoUetVTWcgam$D4v_BB*8MjGy9x&{{(}Ug;aeY%Uc+o>R@;!hJYq;B7dz8?vXnsU zFd}Q~bUhHCJEc2$C;Y>!o-l1@4rKTC*84SbF1#iRQ$MFSE;veMaLf^Vq2C zeNV06HJ)2K#;w84=2Sauinu`d1{m(jmLRTb5UAPD+;H{x|0$Zmf4tSrMM>aE;62ms z_KPT)7MTJi|G2tp4J07#tg|P;0(@I(nv3pxfwG3T$^b=C=IqzgVx0!t9&kZiQROYS z2?h@HO%1o2$9Lp9+?MJ9J5_ETYVa`MabyoPbWQ;|aw&QkNDj9FZUiJ$3POFU+&hYF z+VKE|+cgYxn+>_WzCHt)2KYUR-B&1v!5Cgx5mquK(`fs}#&Gt2bR@Cje@sxBHjiN7 z^gs7Z0iO$Aaj5U_m9bGTe2-R%bV1-W0(qyB>%B=Mf6D!zvaKt}?|^(o zGuX?aECm-IGCN)|B)~vI*%_scQ<3t4+)X18R-2R=_s_&OGOR$(L_O4%ybn{|8LLk6 zdxM!Tf5ba@Tuz!mN|HujymwcLoy%xKVkr3Phsoks@y zNsqXh8-aP-6BKb11r7i_Z3NZ}P@RdwBFy7!{v=S6F!OIqgINtOFp+`)bVt)To%?0@ z+aZ2&ke6*fKqa~nD5tOd-Mmj;Ot!=rT$gPaM84>E^4XGYJwZ$?V5t)L+AsY;NbNPs z!4E*5LuO#6=E`Ih!D=~Gh=NepeLO$I$H9xhJ7VS2ls>l(_}j~VT?2T$QvrK4!)_J~ zGo2UIMD0Zn{lgx;n{&;@%fQd)ve+eS#;ItHTL7zZpaN^j-Ehf?M;lWf&n&U9J8?2! 
zXo_|3OFYY#okAHTQ@8YS&=c_F{NGb@|T&O6end4{+{aa$lae7#k#FT{ReVSIf zb9Ci%!D_PT$VE)xC?K%=_C|T;>d%XeqO&#zF(j45*Cm<>pV4aM)naV(6cas%EWJ|q zhrdJ5<7gU4>^U72sCRt#P{=OVq&vl;)F;%S|K(6Ai$8Yc-P>m_QrN;hB3Z#KBnr|X z@$#+79-H9+aB&2AXJltcqj(&YP*Z^Hy9Rb3jeX@qU`jAaBz@Kv%P<-VQAvVEpg;S7 zL2?JIy^Y)^X|!7wG?EAHtclE;93+uL=uknc9s=`{gk%k7Pqe3B7VurKK?8FW6Arcy zidzg}`5V@To>g`m^(LmZ(l5*C~%JvqVQ-Y`7e zE)JN;xb?tiL0Iga`t9#OuFR(ZedM}w`RN7K2z3Mnj7&TFcQbIhlz^IAlziP9kFx0} zuGIN~hek88yl=b5u+)a)Jy_^3GRyZKm}gRtp0ndo^q{w%qWBCFh?#C|;Mn-tuO*Lfv5pFRgbg$9)p7TE;A+8j@YnR~`b$YPU-Se9b>m51$ z#8ZC0EO*JxaU-T@2N7hZz96>NG@|xe6C#Vrw;awq>|IRJw%JpU0+xs_9!9ZJTaNkJ zb4K}bSxANAyraLTB--U7NQ|Wat7VlYo`AOCcPTkH%EF?O-LXP6pniECOU$%}BNxuZ zY7}1I)~4+HoW0!>N)P`*P7=l>0?{-HJnRlOi@PIOEvNce-c@16x!QNcOGy8 z!9ehTgX_=kQd}FXtKZxCvvaH6oPD^_g6i;8O1S3f z3AF;ZC3AetqaHc1y-4Mp$%{FroS|>J^!-Lc7-|s!%1tk>bEKKbua>ph5jamNNwf{n zC3YUxvMbyrzE9ojd%_>}^nxNOs_RPDn9oV|MMvXM^aqZ%ugXlAXADs!Ts2~lJD7OO z@8n%x87gRk2XE6|#-~{fU+#>*PWr9~u;2@kT+VpJ2%N|IPOMPEb7m>X=CjcI6cHpO z*aRU_VyPvJhwq3VaL6x#T}KGLX0CH-A0yKGC|(Rc4Qan_JwTEQYK>i7YFMYojL3ebCoV*S9CnPW2Ls-eE(&n|%mdn7OzKXnvxk_sB0)+w>Uhg>+ zJeL6@=JYc0V2S!o855ey8p|?TzKz9!Rem+yV-q^#b!P532-c-sz>|mh-28+i?a(Qi z*M^%3REJ96?~hH%urL5v%rT5iC3{)aT5NrzPSN_#WuI$!g|)GQZrHD0B3P(dW04Q8 z^HWIFf;w1BNm1>)Ij;6RE~pa=CPv({Y12fz{q%hMZ@lv}&9YE zL4|y8Qd4SHQbH{Sybp2;ZPNaf87#O+EDOR5W?CA^VsNlI?qICYl}mL}w^8p=r&50h zEq-S(m3v)1nRH7#8sFPPY_eMp_JVl#yF$kaB z%1Xg1Zq$mRmd1B%1*;>p!{fCSUq>XUr(uQQ7XZ&$#MJh#&NW(N>)fgLC<8sWaB+!^ z7=Ln>Gjt;8tREi<4*3mRRE8VSYxTGaG@Ya(Rk=VN>Sw3Lhvby9LZOdTi@3o7(wPEv zHU@S)cM@~`0&b}JMxZUez818WhO^Yk{lO3IU}Xt=Saqr8>-bHR2l%7$5F_Gd9MgV0 za1O^seXINxr!Lov&V-p0cZraFc0>$DBOL2e+5xufZTgOaQkMP>))Xl~quq-CbI<)d zf&WKILum^5|MNwZB=J8Ni^sf;2Qk@x*4=FS&~(bLB$wYit|1&qyfdr z4V<4H*ZM!zjgOCSQll<-7pah92cXHZpnw*_&NJhH*0GvNCJ&*m=e4!9odFwT6aWRK zs{lnH$P@UCaSj(AZbk!8#te;Jh6U~(GY!DK#eg2CUs@T-(}zn4?y@)pcPDso54KB?;2t#i;_hxCxGqkDOK|t#794`RJ1lTF-``ue z>Q>!9sAUh#oSB}PKHbl!Pb5fH9utie4GsP!GBD=Q`q7J}>R>#hw)!K^dSr>`{Xq7{lirA#Y#SJv_elc{^T>VdjQ5sUTIJe-TZ8e@ zJrnGHuwgv>@>IGn1B#=*RvNT&IsTO}H#awaeR(!@$oc*qV1MHRyS>1;UQg9gN7+#) zFgGoYxF#I?zrKB8eIGXC2UIA4alZ+d!rYpkYd3X6)mdwZobf5WyKG7~xWMap?9v$`B$ z`n}~i6&DwXy!mKvkFkX>z!V&a<%7uqcIOyNm}e1n5elA2F4AP$ z#EZ5requc77^^L=y)z|1wJJkV?pXNY=j$0@zETuTq+djJH5aeL8hNwJo?L})Lw%VM zp})WX#S$3_iM9F*8El@t=NrBK_RX_guiX2(zpQ8E+U{hd31xSFC)btd6-QKQWEP%N zF1}gyW+@*X9j#}EZA%q!&+B;gD1r_&9rE&&Yr`kUZLOnSq#JN?=hOsIZBbkPW|>nz5k2ID9i%Ki;`jjT`wStmPPJeTEB&puu< zT;<(@i$D9fX)|L>_%yP*cxq!w|HZ+KqM~6lr^5qD@WOFegFu)>*B&2v5>w&_dCQHP zqj@0M4iqVu5A2cEGTzN|Hpl%|1dK?+t@b4*t9xq-l9Z5oV9;Qi7RDo5D#3F0PSlH> zwi3PZ$Uiv{KzXEK30Vt4E6>ZLl97|MQP-9%)rk7TzXMcJk(H7Pa*MT)pqOi6@>t89 zJX5QPiKD-&Ip7iyAVL?QxX#p2&u;@~Z*FZh*z$HtKSB5@SY((fv!hQ14}7fPC?zX- zmvY9pRNhLu`_&uH-=tPzv(I$w3!n&7E{SJWx&(U`j-funiJIGriFPh>9#~{op z9FR`EfS*ddb3Ki~$1N^XqGaIl@2Z2v!MBo6QL(Xe#js;XZGJIx9j((nkVzYI7)@Qj z1_zn3K%o%7gou1^Tth<{sl1&%-I+0lr{#(^A{O&$>SFWj?&*jOc%kkG9fU)!FyjI+S=NhOc#EHuX9EuBO~kX?v`9=aw_0&q^eFey5C7P zLeSIGqyAi}-}ARSxXg{N@@S>G4%BFGV6xd4DfI$<2zA*TM;$!`w78#fu<6vwtPrsV zgd{NnqaIKhwBYM}5WT&LZn+8x86 zN!LdE=W<0vMEbU1uIm>L4h~i2$mndDIwHdhJZm0;RvHSuQVPKD_1WEIAciEUsHiA` z(@1P%H2uBlTHssc4iO~bUn-gWk)85SMjbxQ9&sPsPXSd1V;Ow?wzf7lEWFm!GHoe+ z1@iGBa{(}tFBi%C!D+J(b!v%G01;o}gfvzb_=i%* zRd`63E?W1qH>BG}d9=?hIv5!{C^Bu|9+L64TSmX0ZFo-wb8VJ*v=5{&&lh;LHxV3( z@XKzQLHCcj$tMr=A5Ng5=-MwMQBQeo=KU}~`_A)nyBju-{#!g!D);?KYNw#g8neNu zQ(obBkE(yHmw(yyL$_Z@#Qj8ymDAyYKwwR`?@E(Xest6=pY!&RJHJH|feInJ0s7-4 zPd0Mss+iAxwKnbkVqHnv-GulnV9*|<2ZwlIJCz@5)vjI1CPac7W1juPy+QPUkRN5^#`zuVk$@M+|SR^ofBx}HLHPauEK;i9>unOXe8A= zpf0ZiCq1Nu4{5kpeTw 
zS$Q?#8LAzSgZoi+0w)_^6j?ePi@sN(6&Y*qiCQT{T#_(~c8yh*dj6F+9tUvz90z>+ z{kZDQ2eh(x)hBD(T9}`0U3|#B(uXI2m^1FDD^tZbd8wIw0euF@?D)d#e0IQ&J=h8d z>ExSo2|J5s3A-wOWu)r-FeW#^W?xg2v?TfRut({nw}f0r+Rdg_gZmwg8cz|A)&^Xu zUJ|EGl^BqO9-dsAZo1eN3KlcCR zONEgZeq6E2qo`BvmpdF?wRfF-IF!kaOO;6;Z!~Ci+jOecjn=N7a;hUmOw;7tE&7F* z-5O)uKvTUGn=~2bzeO{+VwX@(lnGOwfFV|iU1T{W2ogi@P@@b{dul*QFJRU9C38|>0~A!6-udwSNOh9qu`O&N;?M=r zP0}rxS0s6duAm{bS8xBO>YkLGbE~}v6N;mL(Ag9&jRKm=6LRWYnG1b83P1>*{)jR} zFO$#&(zQ_d`yRbSHGoNl@!Y&&+Ryb#ug#+-P1Tyt&|DLr&JpaFew9S8!kuK*X`cA) zXXuKA20GQ@l!qFISh+K`w)F>qi(*!NQJp4jklfLddTvMZ3nCftuM@rzS+=JlPM$)K(V~c>tWW^zWz@5){B-QGp@Gr>C>d=+p3%<;Li_ zN5gh6lcPTs`LZHk7r1Iz@lkQx&*_A(Q~PpOga8qh)-%QVWWhd*bZzyV&dUiOEc+sG z^P@x9?1IqVt~%g@Md-F2ZfR(kV)ezh>lo2wHmgIO-{6aFAtVe zkA-G5teMa~3Slq~y0EatWnEId|HI{uDYbkZbl3bg?};WhVGsq<>y$UhSQZn7EXDejZHzWt1mNv@=R*!Z}8@c5L?c3l3G z{CwuAXg0P1vapfx-6$fqfy0mCNu!p^Ymw{Y#j7uz)L47oo8svHEj%P;$Inyqr;$E) z&Y=Ktq7%$?DN5&JW^=PI)t!NnT|+I)p!I_IH22<$Zo$iKxR|tV1y8zT$|Bv+w0bJ1 z=Wr@XNT6Q&ASIhYiv|GOhO0>KBOVDJPVMFpJ-5@QjGBZ($dH5T3m6Z(R=*4q-zT$| zL#3c$Ek#<}^6{(nH_uBw{Ce@r)*~v`B%d)2y-7(eI-OnNbsj#CZ-|~3o3c}OGEX#0 zDO8D!wvUT-miblbimC>@SHe;NzAoaCc#B#0ErMn1_MPhA0m?ddN4U!NPB zFtZCPr;^8GD>eJ02xQm2EJ`m@^<}qt@`ZBBBr~D+2$3HM(7uP?0uiz7fP<=`NDax8 zzD1`M2z&|Z8#05^e*QMlk|*g-m4~6^q(x2nyOdK=g3;^w!9n@9&K6Q)Skim3_C`N9 z3V6+ay<&*!X{R%n$)5D;Cv&1Xntv7@AjpZape_jjqvQL=RJP+27%X#*h!ePQ)c5JU z);ZQrYCj3%O$V<3n2?M%HSz-y{L9pUK47@{5}GeRR5|qV9J1&9-tRMgGv64p)O$y3 zTNTn=L-44j%NEt4pB(3JkI~gYWSKYub)90g^7d_4_M`om&6zX9d=w^qn5mJEfmtO) z%yHnAiqPN`J)H*?6a8+m&4Oqu@2`Z@bCyemMuu~(d zjlT-I4MPFF!@4hZ=X%^5RYrM4+v#3^A~zk-vb?=Ww&Ld{!bC{kkMs40=_w^N`afgDTxHAc9GWk9P7PrmcXU19XtvuMLguV7Xsz_8z3mG z?tgw%diqUH|GpKX*2epo_L#1|?s^zJ0UP3?WC}_rWELa)_Y3fPOFh@`RvwD8eap>n z?7x1c^qn&~Qmo>gwq0`CEi-6g1sVIDRVwdFr~hjMs5ciL6E1%WQcr=XOm@VYl>YwqVNqz%1&O zUX~Qep4?IcWjNC2zUjeH9xkNi9FA8%7fEoci*0q%F9R_OiAt?rCEFD!r9??O4Wxz# zQ{!3DG0h=Z)jo_$4uL08bfZ21afO*<;9?DnIsxpwE=+Im9MRb13P8gdj8+MMV3BEm zpcF4oRGt~*M#=M}G54Xv)BY5#69onGPEkY}Z%ayEmjfU!XdL+2>>ok`Qjdh>IySP0 zlZYG1$}$4eLVm3`evCska=*)K;p#|s&1I8sa{kffxP%Cedti!u!LB)n}& zZ!2YxuzQmZSwajYE`|^C%ap@ZXP5lkgrgC$(&~8dq%skco?7`dB#fh4{xpQzlM7uelRgVu}fPy=Ry>uv2es;9CkdSN$%7#>jFY>V4-`L6Qe7uy8VCIz@Y?HC6QMGhGW_r9Qea^YdbUh@KKr*J^>c0(I3__N zRNiJyt8e&tz{|a(zO-K@q;J?~{PsB|J;5)Y6xUBJr#US-f*Np>{9t{J>V(!X_|ho{ zbqyfQV+=}ox;_;=Pr5H@HHx!XDL}1_z)<-(a$Iz77K9;onZUVy0Ur5O7%I#H$_g8I z1$?9vPG`ht79!_fDvYO--&TR%7r4MyGDy!_)*_DzhdA{E~W`xm_lo%6vqtyCj9`DUNq*~kQKR}mUsi(*(i4n^^$$`C znCpPYt4Q$11Z1Jo`9SlAef(V3M}%ew&)px`;Ue;dxg`Rh`JR8yFRO9mzfW;u{ri8e z7C9LG-|9h)9XI{~Q%VdD=szYL+#Q+=bLW0sP3Qj@CpHq%e>k-J@BcNdw!8*UleXQ`V zT^j$P#u>Z2yX$0`H-6Za*P-&}BI&;&-k~(}`^ij+gHf#hU|1~ytft_JKM;;?mN?$% z{+k;4_+9_bNY`RrSDhtq_jiV|1??mFX*TD7=gWIj+C|an|K9+8r&%@)q%d?i!|)%! 
zHW)JJ`MOk(deHJ`8at1SUN2AL4R28;pU`LsYS(Pp5+ZwruEAiv_5aM9go=h1)Y|$% z-@w4c!2vr|3Sw|2O!qh&`u%_v22nZU(#EXC=%6ce|DK(>5%wZoFqGi9H%=kqbAO(l z3w!uP9O*UK(1*p`Fa6K19t*8bGfH-zHVGF)DY8lZE} zz%bC{c|;gTE);e&U-gfqW!wm%oc(7D9Tc6*3f*6l%F1j((!DqU^8AO8$#rA>yS7MJ z(4McD{M$(8TKs?8a+n4Rg`Up<#kJJ7$S$wp4Ez zX209aKrGo7jN&q>7h}MFWvqN9{ck~Pzf+pXiD!%myiZI_%Dd#=kcF+|b}6XR_;9KKIfK_aXsN-r0LE9U>+1osufDNOshmcH z{kcf4zL@{-jeP0whDXjQpY|{PPPBD&AnGNmrVbRew6qJ0i;;+_k&&p6cW3c1K>hdn zU}kBB6h`y8P5V$VNO)wv?mz$E>|zBFUwm^N5NnTNC@W6X`}S{F;9EiPe!@NV zL^?bQvvux=X75U~OTQp=e0eBuHy#^3p$o0L%GGRL=}9ENBCKV5rT>f~7XBO!7PQ3D&eT`gbxbEkD0jRu4Bfw_0Y# z)ql2JM7wcdq`WIqN|j+vEjP}H)|$V$(6dAc+Y|gJ}`Kdrn2kZ ze%!m}9=a>Thz~jL(JZ9+5PZF5N!@w{_hXimY(Em31k)Y@l9G}<#w{!?U?f|xt-ZZL z=;eb=5&41NUIOm3M&{*>)O>;Y@V8CA2jc5;`^8#WjI|&2n z3E{U}0!7FDJv?NgV_>kEZ&Pu0uH~|sW#Z=aO$(*sav~@%FE=c1u$falzq^~aPkdjE ztdj;)jYiCj5z|0<5`n2Bu>A?JU+<_GdwwYNUL(wZ#3Ts1OvcUW);|xHISlxi7v!#YQZ$JXBXV+bJuoG+LFkn%GUq4as~?VGOe5Z` zVmklLQ4qmtPX?cDh=}K9#Gbg>0LDp%yUX*VD+H#zaONiJH#@iAjw%#=GU*ONTw2Km z*0O0eL4A_A9yDknBU<>7VNw)KEh(#km)iw<&e8S-!zRLN1JF;+CH7iuuad2;tQjnDyjlWU%PBfIt32+r}`mF1CMdGAByS z%=|I)$d1FXjcL~`rQ_Z-TioC9{N{#0L{u~sre?LX!NE=tBNwYwtLE){{}-PN_m9{6 zU+5D{woLzIb^WN>6vpfKmxh*ihQhIaX1JE?anVM9Bd01AVEvA|t53(H@~jID{{63U z4HOP7%^?q{46GmNiKtVryW>UmHX>lI7=db*AT?pfch%h2M-k?vo{+rscRh98IM65Q z2vJ^z*4?9j>Ry_FLkB5%>}s5PH56Qk33YE!7n{2OksCCDM;53ods%I+lijB0~AI)#KlZ z)|XXL*;c^N=cZS!%`1A1ygZs;r!)%j+l>dN+dFJlYSH~Lx0BQ_7n}X2V`1Y-Vs^{F ze0AVJCg)|Bsi3R{`VGZ^WNm4g>5PkTur(7D%(7RdkAH<$pY zINQEpmFGz9=Uje7lG-^ek?USwehSVVjHiqy{pWE@H5QG?9@Op=&i5kXbCK+-dt|To zrpytQ_or&i&IbXU+Th1JE528 zEp8`2RD5M&4DI3{5nH#u3wqNc z)?TWVl_P`p_DUZV_DI{d1O(C3S>JsV1&Mrne!2^u`%~@r>}7YzY9)b%SekFF2kv}1 z(Qt_hk&tmn$M(hdm9QJG3`CqQi;NH-RSc}6GC%7MMwVR%7bzz9T@|LW@9yp{HJpV% z)~FkcHD>L2}GpxXVTM{_pga z?*iDi$<6Lo;zS=N%Aug5KI}8Hu^kuaCd?$*t8vnKVIN4>S$`qm#db($(X1_GKVMIv z?a5^XzJuvu@;-TkF(kCb{ey$i{s%!F*j_kH-wy`!kTip&T6Yotr5dllZ0A*>PaONj zpZpDM?hC|5C_}Avr!NU6!h&7pm^_SA$Wyc`$jiWUT3~-|1(N`FJp?(|>x5#f;BIdq?U>>WtTnoEy}I zpmckT_vy@T&Kw%Vg-!_*k?_fG%DibVm?tQ13?Wh%(jYS-0yqSNULZB^0qSX}K`;fr zON-yxH&rkFX@JCm$c-8|P znjZq}l7y2I>{Zfv5NYM(0E0B3H>yiA>TJ^r`au|yJSeU+thKr^#6?Lx-0Fh>w0Vj9 z$H%A*@}ayUtL#EtbK>b==K529@f05zkiZ4cGhYo6o;iLJM6TBbk>kEh7b!)n9Vrlt zg#&$Yz$x&eR*RTp_0sq(t}g~pWcahP`y0k)UxIbHTjcyg{6FDzB(Ey zf0+ebWC}Br1W9oew}=GO z^a|;3*h^=*z$_zTJLyFeDakf{xfxy42hdLPx5!vBd*j&+N~E>Y)nW~qTO^(SdB#$e zoO385IyX!6k|A+yk)nhTU(wAx#>R-}|1uS_spYmmaBfPU3tr|k(C+c=XJcYu01!K2 z1vdHnNeb#JF(Me(VxS5)7<`Y2i0C@&eC3ckDO}T0Q&TgQ;Lq@P-x`SB^jm6l;Pets zVbcjhL??{6x~hcLsg#SxQGC!Em0!d8p*iF5?LKK?Mhp)RuWu=D{QftZv&?;6CK0D$ zg#Wt0lZsY8tLw*Eao)?gAIEer2+5a}(rz=+n!yWtPjM|R!lKepRytd>0D&)ZKvRT% ze1E23sRm{pbWM5JJr%;|a3;gHru$Swq}mKI0_O|5oCm!tnc8^W6gW-cr#|uUE>&&Q zQJIqmbUN?Ksjuf0QuE56z4JJ7D5W94yS66lxGcUPh~giN{&44Vkz@G@_@Mw@WK7W{ zztsL<`l+|Bn#ZYUDN~x-GMPQn4jA8 z`}lXb2UaALPW&e*X0*-oDhgInK+7qmbuor6-W>DGey#~lx6z*cWlR+w9=@+rV+izm zgAJ*>AD(!i-lxzM!8Z0Ec7GjUr?%zc!?YH3&Oisja|mA{cHyB~iHa%Rl-oqTNY++0&A0H6#O$AS zi0TuM-_)@MY-4S@7Tmu5LGO=|PJ_r@Yfj9C3$!%N8D!zqYCxBoV3|oA;xK||h09{E zs4134_^NVceA`U8;N~EtyvSEv67x0YoX&r2T(C-V4VBbR8IpV;R16xqKAkBwt9>D2 z*9}>2bcjAR@=_n&1uxI*RGeabo{ftJ(eiX6hP5QXXUR-Y1a?~C9~-0_9-Mh4c^ul<;MXIuSEFi zNJ#>&ALa1p&(T`#rz;4W!Uzp$K_Z&Uc<3U+vbT||;u@n@qLSSsrQPv)M9=C}MEKA0 zDr?}_La(s%_~yu8^n(!1$PT7MZKS%4HS_rIq)Hs_y$_b*CW9E-t2Y1$`fpG43@+c> z<3&?5F@hd2u+$`n&CzBVf_&WhF<2m&wC(znXaRX?38q7TMg^aJx=3IIeB)<@ysE2p7JeJzuIa66emA$=2uD z9)P$9_2q7!CN+q`!b$E}VaZ7=w`?R3Q2Y>4&h8CD>`D&5U4|9;cpA6#F_*;C3`x~C zNB%9{kB?b`PEyx5mh|i8=QD_fKky}q+AYR1Ht&uZT%>bUaq8d;SP0&rGaNd&>6nX4 zrc~t!{S+l8t(N`u;<{>iw8O>pjq=gHa_M`yY(3lJp8B|eOUwmuI2X6O{MnX~YIvKk 
zz)(bPT+>+74u_59=F5jKkxQ$GYR!zL365S2vn07|Vdo7}_7WN4$v(ZjU)D4DU{_4a z9ilelTxHh9OnT6UBEHK&o$7f%?OZ6&X4hORx@Y;iVf-o2PicOL;3)ZosNv%8KfxPU zJV-^*_hlqyuGk93sC#Tr3}(-j@T)<>l48D>%STjaaC3lC3keW*CC``NZL>*)?79&R zhz6Nbu#~^g7Y7yv~$ZrLc&73kvHHL2I1vu^7N5otIi2%tCD5#{!X8s$}4;?=Oy2F5i+L&! z{17MX*$**Z-WbGO61>J@cZjcLj{YD{id*99Jz3PWyXG|66C|D#9U!kqf$ zK!GA=o9EYKwckRv1}Iq$!48V|u%b2g$DvXp;6#pSe@89l(U)&?`(Brzg3Cdlg>6{$ zX|ij!PeR&bLx)*P-T4bzGv!fl`eT@x zasxVh>wJ+Q;alDwMEVnjPwHz-p{9mD)BLI#(e$;$$jXzNwX2t z;3^L6C)JUQK?aHB#}mHmZ%X0CTTdsA>wS}bvz%FIf&S8lnYE>xhD508_UO&v9J%#y z%PyxHDi7%^S)%xFzNCc5sNQw%*fHBa>pt&$fP{YGSQIkcMXBbn!X+KU=LbNkQ#jSe zYuZ^(%O36-nJbiGH(R&Y=9;M#kO)%u_qFIb;2sfrULla-wTVu9_qN{;>Xn~s&?Cc1 z==CU9ZPbSHNO@p)WklsfgPzl3c^LBSyWjHX(^+o(jn}vE)o_+{R9sd{P3Cm5SqP&} zV4u=c`j=M)@N?rqLcd~moEPH0_x$i?>9C*$>a!r zN^yPXh5*BL(+Aj$ZVl40x^>Zh2GS-9Fk=jSQL=9QEv9f^<{%`AFYAwTsjj|^q|-nt zZEA{n6qc0ubB}ua@e8A~>)7=a`^@_2(Y|-1!KQoNr;~MOGoLX~qiBY0%A@Cu2wv{= z?NwQmE)h(uz2ho=5lJHxrjfR%fFd zI_4J=ub=a#h$ndN*6J@~162Jfs~9keSv+5`pEc_!Uoj+virJFS0o*Pq3B)<8!zv?H zzTheb=5ip3Jdy6tmL&;x1<9SjP^Mz$qwuWek^k!xrQh?-=dTOrXJpg_ew4Rokg-^-xX}pxa#c^_c_~fi zp?Y=K;(LLHzm;%J&IWw+t+Eb4%zhhtT`<)L1Ntt(hWBTP*&eubj=@Nncop7JVv6<{ zQF8k*_O;eX*``*jOs)c<>lq(!+d^S=#`%Al`EiJC;yU~o$#6iFL>b9 znsBeC!tgU(+g}CrmUC`lC>|L5QNu&MobCtaBs#P}5(0KT6(x;*;9v@(;67t7) zw);jcGeyxUle+VYM)XpZ%E~=t5gu_4-i0Z{-aQ+kR`^in0mF<75aK4 z%Lvu+4)q5OIVfpy_OIV3`YZ$9=Zt4Yi-lj={ePq>RYFPoD@I@rOrr-eCS%`hKH~jH zS+ibIRN^|mF7h~);Va#3jt$TVWuN{mcsvBKU-$g*H%<`SA!Q9+*yaW|3Q7i~XNAb9ufY z4ZEK_#;VlS*|Z0uP2b#qn7O-wvvrm* z0J@1`3sV4m_Sz@_ry(|QF!g=1c$@tHbfLWAFs6Wa4`H`A4SUynixL?z3sK!dFx)PCnxhAKW%COrafc(v$xL2ta;&4 zUBBIiC$R6jaJjhC=T?N$q=IV89lVu`=$T@u_bv91kR|`@X%1i7A$2F4CnOJ1)OiN0 zCO2o++qHZ5Iy;KAMIt`3IJx?J{n9>^YMT?4`*k25c$uV3=dTYglZFW2I9>~A8wm+1 zzjUVE3qJ~^c;vc{>ih~axu72+*+^9|+1L-Eq5n-EbsI|_yp`FyNb~VPfpgNwfn7%@r_ji5OZ)qDZPF5x!#?o~L6yud zZJ`1I!in`bx)bO7tJbtmAAwf04CJTINuO>Z^4tZ9*6Y|jiERRG7i%IW$svH6z4ZO`35?=OKEBS32 zp@_j@@m)jf%fZ)6d((c(>4Frip*8 zLFBp5UrfGlHTJjWY%7o0461s8(UPN5aaaR$tnrD>O@eJ34$9fhfW};TTRd~fLY>8% zVve;afJ&5tYSu5mi}lGaZnSxOd277scT^0E#BNBq=a_RVp=kjlT&y}o$>_RpHNf+U zV{7e>D{^yPbUu0xgq+4)8tzfD54SlIQEa&GzBx4PzbSuOsB~>)RxvolH}6DdzG%3B zHcl&kMyS1wkE`qs+R4Oy38TI_zjjm3=swOKQEp&r2_OpKyDBl=c{!?@%iDxj8DC^sYx{49@rO`OL+CTsKN?v2vsr~QLikY+}7OVOy~(Juy^;;{ zz`3jUS$)cTMSSzIsTw(rr<6$8^`7L}(oWL!_7+ANKXe9 z%24rNqNeNH3yoj+A8*g%wkE!N_&JFl@*T#@s6lrY&gH%9bDQv+kKdnn@}Epz&ZC~s zX%;vW2R<`$bA`n=wDZdb!xWH_9wF+#3@ta5krKfo=d16LD8edna*YtOC6X)(5>2e5 zw_K1RXP4-vexfX-2yc*%A4-}{$11)Wn|(o5zilHUMOo-@!@FHamm6)^VKOKd_huM% zGFzunUhG<}LXghBGkblN^)=IJ&tJzVh4t zQPu}?FTF(tn&W7Z@mIe#I)Q-$H&Ke- z<%3DD`r|31M&l2*l~2*E2t}VNA9~JP>vc+veg^-=XGX!@V?aw*d|{CJQP1iECwuO@ z(YSjOLK`nt&s=pi>yi!1)Als!Gzx3-dHwAMQO0*`L;Xs@Z1nBvxjt)2!+`<1PW@3~}0r;KF$t8W;)bh@R;o8-S!;cl!VTHr=z}Hq=h^Ii$ZsZXcvTwn$T$up|1#* z@h_WqF06xSCoN+*``jPf+BKMFZ6-&uqV;r98~oOP8sLR+(0>g{zgsA9J)wjcpR8r{ z?ITO&#;u0T#2*Ma6E^6wbFpeQxgMoBo~{UOZ1zV}LiL**C1G#BMdAFXSn0gdAR{0| z3??PB8MdXd*h&41xH`SOyn0|Y#tpXXW3YEI5#{COx&6yN6(PKT|K0=^(JwU!^)}ef z_e~I0r>3J8@&%7)H%D$(JZ{cGRhlQu6W>>^dJdEyf`_LgqefsYjvZ_&OXBt(W4K4^kM^^s_ny7_Tp1|R%xfQy z(SY!9s3mRBICi5(w%p6h6PeB23h-)?=XpCk{?2MjUtqE6jdKmg#YenCv1T~dyGO_Q zmS*}#ex}bllq}7My7*>2^$Zw-h#E3 zd`MWt0o48~VJkHpirVRt6ldXOW`lqIlZs6?Go&M;F^QJjuM3Z9#r__nDk#Y4W*mRX zjLMk9V&U_>$!gLgrMfXBAN|2E|FHcen1s~k;X%YwBE@vNQ99N?tT>c^w%Xdb-&k^v zIosd@tAAN)Ahi-Z94$BMGz6a{6urW@V^rb?S2Yor49*QE&&cM;PcdfM>3tz|+I1}K zt|p*^qh5{KD8Sd8Tk#dOWys(eE!V>YH`5Ny7sOs7;Wx{y}E2Q3K@C^U6 z@z!p|LX9ucFNYU0KBY`kwRNNO(K6~sOw!2C!JAM$4oUD%yBou?6iE&8?i?*`b;*A6 zRQEAPO71&yMvRlcVX~~0e?O)PtVGQR{QA0M&TVgRuU%(>X)%!#`|mBHRYB{&DOfhu 
zMFVR>F1uFhR9L&FS{(~;u<9L43`#M5FrqK2v%Y{P3nD;qO!{f&+$lnP717pSE@A}x zE_Sf;Jc@W_;f*opw@`Ji9E1dd5~(}~t{4hdn%n|JRlDV=k@%#%i8Ta1Z{}^1ul^c= zzqysU_a_n~R(OazmDgg-8SE8xFx)D&L(J~-P0X1j!<27pM~XAr?@7=qCUtbHUC?Fn zepP%QXQvg7)oG0*o*qK=ZM~h+cKe}GLoKl7NwrD9QTl0!1UFT{_{;#YoGmQ~?dyjn zOVz9Rjp#m_tpT~3N0SU=Tct!AR6iMUsLvd*i zA2<0GEZyt7rjd<`@O>HHe4XBIIwwnC>>B9Ns54fhFiaKL`WwN(q+O5Vx1JWDgzerd ztA7wy^j_XrSp^x|@JwB7uM@n_X8mY%g86qL1fFou_-4M7O2Fo zf7KfA<43lUIQS+sYQEc|N>GDotBcL!?ZT3-l+n+xk6{Cj^>v{###-r~>`-o~CAc!1 zlC>(dw{IFNu9;o3oj!9+C@8iw15%jDik55}brZh$GI%QEAN}mesf(!hE*;|A4lPgk zXz3nz@;;UJTAXK``e*5<7N*~Ou99_+vh~aU|^7kv5gv2lLY zSkY{7(NEf#v?AMmL)@QQv0WKre4rn@6A_MQ0jKQB393k4L;-Uf#wNXSH?fAI#VNn4 zdFIId&#adx3nZ9+!#q8YU09buKk@F@)-QYUf4};>YuuL+ZOba=OUYV-Cf;e??2ez$=;i@To(zMljNQ69OJ&9A!xf^ z>P_A^bj*rOBL~|TM!jKW3@j_II2{_UGupVV;CY3A(ezpHNlS*E*e;CvHr7RDr_F7A z;;$Tl52;b3%?g48Mah5Z`rboTf1MfAId#m;UGMTh0AW1nUwuw*#6SFIHPQXMpfPis zMMIhSr2y2CWUlMGN6sG%O<~T2hTTv{0#|#7rT$L`OJvTqul)2M(?#_|F*GQR9;VU) zEq3vFMl$z-<`q{$lIH?}A3d}$kXWXe?GgH-M!NUxgMj?I$wDXlkXQ+$H+Bz3Y9`*G z7x6{o=Z?c&<9-qxIO3f3sN4WYo)(K)lz^{486O_%6OI~MTNeUeeYWdBplNdHTSZ-- z6WHbHKf;*=^S>30|HUx(4RP~9(2&sVi;2if3@P+j7=vS zizw((7x{ohq=?c+%sxlB;noI3%n7U5pKD^y6tqY<5VH;YvV@QnaMZc+;eoM;rMU`U z%;0{r&Lk+Rn3ZdRizRE|cJUJy+JfCw(${pLhE`xYP!2MN|EDVEN8QO-Qe;QX@~~jO z1v(ARLZnz$&|M~?1Xk*F#^G*%W{qLND73*E3!4#c*9f*9BMFO7R|kg|zif=JHHeAB zt7+0cN&LQMG>jh?hu;=?6rkO;d|`Vs)CB#jL7=lx@+}j!Giz$mQk!y3cWO{+x+z*x zjf4vfMXVrUoFvFb!C(^S^u(YnsYDxO^vaYKk*z|mrl1g7<8_jDHC1omLZBImKCBi z!*TaiMF1yT(%_#GD8KCa-6&kXTa^5^`9WqpvSO`|lrq3TOGKQes*kJkMYG0vjNWSu z`X4o&fXmX_#+v#Q(?6ufEB6?~QVLz=9!`#;lJ+GG&?u4PpJ18VZ0t?D5PdPaDS?tq zu(EfV57n&xpRW?={Qd-&&m8+ySt9lg@ED9-t(Sh9u>J3c6h$f?uCL%XDh*|tRTe`E z=y!Q|mD&&jhX&^|I9y469!?WL)x-k1_9>nVf^vgww=khQVCtyJYH31oG|JG4o#)Mr zj{Y2l^TDYk<;`s@$XTE;uf8siSRYz1g;8!Cgss`=0ISxl9e7YUQ)y59nu2E|lhpfT zA@*Lq#L8N#(qe2@bPKaGO2&c{(jr_w$6YrmEsQdg3H1lOi^@)9ywVI)GiKW#4&Lz! 
zS!s23%vtwtEz0fnUm)c80t{XI!xNzTTSh@dLSla03-7~o%4B1}4K^+kAg?E+uMTO7Zn0|d@9g>{BIB|cqkELQy1LF%TVS0m zvsuA2u1$!f+txA5;a1LzoVU4OtvlviCl`p3Ww{u}+ex?WY4!-pcIg4{hR6hN|Bfz} zvZEaz7@RrZ-}GrLv1IG!mD`38xLld%S7D{ zREod4Y_GD<*Juz|3VAwTPveDAQHsfE^fy|QWmuh(5WF1owVf|V@VD0wjBAC+4%aw; zzj9nhh!*vc2PYJOA0&Mc(yUpDrwLGUx%q9tgGGQXHTcX5O81G3i^KX1kbRW@y|=yn#mvmCv0P5`M`9xB zRIr`7d5OZa3mhCA+NWjyFv(HG=#Hhe;oU~<+_%Og*q8;$W{gB!qTsO-_XmTJlK#q^vNe10uB3K!Vr!O_90aKogl>fsQ~|=AznYv#~J~T)u6sb>1#y?m@_m6Un{l zow%AJOVpW}80tWO@xC1%libtRx3uecG>#oa_PB?l)>xe}7#3;{ZdH z7iegf0u@wp%L?6k)~Xa*@&~8M#FJvNjj{@rCfFLJg%0NrHe$oRUuI+d=)(^=DmNj+~>&J zM)(-k7>58Vi6D!Yis_e^Z5bO4ohLRLv4}0a2p5~Yzf|b{rIF7OAkZ-I#!3b)8iMB8 z*Wz1vx!D^1FW=bFaw|DCGI3+IGr_T;rCT=ZP{ zA_ll%TO1ofCGf~JI6yGqbu)&M;1^=7(tuor(>T(LV1;V;gG3{mpWQ@r7Zffd!!;G* zI;@`|OVAH;pyQnkqcJ?eZ;p3~a{^MLT{UFMFJ^RdU^yhK`KX4sP`Z0zXvlM?>onnT zX-0Rq%RIdD3WD5{#2UD!dHf(HDK!;e5nct~osWQ=+Srbf4-3)#^Yf5~-Aj87OKu(M zbzedwBeeVWAju5{MgYD-%0Rr<(=OuFCcc(?zi@N;UYZfABjIKw zy9=_lHdFnR9^q61xYPPO-wfyFp?qRni?x?9l2n#)WRr!Q`E6tQZrPFl{%s=d_ysYXs^;HLFSTwJ47o{M$pgM z9+yESNCuh2=C(iS@Rf-s{CV3ZMMyUNM&vMni_XR5+WUnNy6st|3zfmX1=dET!-E?Y zia|XrE(!k+i8q;{Fv@S7UvXpCUdsn<5RwWS&3=#m}CFKfM23`4zbe?Vd`*?QHw0$MgB--d@h?y{@-|J1*rCYk~Ee zLB3W!ir&UN5jSrw_B>)IbEpUrBnfVbrP9u7AO9!}6!xIq4 z50;SzPO^dINplGLRQOyz4>6DWtH@KQDD4Y z2~Jf?H(t1#%)oF*wL!-?wxf7>yOJ?%xfKkx#Xe}b3&2wEZoyEdFLD)|qw@ z1N}#Ws0cQM*!otm?C}qji;X}@D;O&;$t^S*p~C18)l@JFIq5f*U7WVjLHS-RgFsGdCfgGz3?)lLhPQdq2Bsk+D#LoGI?8|H#vq6o@uKZK~>V!)EJSZJtr$KW2{?qAXZ{y9DX;Kq&|>|vf0j$ z_YJ+OvpsAtz1qeF6yk_`3bLlAh~#tmG~7=TJG#5O2Ztri>{r*;%s~E}X+l1TzmK%x z{niFAd=FWn{Z~q%ujubdH6;fHwn7U~adQGak#45&*h{{S3xa*wO1~6LMxHNdM#z)3 z^0(|nEhm=4xdEA4kDz~{?5{9GmY|JsmbI)cWoznlpGH) z)93G4s*OxadBf@#_FlKo@Ze5!YrhuQkFIXSBf_D9{l!-IU2#}8v^Z+bXE>Ji$(SM1>dY_)BY4o*Ynly|64uk z;vJ^jMKEwJZ~tiPYKZj#iMGoev}d;GuF6A8c&;z86>v7Z8;_!YzyRWE}a)~nt_XTaC|t9 z@NN$EMZ4Z>4YpsV8I|-D{>4q2Lx8L;EUWI{(+`v-)AMa@ZKh`ROaXy@egb(y{x6bZ zyPwK8PfqJ+LG(g+uui_Hot>au9&-d?F@X<|mk;VFHAU5VtsO8QP;1^zeek5mPrUjee%061qVCebg!by|AIJ?oIU1 z2fxcF8-Mq|d-}1gRizFGdU?$BN@I#0n!+W~UO!sJK+vMpLdQFbLiVqr zV5+j2+s$6z=AQ67y6Z|DUP8sRTZ#^!3L50U(=)?sGn~sm3e3W|AAo zw%wK2Pf36e-3t6PK%B{$JJuKc$C&(kCaiESjzX=>PhSJGT4X*FS&%UKG;Syu6%_@G zjEudc@&s*r{Qh48Xa$%+`nE+ctiHztde6>={|{{Fe;e&i*QDyM%^tT4i zRN$(_T<_nOKzhC!xS?~mnO#@Pabb5WTFboXkkVW-#yHOBa1zAPG5?*6sR!S*i*%lQ zTD)4VpZ#T^FFdFV_aA4TAGeDcoK3V}_#$`sAm+mcw}%SSvzU$LPJ%=6smydVl=6N9 zAHFpT^4$|)wm0b|5IffYCg;+DkraQjCbo^mI7nR&2}wn3;#dR2E-pA!LO&CCA;*%k ztMtJC+MCSs=+5z-O|%c4UR@WJxs=}bFJdpWG>ZSOH`fLUrv2+z7%2o_y45^)(X zzPCpC`G4LQ!Lb~aIC*$oer6Da|sl(yPIS#8_2&T$3plsY8)UYtG%L6X%4 zveQCHYpML-k7$|c`%-Qyivd$(3sT!t}3w~z9QFAnm2K`=~wt8I~ zm3U7vf;R=S8~3+0vM2BMzf%(W{y?lJ&CD2$Ux1W(*27^NjxFn)PQxP1NBHjt62eL@ zYw{aPa~7z$4Zz$>kOB@VgVV4QHE^f0j2n<3=__R1%;1WDJB< z+9ekqj+rrvrV3E+e*bPypqVKWgN|*jv54;fJ-qPXg>=7_wf~s!`|@{YG&km8r*w-R(}Q90n>Lm3|G9?v zXNpN`l#ahu?n*pJtk4*x`}++>r>74AHS@-!s}LxSIN5)nZ3#y*pLCob2g4G9`nHRh zcfXuwhNj{%uDW$VZia?2wxsA7V_rs%TygrL%Rh1ZsFBT!?j;Q4)$vzbsX+Jyun#9K zzSR?b2ZLrbuFzX@h6b?&y`xbVcoZ36*`)FGuEaPvTl?4|_Zg3{);_?X%un+m-~cXxDKNjSHzajL z+u-}60D^?}oETU=;fcXt@?knWj>rs?@9%lm?`}i>G#h7O5JjP(IG>L3dTsuUD~QC^ zpOoN;}II!WES8$C*cB0qyJ4iy4V%AE3 za~r>WsQl`1d0-)@>Qt!;!Z6?Y+2O|w#~D}$sU}GEeUc>fYkD5(`X3lpsVQ1ZqFB7U z$3m}*tOirQoLx6UH*W7D)eYIizK*E50ZVJGYRC}BJm8yTHd;1103#v+xLgc>t5dpt z9lUW3CmW(#6fXWz1o{RHN1yENoRCD=l_QgDU&P-6TpKZB2f9|08%EhtSw$0^&$z02 zS8ybAZ;5qS0ul#*8ScCCnwpsn_vP<4mkV&Pr#3d;=wW2PX@QhNkaqsE`Ro!%DJI5I z9NxUJ8;B#@3TEqxAIz)v0eo6vFhOg98TFfUzF1}rJ8+DO+WY8_#beiL9l@qLy%*P@ zk)FR|1HU*+1N&<_Ytrv%D%KA*#Opp3SA3>2I)Jigc_Hz6g23{9LL1RR4ZRx2-jsJ? 
zxzn9E%ruw9>9?1Bh5%lzZ6}%dZtPBT+IGFlAWMf9&N^Qpw&*Vox~9V)xXmCz{D(_N zK}`tOoux6bh3?p}9v2b-@57jIsk4|aOJ*|eBviEss4{-pg~AoGM}%&ni$m~zB-h!3 zdk;K`_Unu>JKh$ey)un5Xf7XUIHi?;K0{&5;UsKt|JqOSoiP2eMm%UwNt@S+TndpS z?v(gJCb85?v;`HkS1L~410jYL36WNh4JI49_UjG!qBc@8vJ)by(0`3F+;JJWE{BUr z#x^B@!Q1LoB^OZLB#L!}>!lYHyINsML^T#bA-0@_OZ>;|q% z3R1sZIrk7$m^#~DeXlWShaP1T3KKITdpTA!rz^rrp+Ai7hv?%8d(Ihr3H%tSsx43C zR|3POmfX`x!Stv~$96-j1IP}fVX^$%RZRs_=>hU3WE0@bEP+>qz{|a*q&|Ht4u=Ii zC?ce!lpJ`7ba-5@3g%ngGSj0fi;ioa=!3FFr(RD@tV%SEKa0qgtDD>*1(2~h(}iv{ zElx|wMBR_09l}0hKa#*Re-M2;bmS0&k8cLwRRmWqwS<_j*Fr&l0awo?K!a^X2alT_ zL#z&O%3-e05vo=>u3V9B9*a+{qWZPm?aghvpjq)g_org1fDkc^BQiJnm`9s<@({kH zT0yzhMcVy%8M$`tJFp_ESY=Ex4pVcx(q=va8$kcJmFqvrNbR8JMDp#~8G#)R$B;l^ zm^%c{JiQv>!(n5`E9n<#YW%Hi3t|)wn+>8=S2^d;DYdjB6!Ae!+LSBXdCUUMWFFfz zLWB-+<{jv%Fx>vl0kH4b^pMh{qFb|Yiic=C-oNCMlM>5!R_2bJ8T1gS5W2)89|xK{ zB#u=E88Iwd#o(wIRfbkz2^I-w9ZN(_GZ9=&{-B~4s_euZF;Or{{*F)K!qP?xitP-cYd0hfDQ07z z@Mig~!|x<513@Nq>0mrkqiU!q5wz9)Kv-GrsE+jyI?!up1M3dzbo0W-gHWSD=t!x6?H%=H37CRj@AN>Mp?!RGdEe&Qi5li>xNg{tG~@ac6Ql-1NcF5Z! zspGI1_9}HUE*%vijkXK)`nR_cowZ*PYweZW#J-tBZVZ&7s6erLFG zA*1h(Fz-bW5LHSv@l{`?Q9jt7sg^hA6ZwCtoR()}2BLW(*T;~`OW#`yLRP2IFFPG7 zNT|TUi?>sRNf75S5{s;$5!>(u?6{pd)XGZe@Z4L))KlDx5!hY|V`!Lj}7nnY@T`~yK5246~ z@RBK;eWl(8>q$cin?Wn9uk=Gw(m~{I$1?HoK}V@_*S9l>+RwPZW`aAupO8BW!_AiU zkdxiR)#>*JUrTgyQL)oQM(qr+C&$BZ*Y%&d1bu%VlzW zcc1s&<*r1-*-psTs^9*NrSt@C!6#*bpC|MnrGMJ}=V)cG8&*!$Hl25i0#qz`Y@ZTM zNMdjFm{UbOULb)X?!j~VU`L&oADh4O9utmxDo=NQiLd{D@_Mi`ELovZP7&`bZKs9{ zbdx7pCwjg7NvFyy9=;{WxaGw&4lu$#2#tE@Gu{QZ%FH3}$MghZZuowNbLw&L)L&k( zye>yvc|Q$O;Br~xYC5ileEz+N=Jh4ES^Dzr(X&YUO1Q!HS>OFa5dY6C<|^lTd+~dN zyS=YFalZFrtGoQ!2HsqH{Xk2jIE(Ehl*7xbjMyl&LgTq>!S`o}eX`M+8V%v~Yq{C7 zhOKz-_+c}j;>YDaBP832*u*e}Iq9O$hDL23Py846%Sdlu!C{T64(a2y0ZVg8Cm&-MBv^QN4wWTI5n=caflwNeGG_{$=*7pv)dV^=C*bSDaK(|Ks8T7P3 zCa&1yNQ9y|bO5oilGJM_e}l=LkNfX;T>sB@qxGT37bcF&MUr->r@@7zz0hQTC`%Tf9|^d#B4R1qp9% zhm)rtVOwOww<#&y-?!}TS6T_isqcqA`pS1qjp0jF!CqVnd0h7Ki;|7%CGBe=l<)%lHoOZkD(kc755tPbpj` zY$7N{ZbeWBG*zrQ;^tZ0n&uh`Vrz`2Nxzb*H8`pYBW9Ge zLN7$|blB44FVu#gwi1|(guBVi)p*BrOD`4JRgYBYm%{U*WZtoD9d`Da&2F+Ub2bbA zPNz#K3?wln|K!5656$bvDYO~!zJEH3=h6U&5I1wFQxwD6OVBZUDgK@sZ9QGZ|Df+p zUFJlrjWO7!79@aF_!?1s(C@JOSbo%AWY)}Ap<0cndD%@?Mz+nU+kzk2+r3!i{S34g z_CWbT0nXB>q?W>{2E{OCy-wugql$|UcikBPn~Sd@QrhLvcvh9r5%1+^!|c;mn-Gmv zZF(~RW16+yO)we$XJELOUHPM2u>4 zygsreLyu{-@iOJ`L?9KfHAZM3m3YJAuds$GYk^7hetW63znY;8#L_gjI6 zKB{yRjOQj3@EfXeSw)s`YEKc;P2gcffvvjEgJz%+EVYx9#41~%%h>0UIu}(Gf(rcn zgh$h zb%?gw+3@8FT|;+Pl-{cAZ#IFYi_16KZ=ua|`G?uWECm||w_G2A&`0{RU?UmA zb-T7fr`+*E-FDXHHXF9+eOX|Wa5yPXpl=SJCw}g_7_0poJ#A_d?)$5G%6TKIblX=k z-lg{PctiwPiMrVf=ld6fH_dgNUKmCk$x+WMqvPgXxH2b!$d$f@;oBF z$Jx1^jNTL2wh59jx$#ls0c3>*es(f$t)kK8NozJ!q4|q)OH? zdd$tUb25Nz81DY90?fq*pkD8**8C|i4wBT@(X=Q9*2^U$wBHZr_KxxM1T+nwnzUv0 z!cl!N*rP2w6fXR#A~laLj2k|L2Iclct#jEfE{fc5ZfuY~_FJA)b)%cvwk=odx;grM z>?2&?cehFsyFs);Mjq&5L4~nmli@BL@7#2+Ujx;!n zlg+Y8ixS!mAPnVlG1gW=VNioBXeP!vT&!*1tYtXkK@mN*! 
[git binary patch data (base85-encoded PNG) for deleted image file(s) omitted]
diff --git a/Health/onboarding_instructions.md b/Health/onboarding_instructions.md
deleted file mode 100644
index 4c83577b5..000000000
--- a/Health/onboarding_instructions.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# Onboard to Azure Monitor for containers Health(Tab) limited preview
-
-For on-boarding to Health(Tab), you would need to complete two steps
-1.
Configure agent through configmap to collect health data. [Learn more about ConfigMap](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-agent-config#configmap-file-settings-overview) -2. Access Health(Tab) in Azure Monitor for Containers Insights experience in portal with feature flag URL. [aka.ms/HealthPreview](https://aka.ms/Healthpreview) - - -## Configure agent through ConfigMap -1. If you are configuring your existing ConfigMap, append the following section in your existing ConfigMap yaml file -``` -#Append this section in your existing configmap -agent-settings: |- - # agent health model feature settings - [agent_settings.health_model] - # In the absence of this configmap, default value for enabled is false - enabled = true -``` -2. Else if you don't have ConfigMap, download the new ConfigMap from [here.](https://github.com/microsoft/Docker-Provider/blob/ci_prod/kubernetes/container-azm-ms-agentconfig.yaml) & then set `enabled =true` - -``` -#For new downloaded configmap enabled this default setting to true -agent-settings: |- - # agent health model feature settings - [agent_settings.health_model] - # In the absence of this configmap, default value for enabled is false - enabled = true -``` - - -3. Run the following kubectl command: - `kubectl apply -f ` - -Example: `kubectl apply -f container-azm-ms-agentconfig.yaml`. - -The configuration change can take a few minutes to finish before taking effect, and all omsagent pods in the cluster will restart. The restart is a rolling restart for all omsagent pods, not all restart at the same time. - - -## Access health(tab) in Azure Monitor for containers Insights experience -1. You can view Health(tab) by accessing portal through this link. [aka.ms/HealthPreview](https://aka.ms/Healthpreview). This URL includes required feature flag. - - -For any question please reachout to us at [askcoin@microsoft.com](mailto:askcoin@microsoft.com) - diff --git a/Rakefile b/Rakefile deleted file mode 100644 index 4ac2b1137..000000000 --- a/Rakefile +++ /dev/null @@ -1,9 +0,0 @@ -require "rake/testtask" - -task default: "test" - -Rake::TestTask.new do |task| - task.libs << "test" - task.pattern = "./test/unit-tests/plugins/health/*_spec.rb" - task.warning = false -end diff --git a/build/common/installer/scripts/tomlparser-agent-config.rb b/build/common/installer/scripts/tomlparser-agent-config.rb index 052bb5a5d..ebe1e3982 100644 --- a/build/common/installer/scripts/tomlparser-agent-config.rb +++ b/build/common/installer/scripts/tomlparser-agent-config.rb @@ -12,7 +12,6 @@ @configMapMountPath = "/etc/config/settings/agent-settings" @configSchemaVersion = "" -@enable_health_model = false # 250 Node items (15KB per node) account to approximately 4MB @nodesChunkSize = 250 @@ -89,10 +88,6 @@ def parseConfigMap def populateSettingValuesFromConfigMap(parsedConfig) begin if !parsedConfig.nil? && !parsedConfig[:agent_settings].nil? - if !parsedConfig[:agent_settings][:health_model].nil? && !parsedConfig[:agent_settings][:health_model][:enabled].nil? - @enable_health_model = parsedConfig[:agent_settings][:health_model][:enabled] - puts "enable_health_model = #{@enable_health_model}" - end chunk_config = parsedConfig[:agent_settings][:chunk_config] if !chunk_config.nil? 
nodesChunkSize = chunk_config[:NODES_CHUNK_SIZE] @@ -179,7 +174,6 @@ def populateSettingValuesFromConfigMap(parsedConfig) end rescue => errorStr puts "config::error:Exception while reading config settings for agent configuration setting - #{errorStr}, using defaults" - @enable_health_model = false end end @@ -194,14 +188,12 @@ def populateSettingValuesFromConfigMap(parsedConfig) if (File.file?(@configMapMountPath)) ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported schema version") end - @enable_health_model = false end # Write the settings to file, so that they can be set as environment variables file = File.open("agent_config_env_var", "w") if !file.nil? - file.write("export AZMON_CLUSTER_ENABLE_HEALTH_MODEL=#{@enable_health_model}\n") file.write("export NODES_CHUNK_SIZE=#{@nodesChunkSize}\n") file.write("export PODS_CHUNK_SIZE=#{@podsChunkSize}\n") file.write("export EVENTS_CHUNK_SIZE=#{@eventsChunkSize}\n") diff --git a/build/common/installer/scripts/tomlparser-prom-agent-config.rb b/build/common/installer/scripts/tomlparser-prom-agent-config.rb index be9d08e59..664691a44 100644 --- a/build/common/installer/scripts/tomlparser-prom-agent-config.rb +++ b/build/common/installer/scripts/tomlparser-prom-agent-config.rb @@ -84,7 +84,6 @@ def populateSettingValuesFromConfigMap(parsedConfig) if (File.file?(@configMapMountPath)) ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported schema version") end - @enable_health_model = false end # Write the settings to file, so that they can be set as environment variables diff --git a/build/linux/installer/conf/container.conf b/build/linux/installer/conf/container.conf index 093c9ef12..438c0891d 100644 --- a/build/linux/installer/conf/container.conf +++ b/build/linux/installer/conf/container.conf @@ -33,16 +33,6 @@ @log_level debug - - @type cadvisor_health_node - @log_level debug - - - - @type cadvisor_health_container - @log_level debug - - #custom_metrics_mdm filter plugin @type cadvisor2mdm @@ -107,34 +97,6 @@ keepalive true - - @type health_forward - send_timeout 60s - recover_wait 10s - hard_timeout 60s - transport tcp - ignore_network_errors_at_startup true - expire_dns_cache 600s - - @type file - overflow_action drop_oldest_chunk - path /var/opt/microsoft/docker-cimprov/state/out_health_forward*.buffer - chunk_limit_size 3m - flush_interval 20s - retry_max_times 10 - retry_max_interval 5m - retry_wait 5s - - - host "#{ENV['HEALTHMODEL_REPLICASET_SERVICE_SERVICE_HOST']}" - port "#{ENV['HEALTHMODEL_REPLICASET_SERVICE_SERVICE_PORT']}" - - - @type file - path /var/opt/microsoft/docker-cimprov/state/fluent_forward_failed.buffer - - - @type mdm @log_level debug diff --git a/build/linux/installer/conf/kube.conf b/build/linux/installer/conf/kube.conf index a1c8bf928..53040e2f9 100644 --- a/build/linux/installer/conf/kube.conf +++ b/build/linux/installer/conf/kube.conf @@ -1,11 +1,3 @@ - #fluent forward plugin - - @type forward - port "#{ENV['HEALTHMODEL_REPLICASET_SERVICE_SERVICE_PORT']}" - bind 0.0.0.0 - chunk_size_limit 4m - - #Kubernetes pod inventory @type kube_podinventory @@ -38,14 +30,6 @@ @log_level debug - #Kubernetes health - - @type kube_health - tag kubehealth.ReplicaSet - run_interval 60 - @log_level debug - - #cadvisor perf- Windows nodes @type win_cadvisor_perf @@ -82,11 +66,6 @@ @log_level info - #health model aggregation filter - - @type 
health_model_builder - - #kubepodinventory @type forward @@ -357,29 +336,3 @@ retry_mdm_post_wait_minutes 30 - - #kubehealth - - @type forward - @log_level debug - send_timeout 30 - connect_timeout 30 - heartbeat_type none - - host 0.0.0.0 - port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" - - - @type file - path /var/opt/microsoft/docker-cimprov/state/kubehealth*.buffer - overflow_action drop_oldest_chunk - chunk_limit_size 4m - queue_limit_length 20 - flush_interval 20s - retry_max_times 10 - retry_wait 5s - retry_max_interval 5m - flush_thread_count 5 - - keepalive true - diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index 9fc7ce08f..7dcbde31f 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -57,10 +57,6 @@ MAINTAINER: 'Microsoft Corporation' /opt/test.json; build/linux/installer/conf/test.json; 644; root; root - -/etc/opt/microsoft/docker-cimprov/health/healthmonitorconfig.json; build/linux/installer/conf/healthmonitorconfig.json; 644; root; root -/etc/opt/microsoft/docker-cimprov/health/health_model_definition.json; build/linux/installer/conf/health_model_definition.json; 644; root; root - /etc/fluent/plugin/lib/application_insights/version.rb; source/plugins/ruby/lib/application_insights/version.rb; 644; root; root /etc/fluent/plugin/lib/application_insights/rack/track_request.rb; source/plugins/ruby/lib/application_insights/rack/track_request.rb; 644; root; root /etc/fluent/plugin/lib/application_insights/unhandled_exception.rb; source/plugins/ruby/lib/application_insights/unhandled_exception.rb; 644; root; root @@ -106,32 +102,6 @@ MAINTAINER: 'Microsoft Corporation' /etc/fluent/plugin/lib/application_insights/channel/event.rb; source/plugins/ruby/lib/application_insights/channel/event.rb; 644; root; root /etc/fluent/plugin/lib/application_insights.rb; source/plugins/ruby/lib/application_insights.rb; 644; root; root -/etc/fluent/plugin/health/aggregate_monitor.rb; source/plugins/ruby/health/aggregate_monitor.rb; 644; root; root -/etc/fluent/plugin/health/agg_monitor_id_labels.rb; source/plugins/ruby/health/agg_monitor_id_labels.rb; 644; root; root -/etc/fluent/plugin/health/aggregate_monitor_state_finalizer.rb; source/plugins/ruby/health/aggregate_monitor_state_finalizer.rb; 644; root; root -/etc/fluent/plugin/health/cluster_health_state.rb; source/plugins/ruby/health/cluster_health_state.rb; 644; root; root -/etc/fluent/plugin/health/health_container_cpu_memory_aggregator.rb; source/plugins/ruby/health/health_container_cpu_memory_aggregator.rb; 644; root; root -/etc/fluent/plugin/health/health_container_cpu_memory_record_formatter.rb; source/plugins/ruby/health/health_container_cpu_memory_record_formatter.rb; 644; root; root -/etc/fluent/plugin/health/health_hierarchy_builder.rb; source/plugins/ruby/health/health_hierarchy_builder.rb; 644; root; root -/etc/fluent/plugin/health/health_kubernetes_resources.rb; source/plugins/ruby/health/health_kubernetes_resources.rb; 644; root; root -/etc/fluent/plugin/health/health_kube_api_down_handler.rb; source/plugins/ruby/health/health_kube_api_down_handler.rb; 644; root; root -/etc/fluent/plugin/health/health_missing_signal_generator.rb; source/plugins/ruby/health/health_missing_signal_generator.rb; 644; root; root -/etc/fluent/plugin/health/health_model_buffer.rb; source/plugins/ruby/health/health_model_buffer.rb; 644; root; root -/etc/fluent/plugin/health/health_model_builder.rb; 
source/plugins/ruby/health/health_model_builder.rb; 644; root; root -/etc/fluent/plugin/health/health_model_constants.rb; source/plugins/ruby/health/health_model_constants.rb; 644; root; root -/etc/fluent/plugin/health/parent_monitor_provider.rb; source/plugins/ruby/health/parent_monitor_provider.rb; 644; root; root -/etc/fluent/plugin/health/health_model_definition_parser.rb; source/plugins/ruby/health/health_model_definition_parser.rb; 644; root; root -/etc/fluent/plugin/health/health_monitor_helpers.rb; source/plugins/ruby/health/health_monitor_helpers.rb; 644; root; root -/etc/fluent/plugin/health/health_monitor_optimizer.rb; source/plugins/ruby/health/health_monitor_optimizer.rb; 644; root; root -/etc/fluent/plugin/health/health_monitor_provider.rb; source/plugins/ruby/health/health_monitor_provider.rb; 644; root; root -/etc/fluent/plugin/health/health_monitor_record.rb; source/plugins/ruby/health/health_monitor_record.rb; 644; root; root -/etc/fluent/plugin/health/health_monitor_state.rb; source/plugins/ruby/health/health_monitor_state.rb; 644; root; root -/etc/fluent/plugin/health/health_monitor_telemetry.rb; source/plugins/ruby/health/health_monitor_telemetry.rb; 644; root; root -/etc/fluent/plugin/health/health_monitor_utils.rb; source/plugins/ruby/health/health_monitor_utils.rb; 644; root; root -/etc/fluent/plugin/health/health_signal_reducer.rb; source/plugins/ruby/health/health_signal_reducer.rb; 644; root; root -/etc/fluent/plugin/health/monitor_factory.rb; source/plugins/ruby/health/monitor_factory.rb; 644; root; root -/etc/fluent/plugin/health/monitor_set.rb; source/plugins/ruby/health/monitor_set.rb; 644; root; root -/etc/fluent/plugin/health/unit_monitor.rb; source/plugins/ruby/health/unit_monitor.rb; 644; root; root /etc/fluent/plugin/ApplicationInsightsUtility.rb; source/plugins/ruby/ApplicationInsightsUtility.rb; 644; root; root /etc/fluent/plugin/arc_k8s_cluster_identity.rb; source/plugins/ruby/arc_k8s_cluster_identity.rb; 644; root; root @@ -164,19 +134,14 @@ MAINTAINER: 'Microsoft Corporation' /etc/fluent/plugin/in_kube_podinventory.rb; source/plugins/ruby/in_kube_podinventory.rb; 644; root; root /etc/fluent/plugin/KubernetesApiClient.rb; source/plugins/ruby/KubernetesApiClient.rb; 644; root; root /etc/fluent/plugin/in_kube_events.rb; source/plugins/ruby/in_kube_events.rb; 644; root; root -/etc/fluent/plugin/in_kube_health.rb; source/plugins/ruby/in_kube_health.rb; 644; root; root /etc/fluent/plugin/in_kube_pvinventory.rb; source/plugins/ruby/in_kube_pvinventory.rb; 644; root; root /etc/fluent/plugin/in_kubestate_deployments.rb; source/plugins/ruby/in_kubestate_deployments.rb; 644; root; root /etc/fluent/plugin/in_kubestate_hpa.rb; source/plugins/ruby/in_kubestate_hpa.rb; 644; root; root -/etc/fluent/plugin/filter_cadvisor_health_container.rb; source/plugins/ruby/filter_cadvisor_health_container.rb; 644; root; root -/etc/fluent/plugin/filter_cadvisor_health_node.rb; source/plugins/ruby/filter_cadvisor_health_node.rb; 644; root; root /etc/fluent/plugin/filter_cadvisor2mdm.rb; source/plugins/ruby/filter_cadvisor2mdm.rb; 644; root; root -/etc/fluent/plugin/filter_health_model_builder.rb; source/plugins/ruby/filter_health_model_builder.rb; 644; root; root /etc/fluent/plugin/filter_inventory2mdm.rb; source/plugins/ruby/filter_inventory2mdm.rb; 644; root; root /etc/fluent/plugin/filter_telegraf2mdm.rb; source/plugins/ruby/filter_telegraf2mdm.rb; 644; root; root -/etc/fluent/plugin/out_health_forward.rb; source/plugins/ruby/out_health_forward.rb; 644; root; root 
/etc/fluent/plugin/out_mdm.rb; source/plugins/ruby/out_mdm.rb; 644; root; root @@ -195,7 +160,6 @@ MAINTAINER: 'Microsoft Corporation' /etc/opt/microsoft; 755; root; root; sysdir /etc/opt/microsoft/docker-cimprov; 755; root; root /etc/opt/microsoft/docker-cimprov/conf; 755; root; root -/etc/opt/microsoft/docker-cimprov/health; 755; root; root /opt/microsoft; 755; root; root; sysdir /opt/microsoft/docker-cimprov; 755; root; root @@ -217,7 +181,6 @@ MAINTAINER: 'Microsoft Corporation' /etc/fluent; 755; root; root; sysdir /etc/fluent/plugin; 755; root; root; sysdir -/etc/fluent/plugin/health; 755; root; root; sysdir /etc/fluent/plugin/lib; 755; root; root; sysdir /etc/fluent/plugin/lib/application_insights; 755; root; root; sysdir /etc/fluent/plugin/lib/application_insights/channel; 755; root; root; sysdir @@ -271,14 +234,6 @@ touch /var/opt/microsoft/docker-cimprov/log/mdm_metrics_generator.log chmod 666 /var/opt/microsoft/docker-cimprov/log/mdm_metrics_generator.log -touch /var/opt/microsoft/docker-cimprov/log/health_monitors.log -chmod 666 /var/opt/microsoft/docker-cimprov/log/health_monitors.log - - -touch /var/opt/microsoft/docker-cimprov/log/filter_health_model_builder.log -chmod 666 /var/opt/microsoft/docker-cimprov/log/filter_health_model_builder.log - - touch /var/opt/microsoft/docker-cimprov/log/fluent_forward_failed.log chmod 666 /var/opt/microsoft/docker-cimprov/log/fluent_forward_failed.log diff --git a/charts/azuremonitor-containers/templates/omsagent-crd.yaml b/charts/azuremonitor-containers/templates/omsagent-crd.yaml deleted file mode 100644 index 46c5341cc..000000000 --- a/charts/azuremonitor-containers/templates/omsagent-crd.yaml +++ /dev/null @@ -1,36 +0,0 @@ -{{- if semverCompare "<1.19-0" .Capabilities.KubeVersion.Version }} -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: healthstates.azmon.container.insights - namespace: kube-system -spec: - group: azmon.container.insights - version: v1 - scope: Namespaced - names: - plural: healthstates - kind: HealthState -{{- else }} -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: healthstates.azmon.container.insights - namespace: kube-system -spec: - group: azmon.container.insights - versions: - - name: v1 - served: true - storage: true - schema: - openAPIV3Schema: - type: object - properties: - state: - type: string - scope: Namespaced - names: - plural: healthstates - kind: HealthState -{{- end }} \ No newline at end of file diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index 55b1f4a8d..2a60fbb7f 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -84,9 +84,6 @@ spec: protocol: TCP - containerPort: 25224 protocol: UDP - - containerPort: 25227 - protocol: TCP - name: in-rs-tcp volumeMounts: - mountPath: /var/run/host name: docker-sock diff --git a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml index d9bca069d..a167e99a5 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml @@ -28,9 +28,6 @@ rules: - apiGroups: ["apps", "extensions", "autoscaling"] resources: ["replicasets", "deployments", "horizontalpodautoscalers"] verbs: ["list"] -- apiGroups: ["azmon.container.insights"] - resources: 
["healthstates"] - verbs: ["get", "create", "patch"] - apiGroups: ["clusterconfig.azure.com"] resources: ["azureclusteridentityrequests", "azureclusteridentityrequests/status"] resourceNames: ["container-insights-clusteridentityrequest"] diff --git a/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml b/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml index fc7c471f8..7fa85bfdc 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml @@ -4,13 +4,6 @@ apiVersion: v1 data: kube.conf: | # Fluentd config file for OMS Docker - cluster components (kubeAPI) - #fluent forward plugin - - type forward - port "#{ENV['HEALTHMODEL_REPLICASET_SERVICE_SERVICE_PORT']}" - bind 0.0.0.0 - chunk_size_limit 4m - #Kubernetes pod inventory @@ -44,14 +37,6 @@ data: log_level debug - #Kubernetes health - - type kubehealth - tag kubehealth.ReplicaSet - run_interval 60 - log_level debug - - #cadvisor perf- Windows nodes type wincadvisorperf @@ -87,11 +72,6 @@ data: log_level info - #health model aggregation filter - - type filter_health_model_builder - - type out_oms log_level debug @@ -243,21 +223,6 @@ data: retry_mdm_post_wait_minutes 30 - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 4m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_kubehealth*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m - - type out_oms log_level debug diff --git a/charts/azuremonitor-containers/templates/omsagent-service.yaml b/charts/azuremonitor-containers/templates/omsagent-service.yaml deleted file mode 100644 index 00e6a1d3b..000000000 --- a/charts/azuremonitor-containers/templates/omsagent-service.yaml +++ /dev/null @@ -1,12 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: healthmodel-replicaset-service - namespace: kube-system -spec: - selector: - rsName: "omsagent-rs" - ports: - - protocol: TCP - port: 25227 - targetPort: in-rs-tcp diff --git a/kubernetes/linux/mdsd.xml b/kubernetes/linux/mdsd.xml index de14240aa..6391d4403 100644 --- a/kubernetes/linux/mdsd.xml +++ b/kubernetes/linux/mdsd.xml @@ -140,19 +140,6 @@ - - - - - - - - - - - - - @@ -231,7 +218,6 @@ - @@ -341,11 +327,6 @@ - - - - - @@ -443,12 +424,5 @@ - - - - ]]> - - - diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index c8324370b..24db6f20f 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -27,9 +27,6 @@ rules: - apiGroups: ["apps", "extensions", "autoscaling"] resources: ["replicasets", "deployments", "horizontalpodautoscalers"] verbs: ["list"] - - apiGroups: ["azmon.container.insights"] - resources: ["healthstates"] - verbs: ["get", "create", "patch"] - nonResourceURLs: ["/metrics"] verbs: ["get"] --- @@ -51,14 +48,6 @@ apiVersion: v1 data: kube.conf: |- # Fluentd config file for OMS Docker - cluster components (kubeAPI) - #fluent forward plugin - - type forward - port "#{ENV['HEALTHMODEL_REPLICASET_SERVICE_SERVICE_PORT']}" - bind 0.0.0.0 - chunk_size_limit 4m - - #Kubernetes pod inventory type kubepodinventory @@ -91,14 +80,6 @@ data: log_level debug - #Kubernetes health - - type kubehealth - tag kubehealth.ReplicaSet - run_interval 60 - log_level debug - - #cadvisor perf- Windows nodes type wincadvisorperf @@ -135,11 +116,6 @@ data: log_level info - #health model aggregation filter - - type filter_health_model_builder - - type out_oms log_level debug @@ 
-291,21 +267,6 @@ data: retry_mdm_post_wait_minutes 30 - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 4m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_kubehealth*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 5s - max_retry_wait 5m - - type out_oms log_level debug @@ -644,9 +605,6 @@ spec: protocol: TCP - containerPort: 25224 protocol: UDP - - containerPort: 25227 - protocol: TCP - name: in-rs-tcp volumeMounts: - mountPath: /var/run/host name: docker-sock @@ -897,39 +855,3 @@ spec: secret: secretName: omsagent-adx-secret optional: true ---- -kind: Service -apiVersion: v1 -metadata: - name: healthmodel-replicaset-service - namespace: kube-system -spec: - selector: - rsName: "omsagent-rs" - ports: - - protocol: TCP - port: 25227 - targetPort: in-rs-tcp ---- -# this is for versions >=1.19, for versions <1.19 we continue to use v1beta1 -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: healthstates.azmon.container.insights - namespace: kube-system -spec: - group: azmon.container.insights - versions: - - name: v1 - served: true - storage: true - schema: - openAPIV3Schema: - type: object - properties: - state: - type: string - scope: Namespaced - names: - plural: healthstates - kind: HealthState diff --git a/scripts/dcr-onboarding/ci-extension-dcr-streams.md b/scripts/dcr-onboarding/ci-extension-dcr-streams.md index cbac41838..23e929d7c 100644 --- a/scripts/dcr-onboarding/ci-extension-dcr-streams.md +++ b/scripts/dcr-onboarding/ci-extension-dcr-streams.md @@ -156,21 +156,7 @@ stage: to review tags: agent ``` -# 12 - KubeHealth -``` -stream-id: Microsoft-KubeHealth -data-type: KUBE_HEALTH_BLOB -intelligence-pack: ContainerInsights -solutions: ContainerInsights -platform: Any -la-table-name: KubeHealth -alias-stream-id: Microsoft-KubeHealth -contact-alias: OMScontainers@microsoft.com -stage: to review -tags: agent -``` - -# 13 - Perf +# 12 - Perf ``` > Note - This stream already exists stream-id: Microsoft-Perf diff --git a/scripts/preview/health/HealthAgentOnboarding.ps1 b/scripts/preview/health/HealthAgentOnboarding.ps1 deleted file mode 100644 index 9ce8eca74..000000000 --- a/scripts/preview/health/HealthAgentOnboarding.ps1 +++ /dev/null @@ -1,432 +0,0 @@ -<# - .DESCRIPTION - Upgrades the Kubernetes cluster that has been onboarded to monitoring to a version of the agent - that generates health monitor signals - 1. Installs necessary powershell modules - 2. Onboards Container Insights solution to the supplied LA workspace if not already onboarded - 3. 
Updates the cluster metadata to link the LA workspace ID to the cluster - .PARAMETER aksResourceId - Name of the cluster configured on the OMSAgent - .PARAMETER loganalyticsWorkspaceResourceId - Azure ResourceId of the log analytics workspace Id - .PARAMETER aksResourceLocation - Resource location of the AKS cluster resource -#> -param( - [Parameter(mandatory = $true)] - [string]$aksResourceId, - [Parameter(mandatory = $true)] - [string]$aksResourceLocation, - [Parameter(mandatory = $true)] - [string]$logAnalyticsWorkspaceResourceId -) - - -$OptOutLink = "https://docs.microsoft.com/en-us/azure/azure-monitor/insights/container-insights-optout" - -# checks the required Powershell modules exist and if not exists, request the user permission to install -$azAccountModule = Get-Module -ListAvailable -Name Az.Accounts -$azResourcesModule = Get-Module -ListAvailable -Name Az.Resources -$azOperationalInsights = Get-Module -ListAvailable -Name Az.OperationalInsights -$azAks = Get-Module -ListAvailable -Name Az.Aks - -if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null -eq $azOperationalInsights)) { - - $currentPrincipal = New-Object Security.Principal.WindowsPrincipal([Security.Principal.WindowsIdentity]::GetCurrent()) - - if ($currentPrincipal.IsInRole([Security.Principal.WindowsBuiltInRole]::Administrator)) { - Write-Host("Running script as an admin...") - Write-Host("") - } - else { - Write-Host("Please re-launch the script with elevated administrator") -ForegroundColor Red - Stop-Transcript - exit - } - - $message = "This script will try to install the latest versions of the following Modules : ` - Az.Resources, Az.Accounts, Az.Aks and Az.OperationalInsights using the command` - `'Install-Module {Insert Module Name} -Repository PSGallery -Force -AllowClobber -ErrorAction Stop -WarningAction Stop' - `If you do not have the latest version of these Modules, this troubleshooting script may not run." - $question = "Do you want to Install the modules and run the script or just run the script?" - - $choices = New-Object Collections.ObjectModel.Collection[Management.Automation.Host.ChoiceDescription] - $choices.Add((New-Object Management.Automation.Host.ChoiceDescription -ArgumentList '&Yes, Install and run')) - $choices.Add((New-Object Management.Automation.Host.ChoiceDescription -ArgumentList '&Continue without installing the Module')) - $choices.Add((New-Object Management.Automation.Host.ChoiceDescription -ArgumentList '&Quit')) - - $decision = $Host.UI.PromptForChoice($message, $question, $choices, 0) - - switch ($decision) { - 0 { - - if ($null -eq $azResourcesModule) { - try { - Write-Host("Installing Az.Resources...") - Install-Module Az.Resources -Repository PSGallery -Force -AllowClobber -ErrorAction Stop - } - catch { - Write-Host("Close other powershell logins and try installing the latest modules forAz.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red - exit - } - } - - if ($null -eq $azAccountModule) { - try { - Write-Host("Installing Az.Accounts...") - Install-Module Az.Accounts -Repository PSGallery -Force -AllowClobber -ErrorAction Stop - } - catch { - Write-Host("Close other powershell logins and try installing the latest modules forAz.Accounts in a new powershell window: eg. 
'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red - exit - } - } - - if ($null -eq $azOperationalInsights) { - try { - - Write-Host("Installing AzureRM.OperationalInsights...") - Install-Module Az.OperationalInsights -Repository PSGallery -Force -AllowClobber -ErrorAction Stop - } - catch { - Write-Host("Close other powershell logins and try installing the latest modules for AzureRM.OperationalInsights in a new powershell window: eg. 'Install-Module AzureRM.OperationalInsights -Repository PSGallery -Force'") -ForegroundColor Red - exit - } - } - if ($null -eq $azAks) { - try { - - Write-Host("Installing Az.Aks...") - Install-Module Az.Aks -Repository PSGallery -Force -AllowClobber -ErrorAction Stop - } - catch { - Write-Host("Close other powershell logins and try installing the latest modules for AzureRM.OperationalInsights in a new powershell window: eg. 'Install-Module AzureRM.OperationalInsights -Repository PSGallery -Force'") -ForegroundColor Red - exit - } - } - - - } - 1 { - - if ($null -eq $azResourcesModule) { - try { - Import-Module Az.Resources -ErrorAction Stop - } - catch { - Write-Host("Could not import Az.Resources...") -ForegroundColor Red - Write-Host("Close other powershell logins and try installing the latest modules for Az.Resources in a new powershell window: eg. 'Install-Module Az.Resources -Repository PSGallery -Force'") -ForegroundColor Red - Stop-Transcript - exit - } - } - if ($null -eq $azAccountModule) { - try { - Import-Module Az.Accounts -ErrorAction Stop - } - catch { - Write-Host("Could not import Az.Accounts...") -ForegroundColor Red - Write-Host("Close other powershell logins and try installing the latest modules for Az.Accounts in a new powershell window: eg. 'Install-Module Az.Accounts -Repository PSGallery -Force'") -ForegroundColor Red - Stop-Transcript - exit - } - } - - if ($null -eq $azAccountModule) { - try { - Import-Module Az.OperationalInsights - } - catch { - Write-Host("Could not import Az.OperationalInsights... 
Please reinstall this Module") -ForegroundColor Red - Stop-Transcript - exit - } - } - - } - 2 { - Write-Host("") - Stop-Transcript - exit - } - } -} - -if ([string]::IsNullOrEmpty($logAnalyticsWorkspaceResourceId)) { - Write-Host("logAnalyticsWorkspaceResourceId should not be NULL or empty") -ForegroundColor Red - exit -} - -if (($logAnalyticsWorkspaceResourceId -match "/providers/Microsoft.OperationalInsights/workspaces") -eq $false) { - Write-Host("logAnalyticsWorkspaceResourceId should be valid Azure Resource Id format") -ForegroundColor Red - exit -} - -$workspaceResourceDetails = $logAnalyticsWorkspaceResourceId.Split("/") - -if ($workspaceResourceDetails.Length -ne 9) { - Write-Host("logAnalyticsWorkspaceResourceId should be valid Azure Resource Id format") -ForegroundColor Red - exit -} - -$workspaceSubscriptionId = $workspaceResourceDetails[2] -$workspaceSubscriptionId = $workspaceSubscriptionId.Trim() -$workspaceResourceGroupName = $workspaceResourceDetails[4] -$workspaceResourceGroupName = $workspaceResourceGroupName.Trim() -$workspaceName = $workspaceResourceDetails[8] -$workspaceResourceGroupName = $workspaceResourceGroupName.Trim() - -$aksResourceDetails = $aksResourceId.Split("/") -$clusterResourceGroupName = $aksResourceDetails[4].Trim() -$clusterSubscriptionId = $aksResourceDetails[2].Trim() -$clusterName = $aksResourceDetails[8].Trim() - -Write-Host("LogAnalytics Workspace SubscriptionId : '" + $workspaceSubscriptionId + "' ") -ForegroundColor Green - -try { - Write-Host("") - Write-Host("Trying to get the current Az login context...") - $account = Get-AzContext -ErrorAction Stop - Write-Host("Successfully fetched current AzContext context...") -ForegroundColor Green - Write-Host("") -} -catch { - Write-Host("") - Write-Host("Could not fetch AzContext..." ) -ForegroundColor Red - Write-Host("") -} - - -if ($null -eq $account.Account) { - try { - Write-Host("Please login...") - Connect-AzAccount -subscriptionid $clusterSubscriptionId - } - catch { - Write-Host("") - Write-Host("Could not select subscription with ID : " + $clusterSubscriptionId + ". Please make sure the ID you entered is correct and you have access to the cluster" ) -ForegroundColor Red - Write-Host("") - Stop-Transcript - exit - } -} - -Write-Host("Checking if cluster is onboarded to Container Monitoring") -if ($account.Subscription.Id -eq $clusterSubscriptionId) { - Write-Host("Subscription: $clusterSubscriptionId is already selected. Account details: ") - $account -} -else { - try { - Write-Host("Current Subscription:") - $account - Write-Host("Changing to workspace subscription: $clusterSubscriptionId") - Set-AzContext -SubscriptionId $clusterSubscriptionId - - } - catch { - Write-Host("") - Write-Host("Could not select subscription with ID : " + $workspaceSubscriptionId + ". Please make sure the ID you entered is correct and you have access to the cluster" ) -ForegroundColor Red - Write-Host("") - Stop-Transcript - exit - } -} - -try { - $resources = Get-AzResource -ResourceGroupName $clusterResourceGroupName -Name $clusterName -ResourceType "Microsoft.ContainerService/managedClusters" -ExpandProperties -ErrorAction Stop -WarningAction Stop - $clusterResource = $resources[0] - - $props = ($clusterResource.Properties | ConvertTo-Json).toLower() | ConvertFrom-Json - - if ($true -eq $props.addonprofiles.omsagent.enabled -and $null -ne $props.addonprofiles.omsagent.config) { - Write-Host("Your cluster is already onboarded to Azure monitor for containers. 
Please refer to the following documentation to opt-out and re-run this script again:") -ForegroundColor Red; - Write-Host("") - Write-Host($OptOutLink) -ForegroundColor Red - Write-Host("") - throw - } - - Write-Host("Setting context to the current cluster") - Import-AzAksCredential -Id $aksResourceId -Force - $omsagentCount = kubectl get pods -n kube-system | Select-String omsagent - if ($null -eq $omsagentCount) { - Write-Host ("OmsAgent is not running. Proceeding to do custom onboarding for Health Agent") - } - else { - Write-Host ("Cluster is not enabled for Monitoring. But detected omsagent pods. Please wait for 30 min to ensure that omsagent containers are completely stopped and re-run this script") -ForegroundColor Red - Stop-Transcript - exit - } -} -catch { - Write-Host("Error when checking if cluster is already onboarded") - exit -} - - -if ($account.Subscription.Id -eq $workspaceSubscriptionId) { - Write-Host("Subscription: $workspaceSubscriptionId is already selected. Account details: ") - $account -} -else { - try { - Write-Host("Current Subscription:") - $account - Write-Host("Changing to workspace subscription: $workspaceSubscriptionId") - Set-AzContext -SubscriptionId $workspaceSubscriptionId - } - catch { - Write-Host("") - Write-Host("Could not select subscription with ID : " + $workspaceSubscriptionId + ". Please make sure the ID you entered is correct and you have access to the cluster" ) -ForegroundColor Red - Write-Host("") - Stop-Transcript - exit - } -} - -$WorkspaceInformation = Get-AzOperationalInsightsWorkspace -ResourceGroupName $workspaceResourceGroupName -Name $workspaceName -ErrorAction Stop -$key = (Get-AzOperationalInsightsWorkspaceSharedKeys -ResourceGroupName $workspaceResourceGroupName -Name $workspaceName).PrimarySharedKey -$wsid = $WorkspaceInformation.CustomerId -$base64EncodedKey = [System.Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes($key)) -$base64EncodedWsId = [System.Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes($wsid)) -Write-Host("Successfully verified specified logAnalyticsWorkspaceResourceId valid and exists...") -ForegroundColor Green -$WorkspaceLocation = $WorkspaceInformation.Location - -if ($null -eq $WorkspaceLocation) { - Write-Host("") - Write-Host("Cannot fetch workspace location. Please try again...") -ForegroundColor Red - Write-Host("") - Stop-Transcript - exit -} - -try { - $WorkspaceIPDetails = Get-AzOperationalInsightsIntelligencePacks -ResourceGroupName $workspaceResourceGroupName -WorkspaceName $workspaceName -ErrorAction Stop - Write-Host("Successfully fetched workspace IP details...") -ForegroundColor Green - Write-Host("") -} -catch { - Write-Host("") - Write-Host("Failed to get the list of solutions onboarded to the workspace. 
Please make sure that it hasn't been deleted and you have access to it.") -ForegroundColor Red - Write-Host("") - Stop-Transcript - exit -} - -try { - $ContainerInsightsIndex = $WorkspaceIPDetails.Name.IndexOf("ContainerInsights"); - Write-Host("Successfully located ContainerInsights solution") -ForegroundColor Green - Write-Host("") -} -catch { - Write-Host("Failed to get ContainerInsights solution details from the workspace") -ForegroundColor Red - Write-Host("") - Stop-Transcript - exit -} - -$isSolutionOnboarded = $WorkspaceIPDetails.Enabled[$ContainerInsightsIndex] - -if ($false -eq $isSolutionOnboarded) { - - $DeploymentName = "ContainerInsightsSolutionOnboarding-" + ((Get-Date).ToUniversalTime()).ToString('MMdd-HHmm') - $Parameters = @{ } - $Parameters.Add("workspaceResourceId", $logAnalyticsWorkspaceResourceID) - $Parameters.Add("workspaceRegion", $WorkspaceLocation) - $Parameters - - try { - New-AzResourceGroupDeployment -Name $DeploymentName ` - -ResourceGroupName $workspaceResourceGroupName ` - -TemplateUri https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/templates/azuremonitor-containerSolution.json ` - -TemplateParameterObject $Parameters -ErrorAction Stop` - - - Write-Host("Successfully added Container Insights Solution") -ForegroundColor Green - - } - catch { - Write-Host ("Template deployment failed with an error: '" + $Error[0] + "' ") -ForegroundColor Red - Write-Host("Please contact us by emailing askcoin@microsoft.com for help") -ForegroundColor Red - } - -} - -Write-Host("Successfully added Container Insights Solution to workspace " + $workspaceName) -ForegroundColor Green - -try { - $Parameters = @{ } - $Parameters.Add("aksResourceId", $aksResourceId) - $Parameters.Add("aksResourceLocation", $aksResourceLocation) - $Parameters.Add("workspaceResourceId", $logAnalyticsWorkspaceResourceId) - $DeploymentName = "ClusterHealthOnboarding-" + ((Get-Date).ToUniversalTime()).ToString('MMdd-HHmm') - $Parameters - - Write-Host " Onboarding cluster to provided LA workspace " - - if ($account.Subscription.Id -eq $clusterSubscriptionId) { - Write-Host("Subscription: $clusterSubscriptionId is already selected. Account details: ") - $account - } - else { - try { - Write-Host("Current Subscription:") - $account - Write-Host("Changing to subscription: $clusterSubscriptionId") - Set-AzContext -SubscriptionId $clusterSubscriptionId - } - catch { - Write-Host("") - Write-Host("Could not select subscription with ID : " + $workspaceSubscriptionId + ". 
Please make sure the ID you entered is correct and you have access to the cluster" ) -ForegroundColor Red - Write-Host("") - Stop-Transcript - exit - } - } - - Write-Host("Enabling Custom Monitoring using template deployment") - New-AzResourceGroupDeployment -Name $DeploymentName ` - -ResourceGroupName $clusterResourceGroupName ` - -TemplateUri https://raw.githubusercontent.com/Microsoft/OMS-docker/dilipr/onboardHealth/health/customOnboarding.json ` - -TemplateParameterObject $Parameters -ErrorAction Stop` - - Write-Host("") - - Write-Host("Successfully custom onboarded cluster to Monitoring") -ForegroundColor Green - - Write-Host("") -} -catch { - Write-Host ("Template deployment failed with an error: '" + $Error[0] + "' ") -ForegroundColor Red - exit - #Write-Host("Please contact us by emailing askcoin@microsoft.com for help") -ForegroundColor Red -} - -$desktopPath = "~" -if (-not (test-path $desktopPath/deployments) ) { - Write-Host "$($desktopPath)/deployments doesn't exist, creating it" - mkdir $desktopPath/deployments | out-null -} -else { - Write-Host "$($desktopPath)/deployments exists, no need to create it" -} -try { - - $aksResourceDetails = $aksResourceId.Split("/") - if ($aksResourceDetails.Length -ne 9) { - Write-Host("aksResourceDetails should be valid Azure Resource Id format") -ForegroundColor Red - exit - } - $clusterName = $aksResourceDetails[8].Trim() - $clusterResourceGroupName = $aksResourceDetails[4].Trim() - Import-AzAksCredential -Id $aksResourceId -Force - Invoke-WebRequest https://raw.githubusercontent.com/microsoft/OMS-docker/dilipr/mergeHealthToCiFeature/health/omsagent-template.yaml -OutFile $desktopPath/omsagent-template.yaml - - (Get-Content -Path $desktopPath/omsagent-template.yaml -Raw) -replace 'VALUE_AKS_RESOURCE_ID', $aksResourceId -replace 'VALUE_AKS_REGION', $aksResourceLocation -replace 'VALUE_WSID', $base64EncodedWsId -replace 'VALUE_KEY', $base64EncodedKey -replace 'VALUE_ACS_RESOURCE_NAME', $acsResourceName | Set-Content $desktopPath/deployments/omsagent-$clusterName.yaml - kubectl apply -f $desktopPath/deployments/omsagent-$clusterName.yaml - Write-Host "Successfully onboarded to health model omsagent" -ForegroundColor Green -} -catch { - Write-Host ("Agent deployment failed with an error: '" + $Error[0] + "' ") -ForegroundColor Red -} diff --git a/scripts/preview/health/HealthOnboarding.md b/scripts/preview/health/HealthOnboarding.md deleted file mode 100644 index 5e4db2b9b..000000000 --- a/scripts/preview/health/HealthOnboarding.md +++ /dev/null @@ -1,40 +0,0 @@ -## Overview -The following documentation outlines the steps required to upgrade an existing cluster onboarded to a Log Analytics workspace running the omsagent, to an agent running the workflow that generates health monitor signals into the same workspace. - -### Onboarding using a script (AKS) -We have a handy [script](https://github.com/Microsoft/OMS-docker/blob/dilipr/kubeHealth/health/HealthAgentOnboarding.ps1) which can onboard your AKS clusters to a version of the agent that can generate the health model. 
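As a rough illustration (every ID below is a placeholder, not a real resource), a typical invocation looks like:

    .\HealthAgentOnboarding.ps1 -aksResourceId "/subscriptions/<subscriptionId>/resourceGroups/<resourceGroup>/providers/Microsoft.ContainerService/managedClusters/<clusterName>" -aksResourceLocation eastus -logAnalyticsWorkspaceResourceId "/subscriptions/<subscriptionId>/resourceGroups/<resourceGroup>/providers/Microsoft.OperationalInsights/workspaces/<workspaceName>"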
Read on to find out more - -#### Script Prerequisites -* script should run in an elevated command prompt -* kubectl should have been installed and be present in the path - -#### What does the script do: -* Installs necessary powershell modules -* Onboards Container Insights solution to the supplied LA workspace if not already onboarded -* Updates the cluster metadata to link the LA workspace ID to the cluster -* Installs the new agent that generates health monitor signals (using kubectl) - -#### Script Execution -* Download the script from [here](https://github.com/Microsoft/OMS-docker/blob/dilipr/kubeHealth/health/HealthAgentOnboarding.ps1) -* Run the script: - .\HealthAgentOnboarding.ps1 -aksResourceId -aksResourceLocation - -logAnalyticsWorkspaceResourceId (e.g./subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourceGroups/dilipr-health-preview/providers/Microsoft.OperationalInsights/workspaces/dilipr-health-preview) - * Please make sure the right location of the AKS cluster is passed in to the script (without spaces e.g. eastus, southcentralus) - -#### Viewing the health model -* Navigate to -* There should be a new tab named "Health" in Cluster Insights -* Note: It might take about 15-20 min after the script runs for the data to show up in the Insights Page of the Cluster - - -### AKS Engine Onboarding -If your cluster is already onboarded to Monitoring, proceed directly to step 4 and continue from there on. -1. Add Container Insights Solution to your workspace using the instructions [here](http://aka.ms/coinhelmdoc) -2. Tag your AKS-Engine cluster appropriately using the instructions [here](http://aka.ms/coin-acs-tag-doc) -3. Set the current k8s context to be your AKS Engine cluster (the kube-config should refer to your AKS-Engine cluster) -4. Download the [omsagent-template-aks-engine.yaml](https://github.com/microsoft/OMS-docker/blob/dilipr/kubeHealth/health/omsagent-template-aks-engine.yaml) file to your local machine -5. Update the Values of VALUE_ACS_RESOURCE_NAME, VALUE_WSID {base 64 encoded workspace id} and VALUE_KEY {base 64 encoded workspace key}. See [here](https://github.com/Azure/aks-engine/blob/master/examples/addons/container-monitoring/README.md) on instructions to get the Workspace ID and Key of the file downloaded in Step 4 above -6. Run kubectl delete on the file {kubectl delete -f path_to_file_in_step_4} -7. Run kubectl apply on the file {kubectl apply -f path_to_file_in_step_4} - - diff --git a/scripts/preview/health/customOnboarding.json b/scripts/preview/health/customOnboarding.json deleted file mode 100644 index ecccc2ea7..000000000 --- a/scripts/preview/health/customOnboarding.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - "aksResourceId": { - "type": "string", - "metadata": { - "description": "AKS Cluster resource id" - } - }, - "aksResourceLocation": { - "type": "string", - "metadata": { - "description": "Location of the AKS resource e.g. 
\"East US\"" - } - }, - "workspaceResourceId": { - "type": "string", - "metadata": { - "description": "Azure Monitor Log Analytics Resource ID" - } - } - }, - "resources": [ - { - "name": "[split(parameters('aksResourceId'),'/')[8]]", - "type": "Microsoft.ContainerService/managedClusters", - "location": "[parameters('aksResourceLocation')]", - "apiVersion": "2018-03-31", - "properties": { - "mode": "Incremental", - "id": "[parameters('aksResourceId')]", - "addonProfiles": { - "omsagent": { - "enabled": false, - "config": { - "logAnalyticsWorkspaceResourceID": "[parameters('workspaceResourceId')]" - } - } - } - } - } - ] -} \ No newline at end of file diff --git a/scripts/preview/health/omsagent-template-aks-engine.yaml b/scripts/preview/health/omsagent-template-aks-engine.yaml deleted file mode 100644 index 5e063fd54..000000000 --- a/scripts/preview/health/omsagent-template-aks-engine.yaml +++ /dev/null @@ -1,586 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: omsagent - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: omsagent-reader -rules: - - apiGroups: [""] - resources: ["pods", "events", "nodes", "namespaces", "services"] - verbs: ["list", "get", "watch"] - - apiGroups: ["extensions"] - resources: ["deployments"] - verbs: ["list"] - - nonResourceURLs: ["/metrics"] - verbs: ["get"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: omsagentclusterrolebinding -subjects: - - kind: ServiceAccount - name: omsagent - namespace: kube-system -roleRef: - kind: ClusterRole - name: omsagent-reader - apiGroup: rbac.authorization.k8s.io ---- -kind: ConfigMap -apiVersion: v1 -data: - kube.conf: |- - # Fluentd config file for OMS Docker - cluster components (kubeAPI) - #fluent forward plugin - - type forward - port 25235 - bind 0.0.0.0 - - - #Kubernetes pod inventory - - type kubepodinventory - tag oms.containerinsights.KubePodInventory - run_interval 60s - log_level debug - - - #Kubernetes events - - type kubeevents - tag oms.containerinsights.KubeEvents - run_interval 60s - log_level debug - - - #Kubernetes logs - - type kubelogs - tag oms.api.KubeLogs - run_interval 60s - - - #Kubernetes services - - type kubeservices - tag oms.containerinsights.KubeServices - run_interval 60s - log_level debug - - - #Kubernetes Nodes - - type kubenodeinventory - tag oms.containerinsights.KubeNodeInventory - run_interval 60s - log_level debug - - - #Kubernetes perf - - type kubeperf - tag oms.api.KubePerf - run_interval 60s - log_level debug - - - #Kubernetes health - - type kubehealth - tag kubehealth.ReplicaSet - run_interval 60s - log_level debug - - - #cadvisor perf- Windows nodes - - type wincadvisorperf - tag oms.api.wincadvisorperf - run_interval 60s - log_level debug - - - - type filter_inventory2mdm - log_level info - - - # custom_metrics_mdm filter plugin for perf data from windows nodes - - type filter_cadvisor2mdm - metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes - log_level info - - #health model aggregation filter - - type filter_health_model_builder - - - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_kubepods*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 30s - max_retry_wait 9m - - - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 5m - buffer_type file - buffer_path 
%STATE_DIR_WS%/out_oms_kubeevents*.buffer - buffer_queue_limit 10 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 30s - max_retry_wait 9m - - - - type out_oms_api - log_level debug - buffer_chunk_limit 10m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_api_kubernetes_logs*.buffer - buffer_queue_limit 10 - flush_interval 20s - retry_limit 10 - retry_wait 30s - - - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_kubeservices*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 30s - max_retry_wait 9m - - - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/state/out_oms_kubenodes*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 30s - max_retry_wait 9m - - - - type out_oms - log_level debug - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_containernodeinventory*.buffer - buffer_queue_limit 20 - flush_interval 20s - retry_limit 10 - retry_wait 15s - max_retry_wait 9m - - - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_kubeperf*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 30s - max_retry_wait 9m - - - - type out_mdm - log_level debug - num_threads 5 - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_mdm_*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 30s - max_retry_wait 9m - retry_mdm_post_wait_minutes 60 - - - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_api_wincadvisorperf*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 30s - max_retry_wait 9m - - - - type out_mdm - log_level debug - num_threads 5 - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_mdm_cdvisorperf*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 30s - max_retry_wait 9m - retry_mdm_post_wait_minutes 60 - - - - type out_oms_api - log_level debug - buffer_chunk_limit 10m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_api_kubehealth*.buffer - buffer_queue_limit 10 - flush_interval 20s - retry_limit 10 - retry_wait 30s - -metadata: - name: omsagent-rs-config - namespace: kube-system ---- -apiVersion: v1 -kind: Secret -metadata: - name: omsagent-secret - namespace: kube-system -type: Opaque -data: - #BASE64 ENCODED (Both WSID & KEY) INSIDE DOUBLE QUOTE ("") - WSID: "VALUE_WSID" - KEY: "VALUE_KEY" ---- -kind: Service -apiVersion: v1 -metadata: - name: replicaset-service - namespace: kube-system -spec: - selector: - rsName: "omsagent-rs" - ports: - - protocol: TCP - port: 25235 - targetPort: in-rs-tcp ---- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: azurefile -provisioner: kubernetes.io/azure-file -mountOptions: - - dir_mode=0777 - - file_mode=0777 - - uid=1000 - - gid=1000 -parameters: - skuName: Standard_LRS ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole 
-metadata: - name: system:azure-cloud-provider -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "create"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: system:azure-cloud-provider -roleRef: - kind: ClusterRole - apiGroup: rbac.authorization.k8s.io - name: system:azure-cloud-provider -subjects: - - kind: ServiceAccount - name: persistent-volume-binder - namespace: kube-system ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: azurefile - namespace: kube-system -spec: - accessModes: - - ReadWriteMany - storageClassName: azurefile - resources: - requests: - storage: 10Mi ---- -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: omsagent - namespace: kube-system -spec: - updateStrategy: - type: RollingUpdate - template: - metadata: - labels: - dsName: "omsagent-ds" - annotations: - agentVersion: "1.10.0.1" - dockerProviderVersion: "6.0.0-1" - schema-versions: "v1" - spec: - serviceAccountName: omsagent - containers: - - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview08012019" - imagePullPolicy: IfNotPresent - resources: - limits: - cpu: 150m - memory: 600Mi - requests: - cpu: 75m - memory: 225Mi - env: - # - name: AKS_RESOURCE_ID - # value: "VALUE_AKS_RESOURCE_ID" - # - name: AKS_REGION - # value: "VALUE_AKS_REGION" - # Uncomment below two lines for ACS clusters and set the cluster names manually. Also comment out the above two lines for ACS clusters - - name: ACS_RESOURCE_NAME - value: "VALUE_ACS_RESOURCE_NAME" - - name: DISABLE_KUBE_SYSTEM_LOG_COLLECTION - value: "true" - - name: CONTROLLER_TYPE - value: "DaemonSet" - - name: NODE_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - securityContext: - privileged: true - ports: - - containerPort: 25225 - protocol: TCP - - containerPort: 25224 - protocol: UDP - volumeMounts: - - mountPath: /hostfs - name: host-root - readOnly: true - - mountPath: /var/run/host - name: docker-sock - - mountPath: /var/log - name: host-log - - mountPath: /var/lib/docker/containers - name: containerlog-path - - mountPath: /etc/kubernetes/host - name: azure-json-path - - mountPath: /etc/omsagent-secret - name: omsagent-secret - - mountPath: /etc/config/settings - name: settings-vol-config - readOnly: true - livenessProbe: - exec: - command: - - /bin/bash - - -c - - /opt/livenessprobe.sh - initialDelaySeconds: 60 - periodSeconds: 60 - nodeSelector: - beta.kubernetes.io/os: linux - # Tolerate a NoSchedule taint on master that ACS Engine sets. 
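# For context (the node name below is hypothetical): the taint being tolerated here is equivalent to
#   kubectl taint nodes k8s-master-0 node-role.kubernetes.io/master=true:NoSchedule
# and the toleration that follows is what allows the omsagent DaemonSet pods to be scheduled
# onto master nodes carrying that taint.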
- tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Equal" - value: "true" - effect: "NoSchedule" - volumes: - - name: host-root - hostPath: - path: / - - name: docker-sock - hostPath: - path: /var/run - - name: container-hostname - hostPath: - path: /etc/hostname - - name: host-log - hostPath: - path: /var/log - - name: containerlog-path - hostPath: - path: /var/lib/docker/containers - - name: azure-json-path - hostPath: - path: /etc/kubernetes - - name: omsagent-secret - secret: - secretName: omsagent-secret - - name: settings-vol-config - configMap: - name: container-azm-ms-agentconfig - optional: true ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: omsagent-rs - namespace: kube-system -spec: - replicas: 1 - selector: - matchLabels: - rsName: "omsagent-rs" - strategy: - type: RollingUpdate - template: - metadata: - labels: - rsName: "omsagent-rs" - annotations: - agentVersion: "1.10.0.1" - dockerProviderVersion: "6.0.0-1" - schema-versions: "v1" - spec: - serviceAccountName: omsagent - containers: - - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview08012019" - imagePullPolicy: IfNotPresent - resources: - limits: - cpu: 150m - memory: 500Mi - requests: - cpu: 50m - memory: 175Mi - env: - # - name: AKS_RESOURCE_ID - # value: "VALUE_AKS_RESOURCE_ID" - # - name: AKS_REGION - # value: "VALUE_AKS_REGION" - # Uncomment below two lines for ACS clusters and set the cluster names manually. Also comment out the above two lines for ACS clusters - - name: ACS_RESOURCE_NAME - value: "aks-engine-health" - - name: CONTROLLER_TYPE - value: "ReplicaSet" - - name: NODE_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - securityContext: - privileged: true - ports: - - containerPort: 25225 - protocol: TCP - - containerPort: 25224 - protocol: UDP - - containerPort: 25235 - protocol: TCP - name: in-rs-tcp - volumeMounts: - - mountPath: /var/run/host - name: docker-sock - - mountPath: /var/log - name: host-log - - mountPath: /var/lib/docker/containers - name: containerlog-path - - mountPath: /etc/kubernetes/host - name: azure-json-path - - mountPath: /etc/omsagent-secret - name: omsagent-secret - readOnly: true - - mountPath: /etc/config - name: omsagent-rs-config - - mountPath: /etc/config/settings - name: settings-vol-config - readOnly: true - - mountPath: "/mnt/azure" - name: azurefile-pv - livenessProbe: - exec: - command: - - /bin/bash - - -c - - /opt/livenessprobe.sh - initialDelaySeconds: 60 - periodSeconds: 60 - nodeSelector: - beta.kubernetes.io/os: linux - kubernetes.io/role: agent - volumes: - - name: docker-sock - hostPath: - path: /var/run - - name: container-hostname - hostPath: - path: /etc/hostname - - name: host-log - hostPath: - path: /var/log - - name: containerlog-path - hostPath: - path: /var/lib/docker/containers - - name: azure-json-path - hostPath: - path: /etc/kubernetes - - name: omsagent-secret - secret: - secretName: omsagent-secret - - name: omsagent-rs-config - configMap: - name: omsagent-rs-config - - name: settings-vol-config - configMap: - name: container-azm-ms-agentconfig - optional: true - - name: azurefile-pv - persistentVolumeClaim: - claimName: azurefile diff --git a/scripts/preview/health/omsagent-template.yaml b/scripts/preview/health/omsagent-template.yaml deleted file mode 100644 index e58e9c33f..000000000 --- a/scripts/preview/health/omsagent-template.yaml +++ /dev/null @@ -1,586 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: omsagent - namespace: 
kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: omsagent-reader -rules: -- apiGroups: [""] - resources: ["pods", "events", "nodes", "namespaces", "services"] - verbs: ["list", "get", "watch"] -- apiGroups: ["extensions"] - resources: ["deployments"] - verbs: ["list"] -- nonResourceURLs: ["/metrics"] - verbs: ["get"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: omsagentclusterrolebinding -subjects: - - kind: ServiceAccount - name: omsagent - namespace: kube-system -roleRef: - kind: ClusterRole - name: omsagent-reader - apiGroup: rbac.authorization.k8s.io ---- -kind: ConfigMap -apiVersion: v1 -data: - kube.conf: |- - # Fluentd config file for OMS Docker - cluster components (kubeAPI) - #fluent forward plugin - - type forward - port 25235 - bind 0.0.0.0 - - - #Kubernetes pod inventory - - type kubepodinventory - tag oms.containerinsights.KubePodInventory - run_interval 60s - log_level debug - - - #Kubernetes events - - type kubeevents - tag oms.containerinsights.KubeEvents - run_interval 60s - log_level debug - - - #Kubernetes logs - - type kubelogs - tag oms.api.KubeLogs - run_interval 60s - - - #Kubernetes services - - type kubeservices - tag oms.containerinsights.KubeServices - run_interval 60s - log_level debug - - - #Kubernetes Nodes - - type kubenodeinventory - tag oms.containerinsights.KubeNodeInventory - run_interval 60s - log_level debug - - - #Kubernetes perf - - type kubeperf - tag oms.api.KubePerf - run_interval 60s - log_level debug - - - #Kubernetes health - - type kubehealth - tag kubehealth.ReplicaSet - run_interval 60s - log_level debug - - - #cadvisor perf- Windows nodes - - type wincadvisorperf - tag oms.api.wincadvisorperf - run_interval 60s - log_level debug - - - - type filter_inventory2mdm - log_level info - - - # custom_metrics_mdm filter plugin for perf data from windows nodes - - type filter_cadvisor2mdm - metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes - log_level info - - #health model aggregation filter - - type filter_health_model_builder - - - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_kubepods*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 30s - max_retry_wait 9m - - - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 5m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_kubeevents*.buffer - buffer_queue_limit 10 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 30s - max_retry_wait 9m - - - - type out_oms_api - log_level debug - buffer_chunk_limit 10m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_api_kubernetes_logs*.buffer - buffer_queue_limit 10 - flush_interval 20s - retry_limit 10 - retry_wait 30s - - - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_kubeservices*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 30s - max_retry_wait 9m - - - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/state/out_oms_kubenodes*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 30s - max_retry_wait 9m - - - 
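# A minimal sketch (illustrative only, not part of the original config): each of these buffered
# output blocks pairs an output plugin type with file-buffer and retry tuning, roughly
#   type out_oms
#   buffer_type file
#   buffer_path %STATE_DIR_WS%/out_oms_example*.buffer
#   flush_interval 20s
#   retry_limit 10
#   retry_wait 30s
#   max_retry_wait 9m
# i.e. chunks are flushed every 20 seconds and failed sends are retried with increasing waits
# (capped at 9 minutes) before the chunk is given up on; the buffer file name above is invented
# for the example.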
- type out_oms - log_level debug - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_containernodeinventory*.buffer - buffer_queue_limit 20 - flush_interval 20s - retry_limit 10 - retry_wait 15s - max_retry_wait 9m - - - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_kubeperf*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 30s - max_retry_wait 9m - - - - type out_mdm - log_level debug - num_threads 5 - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_mdm_*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 30s - max_retry_wait 9m - retry_mdm_post_wait_minutes 60 - - - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_api_wincadvisorperf*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 30s - max_retry_wait 9m - - - - type out_mdm - log_level debug - num_threads 5 - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_mdm_cdvisorperf*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 30s - max_retry_wait 9m - retry_mdm_post_wait_minutes 60 - - - - type out_oms_api - log_level debug - buffer_chunk_limit 10m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_api_kubehealth*.buffer - buffer_queue_limit 10 - flush_interval 20s - retry_limit 10 - retry_wait 30s - -metadata: - name: omsagent-rs-config - namespace: kube-system ---- -apiVersion: v1 -kind: Secret -metadata: - name: omsagent-secret - namespace: kube-system -type: Opaque -data: - #BASE64 ENCODED (Both WSID & KEY) INSIDE DOUBLE QUOTE ("") - WSID: "VALUE_WSID" - KEY: "VALUE_KEY" ---- -kind: Service -apiVersion: v1 -metadata: - name: replicaset-service - namespace: kube-system -spec: - selector: - rsName: "omsagent-rs" - ports: - - protocol: TCP - port: 25235 - targetPort: in-rs-tcp ---- -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: azurefile -provisioner: kubernetes.io/azure-file -mountOptions: - - dir_mode=0777 - - file_mode=0777 - - uid=1000 - - gid=1000 -parameters: - skuName: Standard_LRS ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: system:azure-cloud-provider -rules: -- apiGroups: [''] - resources: ['secrets'] - verbs: ['get','create'] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: system:azure-cloud-provider -roleRef: - kind: ClusterRole - apiGroup: rbac.authorization.k8s.io - name: system:azure-cloud-provider -subjects: -- kind: ServiceAccount - name: persistent-volume-binder - namespace: kube-system ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: azurefile - namespace: kube-system -spec: - accessModes: - - ReadWriteMany - storageClassName: azurefile - resources: - requests: - storage: 10Mi ---- -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: omsagent - namespace: kube-system -spec: - updateStrategy: - type: RollingUpdate - template: - metadata: - labels: - dsName: "omsagent-ds" - annotations: - agentVersion: "1.10.0.1" - dockerProviderVersion: "6.0.0-1" - schema-versions: "v1" - spec: - serviceAccountName: omsagent - containers: - - 
name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview08012019" - imagePullPolicy: IfNotPresent - resources: - limits: - cpu: 150m - memory: 600Mi - requests: - cpu: 75m - memory: 225Mi - env: - - name: AKS_RESOURCE_ID - value: "VALUE_AKS_RESOURCE_ID" - - name: AKS_REGION - value: "VALUE_AKS_REGION" - # Uncomment below two lines for ACS clusters and set the cluster names manually. Also comment out the above two lines for ACS clusters - # - name: ACS_RESOURCE_NAME - # value: "my_acs_cluster_name" - - name: DISABLE_KUBE_SYSTEM_LOG_COLLECTION - value: "true" - - name: CONTROLLER_TYPE - value: "DaemonSet" - - name: NODE_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - securityContext: - privileged: true - ports: - - containerPort: 25225 - protocol: TCP - - containerPort: 25224 - protocol: UDP - volumeMounts: - - mountPath: /hostfs - name: host-root - readOnly: true - - mountPath: /var/run/host - name: docker-sock - - mountPath: /var/log - name: host-log - - mountPath: /var/lib/docker/containers - name: containerlog-path - - mountPath: /etc/kubernetes/host - name: azure-json-path - - mountPath: /etc/omsagent-secret - name: omsagent-secret - - mountPath: /etc/config/settings - name: settings-vol-config - readOnly: true - livenessProbe: - exec: - command: - - /bin/bash - - -c - - /opt/livenessprobe.sh - initialDelaySeconds: 60 - periodSeconds: 60 - nodeSelector: - beta.kubernetes.io/os: linux - # Tolerate a NoSchedule taint on master that ACS Engine sets. - tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Equal" - value: "true" - effect: "NoSchedule" - volumes: - - name: host-root - hostPath: - path: / - - name: docker-sock - hostPath: - path: /var/run - - name: container-hostname - hostPath: - path: /etc/hostname - - name: host-log - hostPath: - path: /var/log - - name: containerlog-path - hostPath: - path: /var/lib/docker/containers - - name: azure-json-path - hostPath: - path: /etc/kubernetes - - name: omsagent-secret - secret: - secretName: omsagent-secret - - name: settings-vol-config - configMap: - name: container-azm-ms-agentconfig - optional: true ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: omsagent-rs - namespace: kube-system -spec: - replicas: 1 - selector: - matchLabels: - rsName: "omsagent-rs" - strategy: - type: RollingUpdate - template: - metadata: - labels: - rsName: "omsagent-rs" - annotations: - agentVersion: "1.10.0.1" - dockerProviderVersion: "6.0.0-1" - schema-versions: "v1" - spec: - serviceAccountName: omsagent - containers: - - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview08012019" - imagePullPolicy: IfNotPresent - resources: - limits: - cpu: 150m - memory: 500Mi - requests: - cpu: 50m - memory: 175Mi - env: - - name: AKS_RESOURCE_ID - value: "VALUE_AKS_RESOURCE_ID" - - name: AKS_REGION - value: "VALUE_AKS_REGION" - #Uncomment below two lines for ACS clusters and set the cluster names manually. 
Also comment out the above two lines for ACS clusters - # - name: ACS_RESOURCE_NAME - # value: "aks-engine-health" - - name: CONTROLLER_TYPE - value: "ReplicaSet" - - name: NODE_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - securityContext: - privileged: true - ports: - - containerPort: 25225 - protocol: TCP - - containerPort: 25224 - protocol: UDP - - containerPort: 25235 - protocol: TCP - name: in-rs-tcp - volumeMounts: - - mountPath: /var/run/host - name: docker-sock - - mountPath: /var/log - name: host-log - - mountPath: /var/lib/docker/containers - name: containerlog-path - - mountPath: /etc/kubernetes/host - name: azure-json-path - - mountPath: /etc/omsagent-secret - name: omsagent-secret - readOnly: true - - mountPath : /etc/config - name: omsagent-rs-config - - mountPath: /etc/config/settings - name: settings-vol-config - readOnly: true - - mountPath: "/mnt/azure" - name: azurefile-pv - livenessProbe: - exec: - command: - - /bin/bash - - -c - - /opt/livenessprobe.sh - initialDelaySeconds: 60 - periodSeconds: 60 - nodeSelector: - beta.kubernetes.io/os: linux - kubernetes.io/role: agent - volumes: - - name: docker-sock - hostPath: - path: /var/run - - name: container-hostname - hostPath: - path: /etc/hostname - - name: host-log - hostPath: - path: /var/log - - name: containerlog-path - hostPath: - path: /var/lib/docker/containers - - name: azure-json-path - hostPath: - path: /etc/kubernetes - - name: omsagent-secret - secret: - secretName: omsagent-secret - - name: omsagent-rs-config - configMap: - name: omsagent-rs-config - - name: settings-vol-config - configMap: - name: container-azm-ms-agentconfig - optional: true - - name: azurefile-pv - persistentVolumeClaim: - claimName: azurefile diff --git a/scripts/preview/health/optouttemplate.json b/scripts/preview/health/optouttemplate.json deleted file mode 100644 index b036aba24..000000000 --- a/scripts/preview/health/optouttemplate.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - "aksResourceId": { - "type": "string", - "metadata": { - "description": "AKS Cluster resource id" - } - }, - "aksResourceLocation": { - "type": "string", - "metadata": { - "description": "Location of the AKS resource e.g. 
\"East US\"" - } - } - }, - "resources": [ - { - "name": "[split(parameters('aksResourceId'),'/')[8]]", - "type": "Microsoft.ContainerService/managedClusters", - "location": "[parameters('aksResourceLocation')]", - "apiVersion": "2018-03-31", - "properties": { - "mode": "Incremental", - "id": "[parameters('aksResourceId')]", - "addonProfiles": { - "omsagent": { - "enabled": false, - "config": null - } - } - } - } - ] -} diff --git a/source/plugins/go/src/ingestion_token_utils.go b/source/plugins/go/src/ingestion_token_utils.go index 896930005..4f245a514 100644 --- a/source/plugins/go/src/ingestion_token_utils.go +++ b/source/plugins/go/src/ingestion_token_utils.go @@ -75,7 +75,6 @@ type AgentConfiguration struct { ContainerinsightsContainerlogv2 string `json:"CONTAINERINSIGHTS_CONTAINERLOGV2"` ContainerNodeInventoryBlob string `json:"CONTAINER_NODE_INVENTORY_BLOB"` KubeEventsBlob string `json:"KUBE_EVENTS_BLOB"` - KubeHealthBlob string `json:"KUBE_HEALTH_BLOB"` KubeMonAgentEventsBlob string `json:"KUBE_MON_AGENT_EVENTS_BLOB"` KubeNodeInventoryBlob string `json:"KUBE_NODE_INVENTORY_BLOB"` KubePodInventoryBlob string `json:"KUBE_POD_INVENTORY_BLOB"` diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 017bfb08d..20faf4619 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -34,7 +34,6 @@ class CAdvisorMetricsAPIClient @cAdvisorMetricsSecurePort = ENV["IS_SECURE_CADVISOR_PORT"] @containerLogsRoute = ENV["AZMON_CONTAINER_LOGS_ROUTE"] - @hmEnabled = ENV["AZMON_CLUSTER_ENABLE_HEALTH_MODEL"] @npmIntegrationBasic = ENV["TELEMETRY_NPM_INTEGRATION_METRICS_BASIC"] @npmIntegrationAdvanced = ENV["TELEMETRY_NPM_INTEGRATION_METRICS_ADVANCED"] @@ -277,11 +276,6 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met end #telemetry about containerlog Routing for daemonset telemetryProps["containerLogsRoute"] = @containerLogsRoute - - #telemetry about health model - if (!@hmEnabled.nil? && !@hmEnabled.empty?) - telemetryProps["hmEnabled"] = @hmEnabled - end #telemetry for npm integration if (!@npmIntegrationAdvanced.nil? && !@npmIntegrationAdvanced.empty?) 
telemetryProps["int-npm-a"] = "1" diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index b9516c2ce..72b035d45 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -117,7 +117,6 @@ class Constants KUBE_PV_INVENTORY_DATA_TYPE = "KUBE_PV_INVENTORY_BLOB" KUBE_EVENTS_DATA_TYPE = "KUBE_EVENTS_BLOB" KUBE_MON_AGENT_EVENTS_DATA_TYPE = "KUBE_MON_AGENT_EVENTS_BLOB" - KUBE_HEALTH_DATA_TYPE = "KUBE_HEALTH_BLOB" CONTAINERLOGV2_DATA_TYPE = "CONTAINERINSIGHTS_CONTAINERLOGV2" CONTAINERLOG_DATA_TYPE = "CONTAINER_LOG_BLOB" diff --git a/source/plugins/ruby/filter_cadvisor_health_container.rb b/source/plugins/ruby/filter_cadvisor_health_container.rb deleted file mode 100644 index ab64b6e61..000000000 --- a/source/plugins/ruby/filter_cadvisor_health_container.rb +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/local/bin/ruby -# frozen_string_literal: true - -require 'fluent/plugin/filter' - -module Fluent::Plugin - require 'logger' - require 'yajl/json_gem' - require_relative 'oms_common' - require_relative "ApplicationInsightsUtility" - Dir[File.join(__dir__, './health', '*.rb')].each { |file| require file } - - - class CAdvisor2ContainerHealthFilter < Filter - include HealthModel - Fluent::Plugin.register_filter('cadvisor_health_container', self) - - config_param :log_path, :string, :default => '/var/opt/microsoft/docker-cimprov/log/health_monitors.log' - config_param :metrics_to_collect, :string, :default => 'cpuUsageNanoCores,memoryRssBytes' - config_param :container_resource_refresh_interval_minutes, :integer, :default => 5 - - @@object_name_k8s_container = 'K8SContainer' - @@counter_name_cpu = 'cpuusagenanocores' - @@counter_name_memory_rss = 'memoryrssbytes' - @@cluster_health_model_enabled = HealthMonitorUtils.is_cluster_health_model_enabled - - def initialize - begin - super - @metrics_to_collect_hash = {} - @formatter = HealthContainerCpuMemoryRecordFormatter.new - rescue => e - @log.info "Error in filter_cadvisor_health_container initialize #{e.backtrace}" - ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) - end - end - - def configure(conf) - begin - super - @log = HealthMonitorUtils.get_log_handle - @log.debug {'Starting filter_cadvisor2health plugin'} - rescue => e - @log.info "Error in filter_cadvisor_health_container configure #{e.backtrace}" - ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) - end - end - - def start - begin - super - @metrics_to_collect_hash = HealthMonitorUtils.build_metrics_hash(@metrics_to_collect) - ApplicationInsightsUtility.sendCustomEvent("filter_cadvisor_health_container Plugin Start", {}) - rescue => e - @log.info "Error in filter_cadvisor_health_container start #{e.backtrace}" - ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) - end - end - - def filter_stream(tag, es) - if !@@cluster_health_model_enabled - @log.info "Cluster Health Model disabled in filter_cadvisor_health_container" - return Fluent::MultiEventStream.new - end - new_es = Fluent::MultiEventStream.new - records_count = 0 - es.each { |time, record| - begin - filtered_record = filter(tag, time, record) - if !filtered_record.nil? 
- new_es.add(time, filtered_record) - records_count += 1 - end - rescue => e - @log.info "Error in filter_cadvisor_health_container filter_stream #{e.backtrace}" - ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) - end - } - @log.debug "filter_cadvisor_health_container Records Count #{records_count}" - new_es - end - - def filter(tag, time, record) - begin - if record.key?("MonitorLabels") - return record - end - - object_name = record['ObjectName'] - counter_name = JSON.parse(record['json_Collections'])[0]['CounterName'].downcase - if @metrics_to_collect_hash.key?(counter_name) - if object_name == @@object_name_k8s_container - return @formatter.get_record_from_cadvisor_record(record) - end - end - return nil - rescue => e - @log.debug "Error in filter #{e}" - @log.debug "record #{record}" - @log.debug "backtrace #{e.backtrace}" - ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) - return nil - end - end - end -end diff --git a/source/plugins/ruby/filter_cadvisor_health_node.rb b/source/plugins/ruby/filter_cadvisor_health_node.rb deleted file mode 100644 index ddbb871e8..000000000 --- a/source/plugins/ruby/filter_cadvisor_health_node.rb +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/local/bin/ruby -# frozen_string_literal: true - -require 'fluent/plugin/filter' - -module Fluent::Plugin - require 'logger' - require 'yajl/json_gem' - require_relative 'oms_common' - require_relative "ApplicationInsightsUtility" - require_relative "KubernetesApiClient" - Dir[File.join(__dir__, './health', '*.rb')].each { |file| require file } - - class CAdvisor2NodeHealthFilter < Filter - include HealthModel - Fluent::Plugin.register_filter('cadvisor_health_node', self) - - attr_accessor :provider, :resources - - config_param :metrics_to_collect, :string, :default => 'cpuUsageNanoCores,memoryRssBytes' - config_param :container_resource_refresh_interval_minutes, :integer, :default => 5 - config_param :health_monitor_config_path, :default => '/etc/opt/microsoft/docker-cimprov/health/healthmonitorconfig.json' - - @@object_name_k8s_node = 'K8SNode' - @@object_name_k8s_container = 'K8SContainer' - - @@counter_name_cpu = 'cpuusagenanocores' - @@counter_name_memory_rss = 'memoryrssbytes' - - @@hm_log = HealthMonitorUtils.get_log_handle - @@hostName = (OMS::Common.get_hostname) - @@clusterName = KubernetesApiClient.getClusterName - @@clusterId = KubernetesApiClient.getClusterId - @@clusterRegion = KubernetesApiClient.getClusterRegion - @@cluster_health_model_enabled = HealthMonitorUtils.is_cluster_health_model_enabled - - def initialize - begin - super - @last_resource_refresh = DateTime.now.to_time.to_i - @metrics_to_collect_hash = {} - @resources = HealthKubernetesResources.instance # this doesnt require node and pod inventory. 
So no need to populate them - @provider = HealthMonitorProvider.new(@@clusterId, HealthMonitorUtils.get_cluster_labels, @resources, @health_monitor_config_path) - rescue => e - ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) - end - end - - def configure(conf) - begin - super - @log = HealthMonitorUtils.get_log_handle - @log.debug {'Starting filter_cadvisor2health plugin'} - rescue => e - ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) - end - end - - def start - begin - super - @cpu_capacity = 1.0 #avoid divide by zero error in case of network issues accessing kube-api - @memory_capacity = 1.0 - @metrics_to_collect_hash = HealthMonitorUtils.build_metrics_hash(@metrics_to_collect) - @log.debug "Calling ensure_cpu_memory_capacity_set cpu_capacity #{@cpu_capacity} memory_capacity #{@memory_capacity}" - node_capacity = HealthMonitorUtils.ensure_cpu_memory_capacity_set(@@hm_log, @cpu_capacity, @memory_capacity, @@hostName) - @cpu_capacity = node_capacity[0] - @memory_capacity = node_capacity[1] - @log.info "CPU Capacity #{@cpu_capacity} Memory Capacity #{@memory_capacity}" - #HealthMonitorUtils.refresh_kubernetes_api_data(@log, @@hostName) - ApplicationInsightsUtility.sendCustomEvent("filter_cadvisor_health Plugin Start", {}) - rescue => e - ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) - end - end - - def filter_stream(tag, es) - if !@@cluster_health_model_enabled - @log.info "Cluster Health Model disabled in filter_cadvisor_health_node" - return Fluent::MultiEventStream.new - end - begin - node_capacity = HealthMonitorUtils.ensure_cpu_memory_capacity_set(@@hm_log, @cpu_capacity, @memory_capacity, @@hostName) - @cpu_capacity = node_capacity[0] - @memory_capacity = node_capacity[1] - new_es = Fluent::MultiEventStream.new - records_count = 0 - es.each { |time, record| - filtered_record = filter(tag, time, record) - if !filtered_record.nil? - new_es.add(time, filtered_record) - records_count += 1 - end - } - @log.debug "Filter Records Count #{records_count}" - return new_es - rescue => e - @log.info "Error in filter_cadvisor_health_node filter_stream #{e.backtrace}" - ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) - return Fluent::MultiEventStream.new - end - end - - def filter(tag, time, record) - begin - if record.key?("MonitorLabels") - return record - end - - object_name = record['ObjectName'] - counter_name = JSON.parse(record['json_Collections'])[0]['CounterName'].downcase - if @metrics_to_collect_hash.key?(counter_name.downcase) - metric_value = JSON.parse(record['json_Collections'])[0]['Value'] - case object_name - when @@object_name_k8s_node - case counter_name.downcase - when @@counter_name_cpu - process_node_cpu_record(record, metric_value) - when @@counter_name_memory_rss - process_node_memory_record(record, metric_value) - end - end - end - rescue => e - @log.debug "Error in filter #{e}" - @log.debug "record #{record}" - @log.debug "backtrace #{e.backtrace}" - ApplicationInsightsUtility.sendExceptionTelemetry(e) - return nil - end - end - - def process_node_cpu_record(record, metric_value) - monitor_id = MonitorId::NODE_CPU_MONITOR_ID - #@log.debug "processing node cpu record" - if record.nil? 
- return nil - else - instance_name = record['InstanceName'] - #@log.info "CPU capacity #{@cpu_capacity}" - metric_value /= 1000000 - percent = (metric_value.to_f/@cpu_capacity*100).round(2) - #@log.debug "Percentage of CPU limit: #{percent}" - state = HealthMonitorUtils.compute_percentage_state(percent, @provider.get_config(MonitorId::NODE_CPU_MONITOR_ID)) - #@log.debug "Computed State : #{state}" - timestamp = record['Timestamp'] - health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"cpuUsageMillicores" => metric_value, "cpuUtilizationPercentage" => percent}} - - monitor_instance_id = HealthMonitorUtils.get_monitor_instance_id(monitor_id, [@@clusterId, @@hostName]) - # temp = record.nil? ? "Nil" : record["MonitorInstanceId"] - health_record = {} - time_now = Time.now.utc.iso8601 - health_record[HealthMonitorRecordFields::MONITOR_ID] = monitor_id - health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id - health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record - health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now - health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now - health_record[HealthMonitorRecordFields::NODE_NAME] = @@hostName - @log.info "Processed Node CPU" - return health_record - end - return nil - end - - def process_node_memory_record(record, metric_value) - monitor_id = MonitorId::NODE_MEMORY_MONITOR_ID - #@log.debug "processing node memory record" - if record.nil? - return nil - else - instance_name = record['InstanceName'] - #@log.info "Memory capacity #{@memory_capacity}" - - percent = (metric_value.to_f/@memory_capacity*100).round(2) - #@log.debug "Percentage of Memory limit: #{percent}" - state = HealthMonitorUtils.compute_percentage_state(percent, @provider.get_config(MonitorId::NODE_MEMORY_MONITOR_ID)) - #@log.debug "Computed State : #{state}" - timestamp = record['Timestamp'] - health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"memoryRssBytes" => metric_value.to_f, "memoryUtilizationPercentage" => percent}} - #@log.info health_monitor_record - - monitor_instance_id = HealthMonitorUtils.get_monitor_instance_id(monitor_id, [@@clusterId, @@hostName]) - health_record = {} - time_now = Time.now.utc.iso8601 - health_record[HealthMonitorRecordFields::MONITOR_ID] = monitor_id - health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id - health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record - health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now - health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now - health_record[HealthMonitorRecordFields::NODE_NAME] = @@hostName - @log.info "Processed Node Memory" - return health_record - end - return nil - end - end -end diff --git a/source/plugins/ruby/filter_health_model_builder.rb b/source/plugins/ruby/filter_health_model_builder.rb deleted file mode 100644 index 9decda881..000000000 --- a/source/plugins/ruby/filter_health_model_builder.rb +++ /dev/null @@ -1,286 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. 
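# At a high level (a simplified sketch; the monitor_record variable below is illustrative), this
# filter buffers the node and container health signals arriving on the kubehealth.DaemonSet.*
# tags, folds them into the health model when a kubehealth.ReplicaSet batch arrives, and
# re-emits the finalized monitor records on the rewrite tag, along the lines of:
#
#   new_es = Fluent::MultiEventStream.new
#   new_es.add(Fluent::Engine.now, monitor_record)
#   router.emit_stream(@rewrite_tag, new_es)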
- -# frozen_string_literal: true - -require 'fluent/plugin/filter' - -module Fluent::Plugin - require_relative 'extension_utils' - require 'logger' - require 'yajl/json_gem' - Dir[File.join(__dir__, './health', '*.rb')].each { |file| require file } - - - class FilterHealthModelBuilder < Filter - include HealthModel - Fluent::Plugin.register_filter('health_model_builder', self) - - config_param :enable_log, :integer, :default => 0 - config_param :log_path, :string, :default => '/var/opt/microsoft/docker-cimprov/log/filter_health_model_builder.log' - config_param :model_definition_path, :default => '/etc/opt/microsoft/docker-cimprov/health/health_model_definition.json' - config_param :health_monitor_config_path, :default => '/etc/opt/microsoft/docker-cimprov/health/healthmonitorconfig.json' - config_param :health_state_serialized_path, :default => '/mnt/azure/health_model_state.json' - attr_reader :buffer, :model_builder, :health_model_definition, :monitor_factory, :state_finalizers, :monitor_set, :model_builder, :hierarchy_builder, :resources, :kube_api_down_handler, :provider, :reducer, :state, :generator, :telemetry - - - - @@cluster_id = KubernetesApiClient.getClusterId - @@token_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/token" - @@cert_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" - @@cluster_health_model_enabled = HealthMonitorUtils.is_cluster_health_model_enabled - - def initialize - begin - super - @rewrite_tag = 'oneagent.containerInsights.KUBE_HEALTH_BLOB' - @buffer = HealthModel::HealthModelBuffer.new - @cluster_health_state = ClusterHealthState.new(@@token_file_path, @@cert_file_path) - @health_model_definition = HealthModel::ParentMonitorProvider.new(HealthModel::HealthModelDefinitionParser.new(@model_definition_path).parse_file) - @monitor_factory = HealthModel::MonitorFactory.new - @hierarchy_builder = HealthHierarchyBuilder.new(@health_model_definition, @monitor_factory) - # TODO: Figure out if we need to add NodeMonitorHierarchyReducer to the list of finalizers. For now, dont compress/optimize, since it becomes impossible to construct the model on the UX side - @state_finalizers = [HealthModel::AggregateMonitorStateFinalizer.new] - @monitor_set = HealthModel::MonitorSet.new - @model_builder = HealthModel::HealthModelBuilder.new(@hierarchy_builder, @state_finalizers, @monitor_set) - @kube_api_down_handler = HealthKubeApiDownHandler.new - @resources = HealthKubernetesResources.instance - @reducer = HealthSignalReducer.new - @generator = HealthMissingSignalGenerator.new - @provider = HealthMonitorProvider.new(@@cluster_id, HealthMonitorUtils.get_cluster_labels, @resources, @health_monitor_config_path) - @cluster_old_state = 'none' - @cluster_new_state = 'none' - @container_cpu_memory_records = [] - @telemetry = HealthMonitorTelemetry.new - @state = HealthMonitorState.new - # move network calls to the end. 
This will ensure all the instance variables get initialized - if @@cluster_health_model_enabled - deserialized_state_info = @cluster_health_state.get_state - @state.initialize_state(deserialized_state_info) - end - - rescue => e - ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) - end - end - - def configure(conf) - begin - super - @log = nil - if @enable_log - @log = Logger.new(@log_path, 'weekly') - @log.info 'Starting filter_health_model_builder plugin' - end - rescue => e - ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) - end - end - - def start - super - end - - def shutdown - super - end - - def filter_stream(tag, es) - if !@@cluster_health_model_enabled - @log.info "Cluster Health Model disabled in filter_health_model_builder" - return Fluent::MultiEventStream.new - end - begin - new_es = Fluent::MultiEventStream.new - time = Time.now - if ExtensionUtils.isAADMSIAuthMode() - $log.info("filter_health_model_builder::enumerate: AAD AUTH MSI MODE") - if @rewrite_tag.nil? || !@rewrite_tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) - @rewrite_tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_HEALTH_DATA_TYPE) - end - $log.info("filter_health_model_builder::filter_stream: using tag -#{@rewrite_tag} @ #{Time.now.utc.iso8601}") - end - - if tag.start_with?("kubehealth.DaemonSet.Node") - node_records = [] - if !es.nil? - es.each{|time, record| - node_records.push(record) - } - @buffer.add_to_buffer(node_records) - end - return Fluent::MultiEventStream.new - elsif tag.start_with?("kubehealth.DaemonSet.Container") - container_records = [] - if !es.nil? - es.each{|time, record| - container_records.push(record) - } - end - container_records_aggregator = HealthContainerCpuMemoryAggregator.new(@resources, @provider) - if @container_cpu_memory_records.nil? - @log.info "@container_cpu_memory_records was not initialized" - @container_cpu_memory_records = [] #in some clusters, this is null, so initialize it again. - end - @container_cpu_memory_records.push(*container_records) # push the records for aggregation later - return Fluent::MultiEventStream.new - elsif tag.start_with?("kubehealth.ReplicaSet") - records = [] - es.each{|time, record| - records.push(record) - } - @buffer.add_to_buffer(records) # in_kube_health records - - aggregated_container_records = [] - if !@container_cpu_memory_records.nil? && !@container_cpu_memory_records.empty? 
- container_records_aggregator = HealthContainerCpuMemoryAggregator.new(@resources, @provider) - deduped_records = container_records_aggregator.dedupe_records(@container_cpu_memory_records) - container_records_aggregator.aggregate(deduped_records) - container_records_aggregator.compute_state - aggregated_container_records = container_records_aggregator.get_records - end - @buffer.add_to_buffer(aggregated_container_records) #container cpu/memory records - records_to_process = @buffer.get_buffer - @buffer.reset_buffer - @container_cpu_memory_records = [] - - health_monitor_records = [] - records_to_process.each do |record| - monitor_instance_id = record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] - monitor_id = record[HealthMonitorRecordFields::MONITOR_ID] - #HealthMonitorRecord - health_monitor_record = HealthMonitorRecord.new( - record[HealthMonitorRecordFields::MONITOR_ID], - record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID], - record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED], - record[HealthMonitorRecordFields::DETAILS]["state"], - @provider.get_labels(record), - @provider.get_config(monitor_id), - record[HealthMonitorRecordFields::DETAILS] - ) - health_monitor_records.push(health_monitor_record) - #puts "#{monitor_instance_id} #{instance_state.new_state} #{instance_state.old_state} #{instance_state.should_send}" - end - - @log.info "health_monitor_records.size #{health_monitor_records.size}" - # Dedupe daemonset signals - # Remove unit monitor signals for “gone” objects - # update state for the reduced set of signals - reduced_records = @reducer.reduce_signals(health_monitor_records, @resources) - reduced_records.each{|record| - @state.update_state(record, - @provider.get_config(record.monitor_id), - false, - @telemetry - ) - # get the health state based on the monitor's operational state - # update state calls updates the state of the monitor based on configuration and history of the the monitor records - record.state = @state.get_state(record.monitor_instance_id).new_state - } - @log.info "after deduping and removing gone objects reduced_records.size #{reduced_records.size}" - - reduced_records = @kube_api_down_handler.handle_kube_api_down(reduced_records) - @log.info "after kube api down handler health_monitor_records.size #{health_monitor_records.size}" - - #get the list of 'none' and 'unknown' signals - missing_signals = @generator.get_missing_signals(@@cluster_id, reduced_records, @resources, @provider) - - @log.info "after getting missing signals missing_signals.size #{missing_signals.size}" - #update state for missing signals - missing_signals.each{|signal| - - @state.update_state(signal, @provider.get_config(signal.monitor_id), false, @telemetry) - @log.info "After Updating #{@state.get_state(signal.monitor_instance_id)} #{@state.get_state(signal.monitor_instance_id).new_state}" - # for unknown/none records, update the "monitor state" to be the latest state (new_state) of the monitor instance from the state - signal.state = @state.get_state(signal.monitor_instance_id).new_state - } - - @generator.update_last_received_records(reduced_records) - all_records = reduced_records.clone - all_records.push(*missing_signals) - - @log.info "after Adding missing signals all_records.size #{all_records.size}" - - HealthMonitorHelpers.add_agentpool_node_label_if_not_present(all_records) - - # build the health model - @model_builder.process_records(all_records) - all_monitors = @model_builder.finalize_model - - @log.info "after building health_model #{all_monitors.size}" - - # 
update the state for aggregate monitors (unit monitors are updated above) - all_monitors.each{|monitor_instance_id, monitor| - if monitor.is_aggregate_monitor - @state.update_state(monitor, - @provider.get_config(monitor.monitor_id), - true, - @telemetry - ) - end - - instance_state = @state.get_state(monitor_instance_id) - #puts "#{monitor_instance_id} #{instance_state.new_state} #{instance_state.old_state} #{instance_state.should_send}" - should_send = instance_state.should_send - - # always send cluster monitor as a heartbeat - if !should_send && monitor_instance_id != MonitorId::CLUSTER - all_monitors.delete(monitor_instance_id) - end - } - - @log.info "after optimizing health signals all_monitors.size #{all_monitors.size}" - - - # for each key in monitor.keys, - # get the state from health_monitor_state - # generate the record to send - emit_time = Fluent::Engine.now - all_monitors.keys.each{|key| - record = @provider.get_record(all_monitors[key], state) - if record[HealthMonitorRecordFields::MONITOR_ID] == MonitorId::CLUSTER - if !record[HealthMonitorRecordFields::DETAILS].nil? - details = JSON.parse(record[HealthMonitorRecordFields::DETAILS]) - details[HealthMonitorRecordFields::HEALTH_MODEL_DEFINITION_VERSION] = "#{ENV['HEALTH_MODEL_DEFINITION_VERSION']}" - record[HealthMonitorRecordFields::DETAILS] = details.to_json - end - if all_monitors.size > 1 - old_state = record[HealthMonitorRecordFields::OLD_STATE] - new_state = record[HealthMonitorRecordFields::NEW_STATE] - if old_state != new_state && @cluster_old_state != old_state && @cluster_new_state != new_state - ApplicationInsightsUtility.sendCustomEvent("HealthModel_ClusterStateChanged",{"old_state" => old_state , "new_state" => new_state, "monitor_count" => all_monitors.size}) - @log.info "sent telemetry for cluster state change from #{record['OldState']} to #{record['NewState']}" - @cluster_old_state = old_state - @cluster_new_state = new_state - end - end - end - new_es.add(emit_time, record) - } - - #emit the stream - router.emit_stream(@rewrite_tag, new_es) - - #initialize monitor_set and model_builder - @monitor_set = HealthModel::MonitorSet.new - @model_builder = HealthModel::HealthModelBuilder.new(@hierarchy_builder, @state_finalizers, @monitor_set) - - #update cluster state custom resource - @cluster_health_state.update_state(@state.to_h) - @telemetry.send - # return an empty event stream, else the match will throw a NoMethodError - return Fluent::MultiEventStream.new - elsif tag.start_with?(@rewrite_tag) - # this filter also acts as a pass through as we are rewriting the tag and emitting to the fluent stream - es - else - raise "Invalid tag #{tag} received" - end - - rescue => e - ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) - @log.warn "Message: #{e.message} Backtrace: #{e.backtrace}" - return nil - end - end - end -end diff --git a/source/plugins/ruby/health/agg_monitor_id_labels.rb b/source/plugins/ruby/health/agg_monitor_id_labels.rb deleted file mode 100644 index 03680d054..000000000 --- a/source/plugins/ruby/health/agg_monitor_id_labels.rb +++ /dev/null @@ -1,26 +0,0 @@ -# frozen_string_literal: true -require_relative 'health_model_constants' - -module HealthModel - class AggregateMonitorInstanceIdLabels - @@id_labels_mapping = { - MonitorId::SYSTEM_WORKLOAD => [HealthMonitorLabels::NAMESPACE, HealthMonitorLabels::WORKLOAD_NAME], - MonitorId::USER_WORKLOAD => [HealthMonitorLabels::NAMESPACE, HealthMonitorLabels::WORKLOAD_NAME], - MonitorId::NODE => 
[HealthMonitorLabels::AGENTPOOL, HealthMonitorLabels::ROLE, HealthMonitorLabels::HOSTNAME], - MonitorId::NAMESPACE => [HealthMonitorLabels::NAMESPACE], - MonitorId::AGENT_NODE_POOL => [HealthMonitorLabels::AGENTPOOL], - MonitorId::CONTAINER => [HealthMonitorLabels::NAMESPACE, HealthMonitorLabels::WORKLOAD_NAME, HealthMonitorLabels::CONTAINER], - MonitorId::CONTAINER_CPU_MONITOR_ID => [HealthMonitorLabels::NAMESPACE, HealthMonitorLabels::WORKLOAD_NAME], - MonitorId::CONTAINER_MEMORY_MONITOR_ID => [HealthMonitorLabels::NAMESPACE, HealthMonitorLabels::WORKLOAD_NAME], - } - - def self.get_labels_for(monitor_id) - if @@id_labels_mapping.key?(monitor_id) - return @@id_labels_mapping[monitor_id] - else - return [] - end - - end - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/aggregate_monitor.rb b/source/plugins/ruby/health/aggregate_monitor.rb deleted file mode 100644 index a774478e7..000000000 --- a/source/plugins/ruby/health/aggregate_monitor.rb +++ /dev/null @@ -1,227 +0,0 @@ -# frozen_string_literal: true - -require_relative 'health_model_constants' -require 'yajl/json_gem' - -# Require only when running inside container. -# otherwise unit tests will fail due to ApplicationInsightsUtility dependency on base omsagent ruby files. If you have your dev machine starting with omsagent-rs, then GOOD LUCK! -if Socket.gethostname.start_with?('omsagent-rs') - require_relative '../ApplicationInsightsUtility' -end - -module HealthModel - class AggregateMonitor - attr_accessor :monitor_id, :monitor_instance_id, :state, :transition_date_time, :aggregation_algorithm, :aggregation_algorithm_params, :labels, :is_aggregate_monitor, :details - attr_reader :member_monitors, :member_state_counts - - @@sort_key_order = { - MonitorState::UNKNOWN => 1, - MonitorState::CRITICAL => 2, - MonitorState::WARNING => 3, - MonitorState::HEALTHY => 4, - MonitorState::NONE => 5 - } - - @@telemetry_sent_hash = {} - - # constructor - def initialize( - monitor_id, - monitor_instance_id, - state, - transition_date_time, - aggregation_algorithm, - aggregation_algorithm_params, - labels - ) - @monitor_id = monitor_id - @monitor_instance_id = monitor_instance_id - @state = state - @transition_date_time = transition_date_time - @aggregation_algorithm = aggregation_algorithm || AggregationAlgorithm::WORSTOF - @aggregation_algorithm_params = aggregation_algorithm_params - @labels = labels - @member_monitors = {} - @member_state_counts = {} - @is_aggregate_monitor = true - end - - # adds a member monitor as a child - def add_member_monitor(member_monitor_instance_id) - unless @member_monitors.key?(member_monitor_instance_id) - @member_monitors[member_monitor_instance_id] = true - end - end - - #removes a member monitor - def remove_member_monitor(member_monitor_instance_id) - if @member_monitors.key?(member_monitor_instance_id) - @member_monitors.delete(member_monitor_instance_id) - end - end - - # return the member monitors as an array - def get_member_monitors - @member_monitors.map(&:first) - end - - # calculates the state of the aggregate monitor based on aggregation algorithm and child monitor states - def calculate_state(monitor_set) - case @aggregation_algorithm - when AggregationAlgorithm::WORSTOF - @state = calculate_worst_of_state(monitor_set) - when AggregationAlgorithm::PERCENTAGE - @state = calculate_percentage_state(monitor_set) - else - raise 'No aggregation algorithm specified' - end - end - - def calculate_details(monitor_set) - @details = {} - @details['details'] = {} - @details['state'] 
= state - @details['timestamp'] = transition_date_time - ids = [] - member_monitor_instance_ids = get_member_monitors - member_monitor_instance_ids.each{|member_monitor_id| - member_monitor = monitor_set.get_monitor(member_monitor_id) - member_state = member_monitor.state - if @details['details'].key?(member_state) - ids = @details['details'][member_state] - if !ids.include?(member_monitor.monitor_instance_id) - ids.push(member_monitor.monitor_instance_id) - end - @details['details'][member_state] = ids - else - @details['details'][member_state] = [member_monitor.monitor_instance_id] - end - } - end - - # calculates the worst of state, given the member monitors - def calculate_worst_of_state(monitor_set) - - @member_state_counts = map_member_monitor_states(monitor_set) - - if member_state_counts.length === 0 - return MonitorState::NONE - end - - if member_state_counts.key?(MonitorState::CRITICAL) && member_state_counts[MonitorState::CRITICAL] > 0 - return MonitorState::CRITICAL - end - if member_state_counts.key?(MonitorState::ERROR) && member_state_counts[MonitorState::ERROR] > 0 - return MonitorState::ERROR - end - if member_state_counts.key?(MonitorState::WARNING) && member_state_counts[MonitorState::WARNING] > 0 - return MonitorState::WARNING - end - - if member_state_counts.key?(MonitorState::UNKNOWN) && member_state_counts[MonitorState::UNKNOWN] > 0 - return MonitorState::UNKNOWN - end - - if member_state_counts.key?(MonitorState::HEALTHY) && member_state_counts[MonitorState::HEALTHY] > 0 - return MonitorState::HEALTHY #healthy should win over none in aggregation - end - - return MonitorState::NONE - - end - - # calculates a percentage state, given the aggregation algorithm parameters - def calculate_percentage_state(monitor_set) - - #sort - #TODO: What if sorted_filtered is empty? is that even possible? - log = HealthMonitorHelpers.get_log_handle - sorted_filtered = sort_filter_member_monitors(monitor_set) - - state_threshold = @aggregation_algorithm_params['state_threshold'].to_f - - if sorted_filtered.nil? - size = 0 - else - size = sorted_filtered.size - end - - if size == 1 - @state = sorted_filtered[0].state - else - count = ((state_threshold*size)/100).ceil - index = size - count - if sorted_filtered.nil? || sorted_filtered[index].nil? - @state = HealthMonitorStates::UNKNOWN - if !@@telemetry_sent_hash.key?(@monitor_instance_id) - log.debug "Adding to telemetry sent hash #{@monitor_instance_id}" - @@telemetry_sent_hash[@monitor_instance_id] = true - log.info "Index: #{index} size: #{size} Count: #{count}" - custom_error_event_map = {} - custom_error_event_map["count"] = count - custom_error_event_map["index"] = index - custom_error_event_map["size"] = size - if !sorted_filtered.nil? - sorted_filtered.each_index{|i| - custom_error_event_map[i] = sorted_filtered[i].state - } - end - ApplicationInsightsUtility.sendCustomEvent("PercentageStateCalculationErrorEvent", custom_error_event_map) - end - else - @state = sorted_filtered[index].state - end - @state - end - end - - # maps states of member monitors to counts - def map_member_monitor_states(monitor_set) - member_monitor_instance_ids = get_member_monitors - if member_monitor_instance_ids.nil? 
|| member_monitor_instance_ids.size == 0 - return {} - end - - state_counts = {} - - member_monitor_instance_ids.each {|monitor_instance_id| - - member_monitor = monitor_set.get_monitor(monitor_instance_id) - monitor_state = member_monitor.state - - if !state_counts.key?(monitor_state) - state_counts[monitor_state] = 1 - else - count = state_counts[monitor_state] - state_counts[monitor_state] = count+1 - end - } - - return state_counts; - end - - # Sort the member monitors in the following order -=begin - 1. Error - 2. Unknown - 3. Critical - 4. Warning - 5. Healthy - Remove 'none' state monitors -=end - def sort_filter_member_monitors(monitor_set) - member_monitor_instance_ids = get_member_monitors - member_monitors = [] - - member_monitor_instance_ids.each {|monitor_instance_id| - member_monitor = monitor_set.get_monitor(monitor_instance_id) - member_monitors.push(member_monitor) - } - - filtered = member_monitors.keep_if{|monitor| monitor.state != MonitorState::NONE} - sorted = filtered.sort_by{ |monitor| [@@sort_key_order[monitor.state]] } - - return sorted - end - end -end diff --git a/source/plugins/ruby/health/aggregate_monitor_state_finalizer.rb b/source/plugins/ruby/health/aggregate_monitor_state_finalizer.rb deleted file mode 100644 index dd69c9c4d..000000000 --- a/source/plugins/ruby/health/aggregate_monitor_state_finalizer.rb +++ /dev/null @@ -1,35 +0,0 @@ -# frozen_string_literal: true - -module HealthModel - class AggregateMonitorStateFinalizer - - def finalize(monitor_set) - top_level_monitor = monitor_set.get_monitor(MonitorId::CLUSTER) - if !top_level_monitor.nil? - calculate_subtree_state(top_level_monitor, monitor_set) - end - monitor_set.get_map.each{|k,v| - if v.is_aggregate_monitor - v.calculate_details(monitor_set) - end - } - end - - private - def calculate_subtree_state(monitor, monitor_set) - if monitor.nil? || !monitor.is_aggregate_monitor - raise 'AggregateMonitorStateFinalizer:calculateSubtreeState Parameter monitor must be non-null AggregateMonitor' - end - - member_monitor_instance_ids = monitor.get_member_monitors # monitor_instance_ids - member_monitor_instance_ids.each{|member_monitor_instance_id| - member_monitor = monitor_set.get_monitor(member_monitor_instance_id) - - if !member_monitor.nil? 
&& member_monitor.is_aggregate_monitor - calculate_subtree_state(member_monitor, monitor_set) - end - } - monitor.calculate_state(monitor_set) - end - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/cluster_health_state.rb b/source/plugins/ruby/health/cluster_health_state.rb deleted file mode 100644 index e46d0bf5f..000000000 --- a/source/plugins/ruby/health/cluster_health_state.rb +++ /dev/null @@ -1,122 +0,0 @@ -# frozen_string_literal: true - -require "net/http" -require "net/https" -require "uri" -require 'yajl/json_gem' - -module HealthModel - class ClusterHealthState - - attr_reader :token_file_path, :cert_file_path, :log, :http_client, :uri, :token - @@resource_uri_template = "%{kube_api_server_url}/apis/azmon.container.insights/v1/namespaces/kube-system/healthstates/cluster-health-state" - - def initialize(token_file_path, cert_file_path) - @token_file_path = token_file_path - @cert_file_path = cert_file_path - @log = HealthMonitorHelpers.get_log_handle - @http_client = get_http_client - @token = get_token - end - - def update_state(state) #state = hash of monitor_instance_id to HealthMonitorInstanceState struct - get_request = Net::HTTP::Get.new(@uri.request_uri) - monitor_states_hash = {} - state.each {|monitor_instance_id, health_monitor_instance_state| - monitor_states_hash[monitor_instance_id] = health_monitor_instance_state.to_h - } - - get_request["Authorization"] = "Bearer #{@token}" - @log.info "Making GET request to #{@uri.request_uri} @ #{Time.now.utc.iso8601}" - get_response = @http_client.request(get_request) - @log.info "Got response of #{get_response.code} for #{@uri.request_uri} @ #{Time.now.utc.iso8601}" - - if get_response.code.to_i == 404 # NOT found - #POST - update_request = Net::HTTP::Post.new(@uri.request_uri) - update_request["Content-Type"] = "application/json" - - elsif get_response.code.to_i == 200 # Update == Patch - #PATCH - update_request = Net::HTTP::Patch.new(@uri.request_uri) - update_request["Content-Type"] = "application/merge-patch+json" - end - update_request["Authorization"] = "Bearer #{@token}" - - update_request_body = get_update_request_body - update_request_body["state"] = monitor_states_hash.to_json - update_request.body = update_request_body.to_json - - update_response = @http_client.request(update_request) - @log.info "Got a response of #{update_response.code} for #{update_request.method}" - end - - def get_state - get_request = Net::HTTP::Get.new(@uri.request_uri) - get_request["Authorization"] = "Bearer #{@token}" - @log.info "Making GET request to #{@uri.request_uri} @ #{Time.now.utc.iso8601}" - get_response = @http_client.request(get_request) - @log.info "Got response of #{get_response.code} for #{@uri.request_uri} @ #{Time.now.utc.iso8601}" - - if get_response.code.to_i == 200 - return JSON.parse(JSON.parse(get_response.body)["state"]) - else - return {} - end - end - - private - def get_token() - begin - if File.exist?(@token_file_path) && File.readable?(@token_file_path) - token_str = File.read(@token_file_path).strip - return token_str - else - @log.info ("Unable to read token string from #{@token_file_path}") - return nil - end - end - end - - def get_http_client() - kube_api_server_url = get_kube_api_server_url - resource_uri = @@resource_uri_template % { - kube_api_server_url: kube_api_server_url - } - @uri = URI.parse(resource_uri) - http = Net::HTTP.new(@uri.host, @uri.port) - http.use_ssl = true - if !File.exist?(@cert_file_path) - raise "#{@cert_file_path} doesnt exist" - else - http.ca_file = 
@cert_file_path - end - http.verify_mode = OpenSSL::SSL::VERIFY_PEER - return http - end - - def get_kube_api_server_url - if ENV["KUBERNETES_SERVICE_HOST"] && ENV["KUBERNETES_PORT_443_TCP_PORT"] - return "https://#{ENV["KUBERNETES_SERVICE_HOST"]}:#{ENV["KUBERNETES_PORT_443_TCP_PORT"]}" - else - @log.warn ("Kubernetes environment variable not set KUBERNETES_SERVICE_HOST: #{ENV["KUBERNETES_SERVICE_HOST"]} KUBERNETES_PORT_443_TCP_PORT: #{ENV["KUBERNETES_PORT_443_TCP_PORT"]}. Unable to form resourceUri") - if Gem.win_platform? #unit testing on windows dev machine - value = %x( kubectl -n default get endpoints kubernetes --no-headers) - url = "https://#{value.split(' ')[1]}" - return "https://localhost:8080" # This is NEVER used. this is just to return SOME value - end - return nil - end - end - - def get_update_request_body - body = {} - body["apiVersion"] = "azmon.container.insights/v1" - body["kind"] = "HealthState" - body["metadata"] = {} - body["metadata"]["name"] = "cluster-health-state" - body["metadata"]["namespace"] = "kube-system" - return body - end - end -end diff --git a/source/plugins/ruby/health/health_container_cpu_memory_aggregator.rb b/source/plugins/ruby/health/health_container_cpu_memory_aggregator.rb deleted file mode 100644 index e93c66c14..000000000 --- a/source/plugins/ruby/health/health_container_cpu_memory_aggregator.rb +++ /dev/null @@ -1,386 +0,0 @@ -# frozen_string_literal: true - -require_relative 'health_model_constants' - -# Require only when running inside container. -# otherwise unit tests will fail due to ApplicationInsightsUtility dependency on base omsagent ruby files. If you have your dev machine starting with omsagent-rs, then GOOD LUCK! -if Socket.gethostname.start_with?('omsagent-rs') - require_relative '../ApplicationInsightsUtility' -end -=begin - @cpu_records/@memory_records - [ - { - "namespace_workload_container_name" : { - "limit" : limit, #number - "limit_set" : limit_set, #bool - "record_count" : record_count, #number - "workload_name": workload_name, - "workload_kind": workload_kind, - "namespace" : namespace, - "container": container, - records:[ - { - "counter_value": counter_value, - "pod_name": pod_name, - "container": container, - "state" : state - }, - { - "counter_value": counter_value, - "pod_name": pod_name, - "container": container, - "state" : state - } - ] - } - } - ] -=end -module HealthModel - # this class aggregates the records at the container level - class HealthContainerCpuMemoryAggregator - - attr_reader :pod_uid_lookup, :workload_container_count, :cpu_records, :memory_records, :provider - - @@memory_counter_name = 'memoryRssBytes' - @@cpu_counter_name = 'cpuUsageNanoCores' - @@workload_container_count_empty_event_sent = {} - @@limit_is_array_event_sent = {} - @@WORKLOAD_CONTAINER_COUNT_EMPTY_EVENT = "WorkloadContainerCountEmptyEvent" - @@LIMIT_IS_ARRAY_EVENT = "ResourceLimitIsAnArrayEvent" - @@cpu_last_sent_monitors = {} - @@memory_last_sent_monitors = {} - - def initialize(resources, provider) - @pod_uid_lookup = resources.get_pod_uid_lookup - @workload_container_count = resources.get_workload_container_count - @cpu_records = {} - @memory_records = {} - @log = HealthMonitorHelpers.get_log_handle - @provider = provider - end - - def dedupe_records(container_records) - cpu_deduped_instances = {} - memory_deduped_instances = {} - container_records = container_records.keep_if{|record| record['CounterName'] == @@memory_counter_name || record['CounterName'] == @@cpu_counter_name} - - container_records.each do |record| - begin - 
instance_name = record["InstanceName"] - counter_name = record["CounterName"] - case counter_name - when @@memory_counter_name - resource_instances = memory_deduped_instances - when @@cpu_counter_name - resource_instances = cpu_deduped_instances - else - @log.info "Unexpected Counter Name #{counter_name}" - next - end - if !resource_instances.key?(instance_name) - resource_instances[instance_name] = record - else - r = resource_instances[instance_name] - if record["Timestamp"] > r["Timestamp"] - @log.info "Dropping older record for instance #{instance_name} new: #{record["Timestamp"]} old: #{r["Timestamp"]}" - resource_instances[instance_name] = record - end - end - rescue => e - @log.info "Exception when deduping record #{record}" - next - end - end - return cpu_deduped_instances.values.concat(memory_deduped_instances.values) - end - - def aggregate(container_records) - #filter and select only cpuUsageNanoCores and memoryRssBytes - container_records = container_records.keep_if{|record| record['CounterName'] == @@memory_counter_name || record['CounterName'] == @@cpu_counter_name} - # poduid lookup has poduid/cname --> workload_name, namespace, cpu_limit, memory limit mapping - # from the container records, extract the poduid/cname, get the values from poduid_lookup, and aggregate based on namespace_workload_cname - container_records.each do |record| - begin - instance_name = record["InstanceName"] - lookup_key = instance_name.split('/').last(2).join('/') - if !@pod_uid_lookup.key?(lookup_key) - next - end - namespace = @pod_uid_lookup[lookup_key]['namespace'] - workload_name = @pod_uid_lookup[lookup_key]['workload_name'] - cname = lookup_key.split('/')[1] - counter_name = record["CounterName"] - case counter_name - when @@memory_counter_name - resource_hash = @memory_records - resource_type = 'memory' - when @@cpu_counter_name - resource_hash = @cpu_records - resource_type = 'cpu' - else - @log.info "Unexpected Counter Name #{counter_name}" - next - end - - # this is used as a look up from the pod_uid_lookup in kubernetes_health_resources object - resource_hash_key = "#{namespace}_#{workload_name.split('~~')[1]}_#{cname}" - - # if the resource map doesnt contain the key, add limit, count and records - if !resource_hash.key?(resource_hash_key) - resource_hash[resource_hash_key] = {} - resource_hash[resource_hash_key]["limit"] = @pod_uid_lookup[lookup_key]["#{resource_type}_limit"] - resource_hash[resource_hash_key]["limit_set"] = @pod_uid_lookup[lookup_key]["#{resource_type}_limit_set"] - resource_hash[resource_hash_key]["record_count"] = @workload_container_count[resource_hash_key] - resource_hash[resource_hash_key]["workload_name"] = @pod_uid_lookup[lookup_key]["workload_name"] - resource_hash[resource_hash_key]["workload_kind"] = @pod_uid_lookup[lookup_key]["workload_kind"] - resource_hash[resource_hash_key]["namespace"] = @pod_uid_lookup[lookup_key]["namespace"] - resource_hash[resource_hash_key]["container"] = @pod_uid_lookup[lookup_key]["container"] - resource_hash[resource_hash_key]["records"] = [] - end - - container_instance_record = {} - pod_name = @pod_uid_lookup[lookup_key]["pod_name"] - #append the record to the hash - # append only if the record is not a duplicate record - container_instance_record["pod_name"] = pod_name - container_instance_record["counter_value"] = record["CounterValue"] - container_instance_record["container"] = @pod_uid_lookup[lookup_key]["container"] - container_instance_record["state"] = calculate_container_instance_state( - 
container_instance_record["counter_value"], - resource_hash[resource_hash_key]["limit"], - @provider.get_config(MonitorId::CONTAINER_MEMORY_MONITOR_ID)) - resource_hash[resource_hash_key]["records"].push(container_instance_record) - rescue => e - @log.info "Error in HealthContainerCpuMemoryAggregator aggregate #{e.backtrace} #{e.message} #{record}" - end - end - end - - def compute_state() - # if missing records, set state to unknown - # if limits not set, set state to warning - # if all records present, sort in descending order of metric, compute index based on StateThresholdPercentage, get the state (pass/fail/warn) based on monitor state (Using [Fail/Warn]ThresholdPercentage, and set the state) - @memory_records.each{|k,v| - @@memory_last_sent_monitors.delete(k) #remove from last sent list if the record is present in the current set of signals - calculate_monitor_state(v, @provider.get_config(MonitorId::CONTAINER_MEMORY_MONITOR_ID)) - } - - @cpu_records.each{|k,v| - @@cpu_last_sent_monitors.delete(k) #remove from last sent list if the record is present in the current set of signals - calculate_monitor_state(v, @provider.get_config(MonitorId::CONTAINER_CPU_MONITOR_ID)) - } - @log.info "Finished computing state" - end - - def get_records - time_now = Time.now.utc.iso8601 - container_cpu_memory_records = [] - - @cpu_records.each{|resource_key, record| - cpu_limit_mc = 1.0 - if record["limit"].is_a?(Numeric) - cpu_limit_mc = record["limit"]/1000000.to_f - else - @log.info "CPU Limit is not a number #{record['limit']}" - if !@@limit_is_array_event_sent.key?(resource_key) - custom_properties = {} - custom_properties['limit'] = record['limit'] - if record['limit'].is_a?(Array) - record['limit'].each_index{|i| - custom_properties[i] = record['limit'][i] - } - end - @@limit_is_array_event_sent[resource_key] = true - #send once per resource key - ApplicationInsightsUtility.sendCustomEvent(@@LIMIT_IS_ARRAY_EVENT, custom_properties) - end - end - health_monitor_record = { - "timestamp" => time_now, - "state" => record["state"], - "details" => { - "cpu_limit_millicores" => cpu_limit_mc, - "cpu_usage_instances" => record["records"].map{|r| r.each {|k,v| - k == "counter_value" ? r[k] = r[k] / 1000000.to_f : r[k] - }}, - "workload_name" => record["workload_name"], - "workload_kind" => record["workload_kind"], - "namespace" => record["namespace"], - "container" => record["container"], - "limit_set" => record["limit_set"] - } - } - - monitor_instance_id = HealthMonitorHelpers.get_monitor_instance_id(MonitorId::CONTAINER_CPU_MONITOR_ID, resource_key.split('_')) #container_cpu_utilization-namespace-workload-container - - health_record = {} - health_record[HealthMonitorRecordFields::MONITOR_ID] = MonitorId::CONTAINER_CPU_MONITOR_ID - health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id - health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record - health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now - health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now - container_cpu_memory_records.push(health_record) - } - - # If all records that were sent previously are present in current set, this will not be executed - if @@cpu_last_sent_monitors.keys.size != 0 - @@cpu_last_sent_monitors.keys.each{|key| - begin - @log.info "Container CPU monitor #{key} not present in current set. 
Sending none state transition" - tokens = key.split('_') - namespace = tokens[0] - workload_name = "#{tokens[0]}~~#{tokens[1]}" - container = tokens[2] - health_monitor_record = { - "timestamp" => time_now, - "state" => HealthMonitorStates::NONE, - "details" => { - "reason" => "No record received for workload #{workload_name}", - "workload_name" => workload_name, - "namespace" => namespace, - "container" => container - } - } - - monitor_instance_id = HealthMonitorHelpers.get_monitor_instance_id(MonitorId::CONTAINER_CPU_MONITOR_ID, key.split('_')) #container_cpu_utilization-namespace-workload-container - - health_record = {} - health_record[HealthMonitorRecordFields::MONITOR_ID] = MonitorId::CONTAINER_CPU_MONITOR_ID - health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id - health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record - health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now - health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now - container_cpu_memory_records.push(health_record) - rescue => e - @log.info "Error when trying to create NONE State transition signal for #{key} for monitor #{monitor_instance_id} #{e.message}" - next - end - } - end - - @memory_records.each{|resource_key, record| - health_monitor_record = { - "timestamp" => time_now, - "state" => record["state"], - "details" => { - "memory_limit_bytes" => record["limit"], - "memory_usage_instances" => record["records"], - "workload_name" => record["workload_name"], - "workload_kind" => record["workload_kind"], - "namespace" => record["namespace"], - "container" => record["container"] - } - } - - monitor_instance_id = HealthMonitorHelpers.get_monitor_instance_id(MonitorId::CONTAINER_MEMORY_MONITOR_ID, resource_key.split('_')) #container_cpu_utilization-namespace-workload-container - - health_record = {} - health_record[HealthMonitorRecordFields::MONITOR_ID] = MonitorId::CONTAINER_MEMORY_MONITOR_ID - health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id - health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record - health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now - health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now - container_cpu_memory_records.push(health_record) - } - - # If all records that were sent previously are present in current set, this will not be executed - if @@memory_last_sent_monitors.keys.size != 0 - @@memory_last_sent_monitors.keys.each{|key| - begin - @log.info "Container Memory monitor #{key} not present in current set. 
Sending none state transition" - tokens = key.split('_') - namespace = tokens[0] - workload_name = "#{tokens[0]}~~#{tokens[1]}" - container = tokens[2] - health_monitor_record = { - "timestamp" => time_now, - "state" => HealthMonitorStates::NONE, - "details" => { - "reason" => "No record received for workload #{workload_name}", - "workload_name" => workload_name, - "namespace" => namespace, - "container" => container - } - } - monitor_instance_id = HealthMonitorHelpers.get_monitor_instance_id(MonitorId::CONTAINER_MEMORY_MONITOR_ID, key.split('_')) #container_cpu_utilization-namespace-workload-container - health_record = {} - health_record[HealthMonitorRecordFields::MONITOR_ID] = MonitorId::CONTAINER_MEMORY_MONITOR_ID - health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id - health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record - health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now - health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now - container_cpu_memory_records.push(health_record) - rescue => e - @log.info "Error when trying to create NONE State transition signal for #{key} for monitor #{monitor_instance_id} #{e.message}" - next - end - } - end - - #reset the last sent monitors list - @@memory_last_sent_monitors = {} - @@cpu_last_sent_monitors = {} - - # add the current set of signals for comparison in next iteration - @cpu_records.keys.each{|k| - @@cpu_last_sent_monitors[k] = true - } - @memory_records.keys.each{|k| - @@memory_last_sent_monitors[k] = true - } - return container_cpu_memory_records - end - - private - def calculate_monitor_state(v, config) - # sort records by descending order of metric - v["records"] = v["records"].sort_by{|record| record["counter_value"]}.reverse - size = v["records"].size - if !v["record_count"].nil? 
- if size < v["record_count"] - unknown_count = v["record_count"] - size - for i in unknown_count.downto(1) - # it requires a lot of computation to figure out which actual pod is not sending the signal - v["records"].insert(0, {"counter_value" => -1, "container" => v["container"], "pod_name" => "???", "state" => HealthMonitorStates::UNKNOWN }) #insert -1 for unknown records - end - end - else - v["state"] = HealthMonitorStates::UNKNOWN - container_key = "#{v['workload_name']}~~#{v['container']}" - @log.info "ContainerKey: #{container_key} Records Size: #{size} Records: #{v['records']} Record Count: #{v['record_count']} #{@workload_container_count}" - - if !@@workload_container_count_empty_event_sent.key?(container_key) - custom_properties = {} - custom_properties = custom_properties.merge(v) - custom_properties = custom_properties.merge(@workload_container_count) - @log.info "Custom Properties : #{custom_properties}" - @@workload_container_count_empty_event_sent[container_key] = true - ApplicationInsightsUtility.sendCustomEvent(@@WORKLOAD_CONTAINER_COUNT_EMPTY_EVENT, custom_properties) - end - return #simply return the state as unknown here - end - - if size == 1 - state_index = 0 - else - state_threshold = config['StateThresholdPercentage'].to_f - count = ((state_threshold*size)/100).ceil - state_index = size - count - end - v["state"] = v["records"][state_index]["state"] - end - - def calculate_container_instance_state(counter_value, limit, config) - percent_value = counter_value * 100 / limit - if percent_value > config['FailIfGreaterThanPercentage'] - return HealthMonitorStates::FAIL - elsif percent_value > config['WarnIfGreaterThanPercentage'] - return HealthMonitorStates::WARNING - else - return HealthMonitorStates::PASS - end - end - end -end diff --git a/source/plugins/ruby/health/health_container_cpu_memory_record_formatter.rb b/source/plugins/ruby/health/health_container_cpu_memory_record_formatter.rb deleted file mode 100644 index ebf3abd7e..000000000 --- a/source/plugins/ruby/health/health_container_cpu_memory_record_formatter.rb +++ /dev/null @@ -1,38 +0,0 @@ -# frozen_string_literal: true - -require 'yajl/json_gem' - -module HealthModel - class HealthContainerCpuMemoryRecordFormatter - - @@health_container_cpu_memory_record_template = '{ - "InstanceName": "%{instance_name}", - "CounterName" : "%{counter_name}", - "CounterValue" : %{metric_value}, - "Timestamp" : "%{timestamp}" - }' - def initialize - @log = HealthMonitorHelpers.get_log_handle - end - - def get_record_from_cadvisor_record(cadvisor_record) - begin - instance_name = cadvisor_record['InstanceName'] - counter_name = JSON.parse(cadvisor_record['json_Collections'])[0]['CounterName'] - metric_value = JSON.parse(cadvisor_record['json_Collections'])[0]['Value'] - timestamp = cadvisor_record['Timestamp'] - - health_container_cpu_memory_record = @@health_container_cpu_memory_record_template % { - instance_name: instance_name, - counter_name: counter_name, - metric_value: metric_value, - timestamp: timestamp - } - return JSON.parse(health_container_cpu_memory_record) - rescue => e - @log.info "Error in get_record_from_cadvisor_record #{e.message} #{e.backtrace}" - return nil - end - end - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/health_hierarchy_builder.rb b/source/plugins/ruby/health/health_hierarchy_builder.rb deleted file mode 100644 index a59020996..000000000 --- a/source/plugins/ruby/health/health_hierarchy_builder.rb +++ /dev/null @@ -1,78 +0,0 @@ -# frozen_string_literal: true 
-require 'yajl/json_gem' - -module HealthModel - class HealthHierarchyBuilder - - attr_accessor :health_model_definition, :monitor_factory - - def initialize(health_model_definition, monitor_factory) - - if !health_model_definition.is_a?(ParentMonitorProvider) - raise "Invalid Type Expected: ParentMonitorProvider Actual: #{@health_model_definition.class.name}" - end - @health_model_definition = health_model_definition - - if !monitor_factory.is_a?(MonitorFactory) - raise "Invalid Type Expected: MonitorFactory Actual: #{@monitor_factory.class.name}" - end - @monitor_factory = monitor_factory - end - - def process_record(health_monitor_record, monitor_set) - if !health_monitor_record.is_a?(HealthMonitorRecord) - raise "Unexpected Type #{health_monitor_record.class}" - end - - # monitor state transition will always be on a unit monitor - child_monitor = @monitor_factory.create_unit_monitor(health_monitor_record) - monitor_set.add_or_update(child_monitor) - parent_monitor_id = @health_model_definition.get_parent_monitor_id(child_monitor) - monitor_labels = child_monitor.labels - monitor_id = child_monitor.monitor_id - - # to construct the parent monitor, - # 1. Child's labels - # 2. Parent monitor's config to determine what labels to copy - # 3. Parent Monitor Id - # 4. Monitor Id --> Labels to hash Mapping to generate the monitor instance id for aggregate monitors - - while !parent_monitor_id.nil? - #puts "Parent Monitor Id #{parent_monitor_id}" - # get the set of labels to copy to parent monitor - parent_monitor_labels = @health_model_definition.get_parent_monitor_labels(monitor_id, monitor_labels, parent_monitor_id) - # get the parent monitor configuration - parent_monitor_configuration = @health_model_definition.get_parent_monitor_config(parent_monitor_id) - #get monitor instance id for parent monitor. Does this belong in ParentMonitorProvider? - parent_monitor_instance_id = @health_model_definition.get_parent_monitor_instance_id(child_monitor.monitor_instance_id, parent_monitor_id, parent_monitor_labels) - # check if monitor set has the parent monitor id - # if not present, add - # if present, update the state based on the aggregation algorithm - parent_monitor = nil - if !monitor_set.contains?(parent_monitor_instance_id) - parent_monitor = @monitor_factory.create_aggregate_monitor(parent_monitor_id, parent_monitor_instance_id, parent_monitor_labels, parent_monitor_configuration['aggregation_algorithm'], parent_monitor_configuration['aggregation_algorithm_params'], child_monitor) - parent_monitor.add_member_monitor(child_monitor.monitor_instance_id) - else - parent_monitor = monitor_set.get_monitor(parent_monitor_instance_id) - # required to calculate the rollup state - parent_monitor.add_member_monitor(child_monitor.monitor_instance_id) - # update to the earliest of the transition times of child monitors - if child_monitor.transition_date_time < parent_monitor.transition_date_time - parent_monitor.transition_date_time = child_monitor.transition_date_time - end - end - - if parent_monitor.nil? 
- raise 'Parent_monitor should not be nil for #{monitor_id}' - end - - monitor_set.add_or_update(parent_monitor) - - child_monitor = parent_monitor - parent_monitor_id = @health_model_definition.get_parent_monitor_id(child_monitor) - monitor_labels = child_monitor.labels - monitor_id = child_monitor.monitor_id - end - end - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/health_kube_api_down_handler.rb b/source/plugins/ruby/health/health_kube_api_down_handler.rb deleted file mode 100644 index bb91f2e3b..000000000 --- a/source/plugins/ruby/health/health_kube_api_down_handler.rb +++ /dev/null @@ -1,30 +0,0 @@ -# frozen_string_literal: true - -require_relative 'health_model_constants' -module HealthModel - class HealthKubeApiDownHandler - def initialize - @@monitors_to_change = [MonitorId::WORKLOAD_CPU_OVERSUBSCRIBED_MONITOR_ID, - MonitorId::WORKLOAD_MEMORY_OVERSUBSCRIBED_MONITOR_ID, - MonitorId::NODE_CONDITION_MONITOR_ID, - MonitorId::USER_WORKLOAD_PODS_READY_MONITOR_ID, - MonitorId::SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID] - end - - # update kube-api dependent monitors to be 'unknown' if kube-api is down or monitor is unavailable - def handle_kube_api_down(health_monitor_records) - health_monitor_records_map = {} - - health_monitor_records.map{|record| health_monitor_records_map[record.monitor_instance_id] = record} - if !health_monitor_records_map.key?(MonitorId::KUBE_API_STATUS) || (health_monitor_records_map.key?(MonitorId::KUBE_API_STATUS) && health_monitor_records_map[MonitorId::KUBE_API_STATUS].state != 'pass') - #iterate over the map and set the state to unknown for related monitors - health_monitor_records.each{|health_monitor_record| - if @@monitors_to_change.include?(health_monitor_record.monitor_id) - health_monitor_record.state = HealthMonitorStates::UNKNOWN - end - } - end - return health_monitor_records - end - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/health_kubernetes_resources.rb b/source/plugins/ruby/health/health_kubernetes_resources.rb deleted file mode 100644 index 743dd8b94..000000000 --- a/source/plugins/ruby/health/health_kubernetes_resources.rb +++ /dev/null @@ -1,288 +0,0 @@ -# frozen_string_literal: true - -require 'singleton' -require_relative 'health_model_constants' - -module HealthModel - class HealthKubernetesResources - - include Singleton - attr_accessor :node_inventory, :pod_inventory, :replicaset_inventory, :pod_uid_lookup, :workload_container_count - attr_reader :nodes, :pods, :workloads, :deployment_lookup - - def initialize - @node_inventory = {} - @pod_inventory = {} - @replicaset_inventory = {} - @nodes = [] - @pods = [] - @workloads = [] - @log = HealthMonitorHelpers.get_log_handle - @pod_uid_lookup = {} - @workload_container_count = {} - @workload_name_cache = {} - end - - def get_node_inventory - return @node_inventory - end - - def get_nodes - @nodes = [] - @node_inventory['items'].each {|node| - if !@nodes.include?(node['metadata']['name']) - @nodes.push(node['metadata']['name']) - end - - } - return @nodes - end - - def set_replicaset_inventory(replicasets) - @replicaset_inventory = replicasets - end - - def get_workload_names - workload_names = {} - @pod_inventory['items'].each do |pod| - workload_name = get_workload_name(pod) - workload_names[workload_name] = true if workload_name - end - return workload_names.keys - end - - def build_pod_uid_lookup - if @pod_inventory.nil? || @pod_inventory['items'].nil? || @pod_inventory['items'].empty? 
|| @pod_inventory['items'].size == 0 - @log.info "Not Clearing pod_uid_lookup and workload_container_count since pod inventory is nil" - return - end - @workload_container_count = {} - @pod_uid_lookup = {} - @pod_inventory['items'].each do |pod| - begin - namespace = pod['metadata']['namespace'] - poduid = pod['metadata']['uid'] - pod_name = pod['metadata']['name'] - workload_name = get_workload_name(pod) - workload_kind = get_workload_kind(pod) - # we don't show jobs in container health - if workload_kind.casecmp('job') == 0 - next - end - pod['spec']['containers'].each do |container| - cname = container['name'] - key = "#{poduid}/#{cname}" - cpu_limit_set = true - memory_limit_set = true - begin - cpu_limit = get_numeric_value('cpu', container['resources']['limits']['cpu']) - rescue => exception - #@log.info "Exception getting container cpu limit #{container['resources']}" - cpu_limit = get_node_capacity(pod['spec']['nodeName'], 'cpu') - cpu_limit_set = false - end - begin - memory_limit = get_numeric_value('memory', container['resources']['limits']['memory']) - rescue => exception - #@log.info "Exception getting container memory limit #{container['resources']}" - memory_limit = get_node_capacity(pod['spec']['nodeName'], 'memory') - memory_limit_set = false - end - @pod_uid_lookup[key] = {"workload_kind" => workload_kind, "workload_name" => workload_name, "namespace" => namespace, "cpu_limit" => cpu_limit, "memory_limit" => memory_limit, "cpu_limit_set" => cpu_limit_set, "memory_limit_set" => memory_limit_set, "container" => cname, "pod_name" => pod_name} - container_count_key = "#{namespace}_#{workload_name.split('~~')[1]}_#{cname}" - if !@workload_container_count.key?(container_count_key) - @workload_container_count[container_count_key] = 1 - else - count = @workload_container_count[container_count_key] - @workload_container_count[container_count_key] = count + 1 - end - end - rescue => e - @log.info "Error in build_pod_uid_lookup for POD: #{pod_name} #{e.message} #{e.backtrace}" - end - end - end - - def get_pod_uid_lookup - return @pod_uid_lookup - end - - def get_workload_container_count - return @workload_container_count - end - - def get_workload_name(pod) - begin - has_owner = !pod['metadata']['ownerReferences'].nil? - owner_kind = '' - if has_owner - owner_kind = pod['metadata']['ownerReferences'][0]['kind'] - controller_name = pod['metadata']['ownerReferences'][0]['name'] - else - owner_kind = pod['kind'] - controller_name = pod['metadata']['name'] - end - namespace = pod['metadata']['namespace'] - workload_name = '' - if owner_kind.nil? - owner_kind = 'Pod' - end - case owner_kind.downcase - when 'job' - # we are excluding jobs - return nil - when 'replicaset' - #TODO: - workload_name = get_replica_set_owner_ref(controller_name) - workload_name = "#{namespace}~~#{workload_name}" - when 'daemonset' - workload_name = "#{namespace}~~#{controller_name}" - else - workload_name = "#{namespace}~~#{controller_name}" - end - return workload_name - rescue => e - @log.info "Error in get_workload_name(pod) #{e.message} #{e.backtrace}" - return nil - end - end - - def get_workload_kind(pod) - begin - has_owner = !pod['metadata']['ownerReferences'].nil? - owner_kind = '' - if has_owner - owner_kind = pod['metadata']['ownerReferences'][0]['kind'] - else - owner_kind = pod['kind'] - end - - if owner_kind.nil? 
- owner_kind = 'Pod' - end - return owner_kind - rescue => e - @log.info "Error in get_workload_kind(pod) #{e.message}" - return nil - end - end - - private - def get_replica_set_owner_ref(controller_name) - if @workload_name_cache.key?(controller_name) - return @workload_name_cache[controller_name] - end - begin - owner_ref = controller_name - @replicaset_inventory['items'].each{|rs| - rs_name = rs['metadata']['name'] - if controller_name.casecmp(rs_name) == 0 - if !rs['metadata']['ownerReferences'].nil? - owner_ref = rs['metadata']['ownerReferences'][0]['name'] if rs['metadata']['ownerReferences'][0]['name'] - end - break - end - } - @workload_name_cache[controller_name] = owner_ref - return owner_ref - rescue => e - @log.info "Error in get_replica_set_owner_ref(controller_name) #{e.message}" - return controller_name - end - end - - def get_node_capacity(node_name, type) - if node_name.nil? #unscheduled pods will not have a node name - return -1 - end - begin - @node_inventory["items"].each do |node| - if (!node["status"]["capacity"].nil?) && node["metadata"]["name"].casecmp(node_name.downcase) == 0 - return get_numeric_value(type, node["status"]["capacity"][type]) - end - end - rescue => e - @log.info "Error in get_node_capacity(pod, #{type}) #{e.backtrace} #{e.message}" - return -1 - end - end - - #Cannot reuse the code from KubernetesApiClient, for unit testing reasons. KubernetesApiClient has a dependency on oms_common.rb etc. - def get_numeric_value(metricName, metricVal) - metricValue = metricVal.downcase - begin - case metricName - when "memory" #convert to bytes for memory - #https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/ - if (metricValue.end_with?("ki")) - metricValue.chomp!("ki") - metricValue = Float(metricValue) * 1024.0 ** 1 - elsif (metricValue.end_with?("mi")) - metricValue.chomp!("mi") - metricValue = Float(metricValue) * 1024.0 ** 2 - elsif (metricValue.end_with?("gi")) - metricValue.chomp!("gi") - metricValue = Float(metricValue) * 1024.0 ** 3 - elsif (metricValue.end_with?("ti")) - metricValue.chomp!("ti") - metricValue = Float(metricValue) * 1024.0 ** 4 - elsif (metricValue.end_with?("pi")) - metricValue.chomp!("pi") - metricValue = Float(metricValue) * 1024.0 ** 5 - elsif (metricValue.end_with?("ei")) - metricValue.chomp!("ei") - metricValue = Float(metricValue) * 1024.0 ** 6 - elsif (metricValue.end_with?("zi")) - metricValue.chomp!("zi") - metricValue = Float(metricValue) * 1024.0 ** 7 - elsif (metricValue.end_with?("yi")) - metricValue.chomp!("yi") - metricValue = Float(metricValue) * 1024.0 ** 8 - elsif (metricValue.end_with?("k")) - metricValue.chomp!("k") - metricValue = Float(metricValue) * 1000.0 ** 1 - elsif (metricValue.end_with?("m")) - metricValue.chomp!("m") - metricValue = Float(metricValue) * 1000.0 ** 2 - elsif (metricValue.end_with?("g")) - metricValue.chomp!("g") - metricValue = Float(metricValue) * 1000.0 ** 3 - elsif (metricValue.end_with?("t")) - metricValue.chomp!("t") - metricValue = Float(metricValue) * 1000.0 ** 4 - elsif (metricValue.end_with?("p")) - metricValue.chomp!("p") - metricValue = Float(metricValue) * 1000.0 ** 5 - elsif (metricValue.end_with?("e")) - metricValue.chomp!("e") - metricValue = Float(metricValue) * 1000.0 ** 6 - elsif (metricValue.end_with?("z")) - metricValue.chomp!("z") - metricValue = Float(metricValue) * 1000.0 ** 7 - elsif (metricValue.end_with?("y")) - metricValue.chomp!("y") - metricValue = Float(metricValue) * 1000.0 ** 8 - else #assuming there are no units specified, it is 
bytes (the below conversion will fail for other unsupported 'units') - metricValue = Float(metricValue) - end - when "cpu" #convert to nanocores for cpu - #https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/ - if (metricValue.end_with?("m")) - metricValue.chomp!("m") - metricValue = Float(metricValue) * 1000.0 ** 2 - else #assuming no units specified, it is cores that we are converting to nanocores (the below conversion will fail for other unsupported 'units') - metricValue = Float(metricValue) * 1000.0 ** 3 - end - else - @Log.warn("getMetricNumericValue: Unsupported metric #{metricName}. Returning 0 for metric value") - metricValue = 0 - end #case statement - rescue => error - @Log.warn("getMetricNumericValue failed: #{error} for metric #{metricName} with value #{metricVal}. Returning 0 formetric value") - return 0 - end - return metricValue - end - - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/health_missing_signal_generator.rb b/source/plugins/ruby/health/health_missing_signal_generator.rb deleted file mode 100644 index 84af81ea7..000000000 --- a/source/plugins/ruby/health/health_missing_signal_generator.rb +++ /dev/null @@ -1,147 +0,0 @@ -# frozen_string_literal: true - -require_relative 'health_model_constants' -require_relative 'health_monitor_record' - -module HealthModel - class HealthMissingSignalGenerator - attr_accessor :last_received_records, :current_received_records - attr_reader :missing_signals, :unknown_signals_hash - - def initialize() - @last_received_records = {} - @unknown_signals_hash = {} - end - - def get_missing_signals(cluster_id, health_monitor_records, health_k8s_inventory, provider) - missing_monitor_ids = [] - nodes = health_k8s_inventory.get_nodes - workload_names = health_k8s_inventory.get_workload_names - missing_signals_map = {} - missing_signals = [] - health_monitor_records_map = {} - health_monitor_records.map{ - |monitor| health_monitor_records_map[monitor.monitor_instance_id] = monitor - } - - node_signals_hash = {} - nodes.each{|node| - node_signals_hash[node] = [MonitorId::NODE_MEMORY_MONITOR_ID, MonitorId::NODE_CPU_MONITOR_ID, MonitorId::NODE_CONDITION_MONITOR_ID] - } - log = HealthMonitorHelpers.get_log_handle - log.info "last_received_records #{@last_received_records.size} nodes #{nodes}" - @last_received_records.each{|monitor_instance_id, monitor| - if !health_monitor_records_map.key?(monitor_instance_id) - if HealthMonitorHelpers.is_node_monitor(monitor.monitor_id) - node_name = monitor.labels[HealthMonitorLabels::HOSTNAME] - new_monitor = HealthMonitorRecord.new( - monitor.monitor_id, - monitor.monitor_instance_id, - Time.now.utc.iso8601, - monitor.state, - monitor.labels, - monitor.config, - {"timestamp" => Time.now.utc.iso8601, "state" => HealthMonitorStates::UNKNOWN, "details" => ""} - ) - if !node_name.nil? && nodes.include?(node_name) - new_monitor.state = HealthMonitorStates::UNKNOWN - new_monitor.details["state"] = HealthMonitorStates::UNKNOWN - new_monitor.details["details"] = "Node present in inventory but no signal for #{monitor.monitor_id} from node #{node_name}" - @unknown_signals_hash[monitor_instance_id] = new_monitor - elsif !node_name.nil? && !nodes.include?(node_name) - new_monitor.state = HealthMonitorStates::NONE - new_monitor.details["state"] = HealthMonitorStates::NONE - new_monitor.details["details"] = "Node NOT present in inventory. 
node: #{node_name}" - end - missing_signals_map[monitor_instance_id] = new_monitor - log.info "Added missing signal #{new_monitor.monitor_instance_id} #{new_monitor.state}" - elsif HealthMonitorHelpers.is_pods_ready_monitor(monitor.monitor_id) - lookup = "#{monitor.labels[HealthMonitorLabels::NAMESPACE]}~~#{monitor.labels[HealthMonitorLabels::WORKLOAD_NAME]}" - new_monitor = HealthMonitorRecord.new( - monitor.monitor_id, - monitor.monitor_instance_id, - Time.now.utc.iso8601, - monitor.state, - monitor.labels, - monitor.config, - {"timestamp" => Time.now.utc.iso8601, "state" => HealthMonitorStates::UNKNOWN, "details" => ""} - ) - if !lookup.nil? && workload_names.include?(lookup) - new_monitor.state = HealthMonitorStates::UNKNOWN - new_monitor.details["state"] = HealthMonitorStates::UNKNOWN - new_monitor.details["details"] = "Workload present in inventory. But no signal for #{lookup}" - @unknown_signals_hash[monitor_instance_id] = new_monitor - elsif !lookup.nil? && !workload_names.include?(lookup) - new_monitor.state = HealthMonitorStates::NONE - new_monitor.details["state"] = HealthMonitorStates::NONE - new_monitor.details["details"] = "Workload #{lookup} NOT present in inventory" - end - missing_signals_map[monitor_instance_id] = new_monitor - end - end - } - - - health_monitor_records.each{|health_monitor_record| - # remove signals from the list of expected signals if we see them in the list of current signals - if HealthMonitorHelpers.is_node_monitor(health_monitor_record.monitor_id) - node_name = health_monitor_record.labels[HealthMonitorLabels::HOSTNAME] - if node_signals_hash.key?(node_name) - signals = node_signals_hash[node_name] - signals.delete(health_monitor_record.monitor_id) - if signals.size == 0 - node_signals_hash.delete(node_name) - end - end - end - } - - # if the hash is not empty, means we have missing signals - if node_signals_hash.size > 0 - # these signals were not sent previously - # these signals need to be assigned an unknown state - node_signals_hash.each{|node, monitor_ids| - monitor_ids.each{|monitor_id| - monitor_instance_id = HealthMonitorHelpers.get_monitor_instance_id(monitor_id, [cluster_id, node]) - new_monitor = HealthMonitorRecord.new( - monitor_id, - monitor_instance_id, - Time.now.utc.iso8601, - HealthMonitorStates::UNKNOWN, - provider.get_node_labels(node), - {}, - {"timestamp" => Time.now.utc.iso8601, "state" => HealthMonitorStates::UNKNOWN, "details" => "no signal received from node #{node}"} - ) - missing_signals_map[monitor_instance_id] = new_monitor - log.info "Added missing signal when node_signals_hash was not empty #{new_monitor.monitor_instance_id} #{new_monitor.state} #{new_monitor.labels.keys}" - } - } - end - - missing_signals_map.each{|k,v| - missing_signals.push(v) - } - - # if an unknown signal is present neither in missing signals or the incoming signals, change its state to none, and remove from unknown_signals - # in update_state of HealthMonitorState, send if latest_record_state is none - @unknown_signals_hash.each{|k,v| - if !missing_signals_map.key?(k) && !health_monitor_records_map.key?(k) - monitor_record = @unknown_signals_hash[k] - monitor_record.details["state"] = HealthMonitorStates::NONE # used for calculating the old and new states in update_state - monitor_record.state = HealthMonitorStates::NONE #used for calculating the aggregate monitor state - missing_signals.push(monitor_record) - @unknown_signals_hash.delete(k) - log.info "Updating state from unknown to none for #{k}" - end - } - return missing_signals - end - - 
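# --------------------------------------------------------------------------
# Illustrative sketch, not part of the original plugin sources:
# get_missing_signals above diffs the previously-sent monitors against the
# current batch. A monitor that stopped reporting while its node or workload
# is still in the Kubernetes inventory is surfaced as "unknown"; one whose
# backing object has disappeared is surfaced as "none" so the model can age
# it out. A highly simplified rendering of that classification (hypothetical
# helper, keyed on monitor instance ids):
def classify_missing_signals(previously_sent_ids, current_ids, ids_still_in_inventory)
  (previously_sent_ids - current_ids).map do |monitor_instance_id|
    state = ids_still_in_inventory.include?(monitor_instance_id) ? "unknown" : "none"
    [monitor_instance_id, state]
  end.to_h
end
# classify_missing_signals(["node2|cpu", "node3|cpu"], ["node2|cpu"], ["node3|cpu"])
#   # => { "node3|cpu" => "unknown" }  (node3 is still in inventory, just silent)
# --------------------------------------------------------------------------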
def update_last_received_records(last_received_records) - last_received_records_map = {} - last_received_records.map {|record| last_received_records_map[record.monitor_instance_id] = record } - @last_received_records = last_received_records_map - end - end - -end \ No newline at end of file diff --git a/source/plugins/ruby/health/health_model_buffer.rb b/source/plugins/ruby/health/health_model_buffer.rb deleted file mode 100644 index 1c3ec3332..000000000 --- a/source/plugins/ruby/health/health_model_buffer.rb +++ /dev/null @@ -1,31 +0,0 @@ -# frozen_string_literal: true - -module HealthModel - -=begin - Class that is used to create a buffer for collecting the health records -=end - class HealthModelBuffer - - attr_reader :records_buffer, :log - - def initialize - @records_buffer = [] - end - - # Returns the current buffer - def get_buffer - return @records_buffer - end - - # adds records to the buffer - def add_to_buffer(records) - @records_buffer.push(*records) - end - - # clears/resets the buffer - def reset_buffer - @records_buffer = [] - end - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/health_model_builder.rb b/source/plugins/ruby/health/health_model_builder.rb deleted file mode 100644 index 43ed30d05..000000000 --- a/source/plugins/ruby/health/health_model_builder.rb +++ /dev/null @@ -1,37 +0,0 @@ -# frozen_string_literal: true -require 'time' - -module HealthModel - class HealthModelBuilder - attr_accessor :hierarchy_builder, :state_finalizers, :monitor_set - - def initialize(hierarchy_builder, state_finalizers, monitor_set) - @hierarchy_builder = hierarchy_builder - @state_finalizers = state_finalizers - @monitor_set = monitor_set - end - - def process_records(health_records) - health_records.each{|health_record| - @hierarchy_builder.process_record(health_record, @monitor_set) - } - end - - def finalize_model - if !@state_finalizers.is_a?(Array) - raise 'state finalizers should be an array' - end - - if @state_finalizers.length == 0 - raise '@state_finalizers length should not be zero or empty' - end - - @state_finalizers.each{|finalizer| - finalizer.finalize(@monitor_set) - } - - return @monitor_set.get_map - end - - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/health_model_constants.rb b/source/plugins/ruby/health/health_model_constants.rb deleted file mode 100644 index c74f86f4d..000000000 --- a/source/plugins/ruby/health/health_model_constants.rb +++ /dev/null @@ -1,82 +0,0 @@ -# frozen_string_literal: true -module HealthModel - class MonitorState - CRITICAL = "fail" - ERROR = "err" - HEALTHY = "pass" - NONE = "none" - UNKNOWN = "unknown" - WARNING = "warn" - end - - class AggregationAlgorithm - PERCENTAGE = "percentage" - WORSTOF = "worstOf" - end - - class MonitorId - AGENT_NODE_POOL = 'agent_node_pool' - ALL_AGENT_NODE_POOLS = 'all_agent_node_pools' - ALL_NODE_POOLS = 'all_node_pools' - ALL_NODES = 'all_nodes' - CAPACITY = 'capacity' - CLUSTER = 'cluster' - CONTAINER = 'container' - CONTAINER_CPU_MONITOR_ID = "container_cpu_utilization" - CONTAINER_MEMORY_MONITOR_ID = "container_memory_utilization" - K8S_INFRASTRUCTURE = 'k8s_infrastructure' - KUBE_API_STATUS = "kube_api_status" - MASTER_NODE_POOL = 'master_node_pool' - NAMESPACE = 'namespace'; - NODE = 'node'; - NODE_CONDITION_MONITOR_ID = "node_condition" - NODE_CPU_MONITOR_ID = "node_cpu_utilization" - NODE_MEMORY_MONITOR_ID = "node_memory_utilization" - SYSTEM_WORKLOAD = 'system_workload' - SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID = 
"system_workload_pods_ready" - USER_WORKLOAD = 'user_workload'; - USER_WORKLOAD_PODS_READY_MONITOR_ID = "user_workload_pods_ready" - WORKLOAD = 'all_workloads'; - WORKLOAD_CONTAINER_CPU_PERCENTAGE_MONITOR_ID = "container_cpu_utilization" - WORKLOAD_CONTAINER_MEMORY_PERCENTAGE_MONITOR_ID = "container_memory_utilization" - WORKLOAD_CPU_OVERSUBSCRIBED_MONITOR_ID = "subscribed_capacity_cpu" - WORKLOAD_MEMORY_OVERSUBSCRIBED_MONITOR_ID = "subscribed_capacity_memory" - end - - class HealthMonitorRecordFields - CLUSTER_ID = "ClusterId" - DETAILS = "Details" - HEALTH_MODEL_DEFINITION_VERSION = "HealthModelDefinitionVersion" - MONITOR_CONFIG = "MonitorConfig" - MONITOR_ID = "MonitorTypeId" - MONITOR_INSTANCE_ID = "MonitorInstanceId" - MONITOR_LABELS = "MonitorLabels" - NEW_STATE = "NewState" - NODE_NAME = "NodeName" - OLD_STATE = "OldState" - PARENT_MONITOR_INSTANCE_ID = "ParentMonitorInstanceId" - TIME_FIRST_OBSERVED = "TimeFirstObserved" - TIME_GENERATED = "TimeGenerated" - end - - class HealthMonitorStates - FAIL = "fail" - NONE = "none" - PASS = "pass" - UNKNOWN = "unknown" - WARNING = "warn" - end - - class HealthMonitorLabels - AGENTPOOL = "agentpool" - CONTAINER = "container.azm.ms/container" - HOSTNAME = "kubernetes.io/hostname" - NAMESPACE = "container.azm.ms/namespace" - ROLE = "kubernetes.io/role" - WORKLOAD_KIND = "container.azm.ms/workload-kind" - WORKLOAD_NAME = "container.azm.ms/workload-name" - MASTERROLE = "node-role.kubernetes.io/master" - COMPUTEROLE = "node-role.kubernetes.io/compute" - INFRAROLE = "node-role.kubernetes.io/infra" - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/health_model_definition_parser.rb b/source/plugins/ruby/health/health_model_definition_parser.rb deleted file mode 100644 index c185e5389..000000000 --- a/source/plugins/ruby/health/health_model_definition_parser.rb +++ /dev/null @@ -1,52 +0,0 @@ -# frozen_string_literal: true -=begin - Class to parse the health model definition. 
The definition expresses the relationship between monitors, how to roll up to an aggregate monitor, - and what labels to "pass on" to the parent monitor -=end -require 'yajl/json_gem' - -module HealthModel - class HealthModelDefinitionParser - attr_accessor :health_model_definition_path, :health_model_definition - - # Constructor - def initialize(path) - @health_model_definition = {} - @health_model_definition_path = path - end - - # Parse the health model definition file and build the model roll-up hierarchy - def parse_file - if (!File.exist?(@health_model_definition_path)) - raise "File does not exist in the specified path" - end - - file = File.read(@health_model_definition_path) - temp_model = JSON.parse(file) - temp_model.each { |entry| - monitor_id = entry['monitor_id'] - parent_monitor_id = entry['parent_monitor_id'] - labels = entry['labels'] if entry['labels'] - aggregation_algorithm = entry['aggregation_algorithm'] if entry['aggregation_algorithm'] - aggregation_algorithm_params = entry['aggregation_algorithm_params'] if entry['aggregation_algorithm_params'] - default_parent_monitor_id = entry['default_parent_monitor_id'] if entry['default_parent_monitor_id'] - if parent_monitor_id.is_a?(Array) - conditions = [] - parent_monitor_id.each{|condition| - key = condition['label'] - operator = condition['operator'] - value = condition['value'] - parent_id = condition['id'] - conditions.push({"key" => key, "operator" => operator, "value" => value, "parent_id" => parent_id}) - } - @health_model_definition[monitor_id] = {"conditions" => conditions, "labels" => labels, "aggregation_algorithm" => aggregation_algorithm, "aggregation_algorithm_params" =>aggregation_algorithm_params, "default_parent_monitor_id" => default_parent_monitor_id} - elsif parent_monitor_id.is_a?(String) - @health_model_definition[monitor_id] = {"parent_monitor_id" => parent_monitor_id, "labels" => labels, "aggregation_algorithm" => aggregation_algorithm, "aggregation_algorithm_params" =>aggregation_algorithm_params} - elsif parent_monitor_id.nil? - @health_model_definition[monitor_id] = {"parent_monitor_id" => nil, "labels" => labels, "aggregation_algorithm" => aggregation_algorithm, "aggregation_algorithm_params" =>aggregation_algorithm_params} - end - } - @health_model_definition - end - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/health_monitor_helpers.rb b/source/plugins/ruby/health/health_monitor_helpers.rb deleted file mode 100644 index 74aa35af0..000000000 --- a/source/plugins/ruby/health/health_monitor_helpers.rb +++ /dev/null @@ -1,74 +0,0 @@ -# frozen_string_literal: true -require 'logger' -require 'digest' -require_relative 'health_model_constants' - -module HealthModel - # static class that provides a bunch of utility methods - class HealthMonitorHelpers - - @log_path = "/var/opt/microsoft/docker-cimprov/log/health_monitors.log" - - if Gem.win_platform? 
#unit testing on windows dev machine - @log_path = "C:\Temp\health_monitors.log" - end - - @log = Logger.new(@log_path, 2, 10 * 1048576) #keep last 2 files, max log file size = 10M - - class << self - def is_node_monitor(monitor_id) - return (monitor_id == MonitorId::NODE_CPU_MONITOR_ID || monitor_id == MonitorId::NODE_MEMORY_MONITOR_ID || monitor_id == MonitorId::NODE_CONDITION_MONITOR_ID) - end - - def is_pods_ready_monitor(monitor_id) - return (monitor_id == MonitorId::USER_WORKLOAD_PODS_READY_MONITOR_ID || monitor_id == MonitorId::SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID) - end - - def get_log_handle - return @log - end - - def get_monitor_instance_id(monitor_id, args = []) - string_to_hash = args.join("/") - return "#{monitor_id}-#{Digest::MD5.hexdigest(string_to_hash)}" - end - - def add_agentpool_node_label_if_not_present(records) - records.each{|record| - # continue if it is not a node monitor - if !is_node_monitor(record.monitor_id) - #@log.info "#{record.monitor_id} is not a NODE MONITOR" - next - end - labels_keys = record.labels.keys - - if labels_keys.include?(HealthMonitorLabels::AGENTPOOL) - @log.info "#{record.monitor_id} includes agentpool label. Value = #{record.labels[HealthMonitorLabels::AGENTPOOL]}" - next - else - #@log.info "#{record} does not include agentpool label." - role_name = 'unknown' - if record.labels.include?(HealthMonitorLabels::ROLE) - role_name = record.labels[HealthMonitorLabels::ROLE] - elsif record.labels.include?(HealthMonitorLabels::MASTERROLE) - if !record.labels[HealthMonitorLabels::MASTERROLE].empty? - role_name = 'master' - end - elsif record.labels.include?(HealthMonitorLabels::COMPUTEROLE) - if !record.labels[HealthMonitorLabels::COMPUTEROLE].empty? - role_name = 'compute' - end - elsif record.labels.include?(HealthMonitorLabels::INFRAROLE) - if !record.labels[HealthMonitorLabels::INFRAROLE].empty? - role_name = 'infra' - end - end - @log.info "Adding agentpool label #{role_name}_node_pool for #{record.monitor_id}" - record.labels[HealthMonitorLabels::AGENTPOOL] = "#{role_name}_node_pool" - end - } - end - end - - end -end diff --git a/source/plugins/ruby/health/health_monitor_optimizer.rb b/source/plugins/ruby/health/health_monitor_optimizer.rb deleted file mode 100644 index d87540941..000000000 --- a/source/plugins/ruby/health/health_monitor_optimizer.rb +++ /dev/null @@ -1,54 +0,0 @@ -# frozen_string_literal: true -require 'yajl/json_gem' -module HealthModel - class HealthMonitorOptimizer - #ctor - def initialize - @@health_signal_timeout = 240 - @@first_record_sent = {} - end - - def should_send(monitor_instance_id, health_monitor_state, health_monitor_config) - - health_monitor_instance_state = health_monitor_state.get_state(monitor_instance_id) - health_monitor_records = health_monitor_instance_state.prev_records - health_monitor_config['ConsecutiveSamplesForStateTransition'].nil? ? 
samples_to_check = 1 : samples_to_check = health_monitor_config['ConsecutiveSamplesForStateTransition'].to_i - - latest_record = health_monitor_records[health_monitor_records.size-1] #since we push new records to the end, and remove oldest records from the beginning - latest_record_state = latest_record["state"] - latest_record_time = latest_record["timestamp"] #string representation of time - - new_state = health_monitor_instance_state.new_state - prev_sent_time = health_monitor_instance_state.prev_sent_record_time - time_first_observed = health_monitor_instance_state.state_change_time - - if latest_record_state.downcase == new_state.downcase - time_elapsed = (Time.parse(latest_record_time) - Time.parse(prev_sent_time)) / 60 - if time_elapsed > @@health_signal_timeout # minutes - return true - elsif !@@first_record_sent.key?(monitor_instance_id) - @@first_record_sent[monitor_instance_id] = true - return true - else - return false - end - else - if samples_to_check == 1 - return true - elsif health_monitor_instance_state.prev_records.size == 1 && samples_to_check > 1 - return true - elsif health_monitor_instance_state.prev_records.size < samples_to_check - return false - else - # state change from previous sent state to latest record state - #check state of last n records to see if they are all in the same state - if (health_monitor_instance_state.is_state_change_consistent) - return true - else - return false - end - end - end - end - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/health_monitor_provider.rb b/source/plugins/ruby/health/health_monitor_provider.rb deleted file mode 100644 index 8e1d11143..000000000 --- a/source/plugins/ruby/health/health_monitor_provider.rb +++ /dev/null @@ -1,139 +0,0 @@ -# frozen_string_literal: true -require_relative 'health_model_constants' -require 'yajl/json_gem' - -module HealthModel - class HealthMonitorProvider - - attr_accessor :cluster_labels, :health_kubernetes_resources, :monitor_configuration_path, :cluster_id - attr_reader :monitor_configuration - - def initialize(cluster_id, cluster_labels, health_kubernetes_resources, monitor_configuration_path) - @cluster_labels = Hash.new - cluster_labels.each{|k,v| @cluster_labels[k] = v} - @cluster_id = cluster_id - @health_kubernetes_resources = health_kubernetes_resources - @monitor_configuration_path = monitor_configuration_path - begin - @monitor_configuration = {} - file = File.open(@monitor_configuration_path, "r") - if !file.nil? - fileContents = file.read - @monitor_configuration = JSON.parse(fileContents) - file.close - end - rescue => e - @log.info "Error when opening health config file #{e}" - end - end - - def get_record(health_monitor_record, health_monitor_state) - - labels = Hash.new - @cluster_labels.each{|k,v| labels[k] = v} - monitor_id = health_monitor_record.monitor_id - monitor_instance_id = health_monitor_record.monitor_instance_id - health_monitor_instance_state = health_monitor_state.get_state(monitor_instance_id) - - - monitor_labels = health_monitor_record.labels - if !monitor_labels.empty? 
- monitor_labels.keys.each do |key| - labels[key] = monitor_labels[key] - end - end - - prev_records = health_monitor_instance_state.prev_records - time_first_observed = health_monitor_instance_state.state_change_time # the oldest collection time - new_state = health_monitor_instance_state.new_state # this is updated before formatRecord is called - old_state = health_monitor_instance_state.old_state - - config = get_config(monitor_id) - - if prev_records.size == 1 - details = prev_records[0] - else - details = prev_records - end - - time_observed = Time.now.utc.iso8601 - - monitor_record = {} - - monitor_record[HealthMonitorRecordFields::CLUSTER_ID] = @cluster_id - monitor_record[HealthMonitorRecordFields::MONITOR_LABELS] = labels.to_json - monitor_record[HealthMonitorRecordFields::MONITOR_ID] = monitor_id - monitor_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id - monitor_record[HealthMonitorRecordFields::NEW_STATE] = new_state - monitor_record[HealthMonitorRecordFields::OLD_STATE] = old_state - monitor_record[HealthMonitorRecordFields::DETAILS] = details.to_json - monitor_record[HealthMonitorRecordFields::MONITOR_CONFIG] = config.to_json - monitor_record[HealthMonitorRecordFields::TIME_GENERATED] = Time.now.utc.iso8601 - monitor_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_first_observed - monitor_record[HealthMonitorRecordFields::PARENT_MONITOR_INSTANCE_ID] = '' - - return monitor_record - end - - def get_config(monitor_id) - if @monitor_configuration.key?(monitor_id) - return @monitor_configuration[monitor_id] - else - return {} - end - end - - def get_labels(health_monitor_record) - monitor_labels = Hash.new - @cluster_labels.keys.each{|key| - monitor_labels[key] = @cluster_labels[key] - } - monitor_id = health_monitor_record[HealthMonitorRecordFields::MONITOR_ID] - case monitor_id - when MonitorId::CONTAINER_CPU_MONITOR_ID, MonitorId::CONTAINER_MEMORY_MONITOR_ID, MonitorId::USER_WORKLOAD_PODS_READY_MONITOR_ID, MonitorId::SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID - - namespace = health_monitor_record[HealthMonitorRecordFields::DETAILS]['details']['namespace'] - workload_name = health_monitor_record[HealthMonitorRecordFields::DETAILS]['details']['workload_name'] - workload_kind = health_monitor_record[HealthMonitorRecordFields::DETAILS]['details']['workload_kind'] - - monitor_labels[HealthMonitorLabels::WORKLOAD_NAME] = workload_name.split('~~')[1] - monitor_labels[HealthMonitorLabels::WORKLOAD_KIND] = workload_kind - monitor_labels[HealthMonitorLabels::NAMESPACE] = namespace - - # add the container name for container memory/cpu - if monitor_id == MonitorId::CONTAINER_CPU_MONITOR_ID || monitor_id == MonitorId::CONTAINER_MEMORY_MONITOR_ID - container = health_monitor_record[HealthMonitorRecordFields::DETAILS]['details']['container'] - monitor_labels[HealthMonitorLabels::CONTAINER] = container - end - - #TODO: This doesn't belong here. Move this elsewhere - health_monitor_record[HealthMonitorRecordFields::DETAILS]['details'].delete('namespace') - health_monitor_record[HealthMonitorRecordFields::DETAILS]['details'].delete('workload_name') - health_monitor_record[HealthMonitorRecordFields::DETAILS]['details'].delete('workload_kind') - - when MonitorId::NODE_CPU_MONITOR_ID, MonitorId::NODE_MEMORY_MONITOR_ID, MonitorId::NODE_CONDITION_MONITOR_ID - node_name = health_monitor_record[HealthMonitorRecordFields::NODE_NAME] - @health_kubernetes_resources.get_node_inventory['items'].each do |node| - if !node_name.nil? && !node['metadata']['name'].nil? 
&& node_name == node['metadata']['name'] - if !node["metadata"].nil? && !node["metadata"]["labels"].nil? - monitor_labels = monitor_labels.merge(node["metadata"]["labels"]) - end - end - end - end - return monitor_labels - end - - def get_node_labels(node_name) - monitor_labels = {} - @health_kubernetes_resources.get_node_inventory['items'].each do |node| - if !node_name.nil? && !node['metadata']['name'].nil? && node_name == node['metadata']['name'] - if !node["metadata"].nil? && !node["metadata"]["labels"].nil? - monitor_labels = node["metadata"]["labels"] - end - end - end - return monitor_labels - end - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/health_monitor_record.rb b/source/plugins/ruby/health/health_monitor_record.rb deleted file mode 100644 index 7df84ff53..000000000 --- a/source/plugins/ruby/health/health_monitor_record.rb +++ /dev/null @@ -1,11 +0,0 @@ -# frozen_string_literal: true -HealthMonitorRecord = Struct.new( - :monitor_id, - :monitor_instance_id, - :transition_date_time, - :state, - :labels, - :config, - :details - ) do -end \ No newline at end of file diff --git a/source/plugins/ruby/health/health_monitor_state.rb b/source/plugins/ruby/health/health_monitor_state.rb deleted file mode 100644 index 110793eeb..000000000 --- a/source/plugins/ruby/health/health_monitor_state.rb +++ /dev/null @@ -1,266 +0,0 @@ -# frozen_string_literal: true -require_relative 'health_model_constants' -require 'yajl/json_gem' - -module HealthModel - - HealthMonitorInstanceState = Struct.new(:prev_sent_record_time, :old_state, :new_state, :state_change_time, :prev_records, :is_state_change_consistent, :should_send) do - end - - # Class that is used to store the last sent state and latest monitors - # provides services like - # get_state -- returns the current state and details - # update_instance -- updates the state of the health monitor history records - # set_state -- sets the last health monitor state - class HealthMonitorState - - def initialize - @@monitor_states = {} - @@first_record_sent = {} - @@health_signal_timeout = 240 - - end - - def get_state(monitor_instance_id) - if @@monitor_states.key?(monitor_instance_id) - return @@monitor_states[monitor_instance_id] - end - end - - def set_state(monitor_instance_id, health_monitor_instance_state) - @@monitor_states[monitor_instance_id] = health_monitor_instance_state - end - - def to_h - return @@monitor_states - end - - def initialize_state(deserialized_state) - @@monitor_states = {} - deserialized_state.each{|k,v| - health_monitor_instance_state_hash = v - state = HealthMonitorInstanceState.new(*health_monitor_instance_state_hash.values_at(*HealthMonitorInstanceState.members)) - state.prev_sent_record_time = health_monitor_instance_state_hash["prev_sent_record_time"] - state.old_state = health_monitor_instance_state_hash["old_state"] - state.new_state = health_monitor_instance_state_hash["new_state"] - state.state_change_time = health_monitor_instance_state_hash["state_change_time"] - state.prev_records = health_monitor_instance_state_hash["prev_records"] - state.is_state_change_consistent = health_monitor_instance_state_hash["is_state_change_consistent"] || false - state.should_send = health_monitor_instance_state_hash["should_send"] - @@monitor_states[k] = state - @@first_record_sent[k] = true - } - end - -=begin -when do u send? ---------------- -1. if the signal hasnt been sent before -2. if there is a "consistent" state change for monitors -3. if the signal is stale (> 4hrs) -4. 
If the latest state is none -5. If an aggregate monitor has a change in its details, but no change in state -=end - def update_state(monitor, #UnitMonitor/AggregateMonitor - monitor_config, #Hash - is_aggregate_monitor = false, - telemetry = nil - ) - samples_to_keep = 1 - monitor_id = monitor.monitor_id - monitor_instance_id = monitor.monitor_instance_id - log = HealthMonitorHelpers.get_log_handle - current_time = Time.now.utc.iso8601 - health_monitor_instance_state = get_state(monitor_instance_id) - if !health_monitor_instance_state.nil? - health_monitor_instance_state.is_state_change_consistent = false - health_monitor_instance_state.should_send = false - set_state(monitor_instance_id, health_monitor_instance_state) # reset is_state_change_consistent - end - - if !monitor_config.nil? && !monitor_config['ConsecutiveSamplesForStateTransition'].nil? - samples_to_keep = monitor_config['ConsecutiveSamplesForStateTransition'].to_i - end - - deleted_record = {} - if @@monitor_states.key?(monitor_instance_id) - health_monitor_instance_state = @@monitor_states[monitor_instance_id] - health_monitor_records = health_monitor_instance_state.prev_records #This should be an array - - if health_monitor_records.size == samples_to_keep - deleted_record = health_monitor_records.delete_at(0) - end - health_monitor_records.push(monitor.details) - health_monitor_instance_state.prev_records = health_monitor_records - @@monitor_states[monitor_instance_id] = health_monitor_instance_state - else - # if samples_to_keep == 1, then set new state to be the health_monitor_record state, else set it as none - - old_state = HealthMonitorStates::NONE - new_state = HealthMonitorStates::NONE - if samples_to_keep == 1 - new_state = monitor.state - end - - health_monitor_instance_state = HealthMonitorInstanceState.new( - monitor.transition_date_time, - old_state, - new_state, - monitor.transition_date_time, - [monitor.details]) - - health_monitor_instance_state.should_send = true - @@monitor_states[monitor_instance_id] = health_monitor_instance_state - end - - # update old and new state based on the history and latest record. - # TODO: this is a little hairy. Simplify - - health_monitor_records = health_monitor_instance_state.prev_records - if monitor_config['ConsecutiveSamplesForStateTransition'].nil? 
- samples_to_check = 1 - else - samples_to_check = monitor_config['ConsecutiveSamplesForStateTransition'].to_i - end - - latest_record = health_monitor_records[health_monitor_records.size-1] #since we push new records to the end, and remove oldest records from the beginning - latest_record_state = latest_record["state"] - latest_record_time = latest_record["timestamp"] #string representation of time - - new_state = health_monitor_instance_state.new_state - prev_sent_time = health_monitor_instance_state.prev_sent_record_time - - # if the last sent state (new_state is different from latest monitor state) - if latest_record_state.downcase == new_state.downcase - time_elapsed = (Time.parse(latest_record_time) - Time.parse(prev_sent_time)) / 60 - # check if health signal has "timed out" - if time_elapsed > @@health_signal_timeout # minutes - # update record for last sent record time - health_monitor_instance_state.old_state = health_monitor_instance_state.new_state - health_monitor_instance_state.new_state = latest_record_state - health_monitor_instance_state.prev_sent_record_time = current_time - health_monitor_instance_state.should_send = true - #log.debug "After Updating Monitor State #{health_monitor_instance_state}" - set_state(monitor_instance_id, health_monitor_instance_state) - log.debug "#{monitor_instance_id} condition: signal timeout should_send #{health_monitor_instance_state.should_send} #{health_monitor_instance_state.old_state} --> #{health_monitor_instance_state.new_state}" - # check if the first record has been sent - elsif !@@first_record_sent.key?(monitor_instance_id) - @@first_record_sent[monitor_instance_id] = true - health_monitor_instance_state.should_send = true - set_state(monitor_instance_id, health_monitor_instance_state) - elsif agg_monitor_details_changed?(is_aggregate_monitor, deleted_record, health_monitor_instance_state.prev_records[0]) - health_monitor_instance_state.should_send = true - set_state(monitor_instance_id, health_monitor_instance_state) - log.debug "#{monitor_instance_id} condition: agg monitor details changed should_send #{health_monitor_instance_state.should_send}" - end - # latest state is different that last sent state - else - #if latest_record_state is none, send - if latest_record_state.downcase == HealthMonitorStates::NONE - health_monitor_instance_state.old_state = health_monitor_instance_state.new_state #initially old = new, so when state change occurs, assign old to be new, and set new to be the latest record state - health_monitor_instance_state.new_state = latest_record_state - health_monitor_instance_state.state_change_time = current_time - health_monitor_instance_state.prev_sent_record_time = current_time - health_monitor_instance_state.should_send = true - if !is_aggregate_monitor - if !telemetry.nil? 
- telemetry.add_monitor_to_telemetry(monitor_id, health_monitor_instance_state.old_state, health_monitor_instance_state.new_state) - end - end - if !@@first_record_sent.key?(monitor_instance_id) - @@first_record_sent[monitor_instance_id] = true - end - set_state(monitor_instance_id, health_monitor_instance_state) - log.debug "#{monitor_instance_id} condition: NONE state should_send #{health_monitor_instance_state.should_send} #{health_monitor_instance_state.old_state} --> #{health_monitor_instance_state.new_state}" - # if it is a monitor that needs to instantly notify on state change, update the state - # mark the monitor to be sent - elsif samples_to_check == 1 - health_monitor_instance_state.old_state = health_monitor_instance_state.new_state #initially old = new, so when state change occurs, assign old to be new, and set new to be the latest record state - health_monitor_instance_state.new_state = latest_record_state - health_monitor_instance_state.state_change_time = current_time - health_monitor_instance_state.prev_sent_record_time = current_time - health_monitor_instance_state.should_send = true - if !is_aggregate_monitor - if !telemetry.nil? - telemetry.add_monitor_to_telemetry(monitor_id, health_monitor_instance_state.old_state, health_monitor_instance_state.new_state) - end - end - if !@@first_record_sent.key?(monitor_instance_id) - @@first_record_sent[monitor_instance_id] = true - end - set_state(monitor_instance_id, health_monitor_instance_state) - log.debug "#{monitor_instance_id} condition: state change, samples_to_check = #{samples_to_check} should_send #{health_monitor_instance_state.should_send} #{health_monitor_instance_state.old_state} --> #{health_monitor_instance_state.new_state}" - else - # state change from previous sent state to latest record state - #check state of last n records to see if they are all in the same state - if (is_state_change_consistent(health_monitor_records, samples_to_keep)) - first_record = health_monitor_records[0] - latest_record = health_monitor_records[health_monitor_records.size-1] #since we push new records to the end, and remove oldest records from the beginning - latest_record_state = latest_record["state"] - latest_record_time = latest_record["timestamp"] #string representation of time - - health_monitor_instance_state.old_state = health_monitor_instance_state.new_state - health_monitor_instance_state.is_state_change_consistent = true # This way it wont be recomputed in the optimizer. - health_monitor_instance_state.should_send = true - health_monitor_instance_state.new_state = latest_record_state - health_monitor_instance_state.prev_sent_record_time = current_time - health_monitor_instance_state.state_change_time = current_time - if !is_aggregate_monitor - if !telemetry.nil? - telemetry.add_monitor_to_telemetry(monitor_id, health_monitor_instance_state.old_state, health_monitor_instance_state.new_state) - end - end - - set_state(monitor_instance_id, health_monitor_instance_state) - - if !@@first_record_sent.key?(monitor_instance_id) - @@first_record_sent[monitor_instance_id] = true - end - log.debug "#{monitor_instance_id} condition: consistent state change, samples_to_check = #{samples_to_check} should_send #{health_monitor_instance_state.should_send} #{health_monitor_instance_state.old_state} --> #{health_monitor_instance_state.new_state}" - end - end - end - end - - private - def is_state_change_consistent(health_monitor_records, samples_to_check) - if health_monitor_records.nil? 
|| health_monitor_records.size == 0 || health_monitor_records.size < samples_to_check - return false - end - i = 0 - while i < health_monitor_records.size - 1 - #log.debug "Prev: #{health_monitor_records[i].state} Current: #{health_monitor_records[i + 1].state}" - if health_monitor_records[i]["state"] != health_monitor_records[i + 1]["state"] - return false - end - i += 1 - end - return true - end - - def agg_monitor_details_changed?(is_aggregate_monitor, last_sent_details, latest_details) - log = HealthMonitorHelpers.get_log_handle - if !is_aggregate_monitor - return false - end - - # Do a deep comparison of the keys under details, since a shallow comparison is hit or miss. - # Actual bug was the array inside the keys were in random order and the previous equality comparison was failing - latest_details['details'].keys.each{|k| - if !last_sent_details['details'].key?(k) - return true - end - if latest_details['details'][k].size != last_sent_details['details'][k].size - return true - end - } - # Explanation: a = [1,2] b = [2,1] a & b = [1,2] , c = [2,3] d = [2] c & d = [2] c.size != (c&d).size - latest_details['details'].keys.each{|k| - if !(latest_details['details'][k].size == (last_sent_details['details'][k] & latest_details['details'][k]).size) - return true - end - } - return false - end - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/health_monitor_telemetry.rb b/source/plugins/ruby/health/health_monitor_telemetry.rb deleted file mode 100644 index 1227e1f83..000000000 --- a/source/plugins/ruby/health/health_monitor_telemetry.rb +++ /dev/null @@ -1,59 +0,0 @@ -# frozen_string_literal: true -require_relative 'health_model_constants' -require 'socket' -if Socket.gethostname.start_with?('omsagent-rs') - require_relative '../ApplicationInsightsUtility' -end - - -module HealthModel - class HealthMonitorTelemetry - - attr_reader :monitor_records, :last_sent_time - @@TELEMETRY_SEND_INTERVAL = 60 - - def initialize - @last_sent_time = Time.now - @monitor_records = {} - end - - def send - if Time.now > @last_sent_time + @@TELEMETRY_SEND_INTERVAL * 60 - log = HealthMonitorHelpers.get_log_handle - log.info "Sending #{@monitor_records.size} state change events" - if @monitor_records.size > 0 - hash_to_send = {} - @monitor_records.each{|k,v| - v.each{|k1,v1| - hash_to_send["#{k}-#{k1}"] = v1 - } - } - ApplicationInsightsUtility.sendCustomEvent("HealthMonitorStateChangeEvent", hash_to_send) - end - @monitor_records = {} - @last_sent_time = Time.now - end - end - - def add_monitor_to_telemetry(monitor_id, old_state, new_state) - if @monitor_records.nil? || @monitor_records.empty? 
- @monitor_records = {} - end - if @monitor_records.key?(monitor_id) - monitor_hash = @monitor_records[monitor_id] - if monitor_hash.key?("#{old_state}-#{new_state}") - count = monitor_hash["#{old_state}-#{new_state}"] - count = count + 1 - monitor_hash["#{old_state}-#{new_state}"] = count - else - monitor_hash["#{old_state}-#{new_state}"] = 1 - end - @monitor_records[monitor_id] = monitor_hash - else - monitor_hash = {} - monitor_hash["#{old_state}-#{new_state}"] = 1 - @monitor_records[monitor_id] = monitor_hash - end - end - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/health_monitor_utils.rb b/source/plugins/ruby/health/health_monitor_utils.rb deleted file mode 100644 index 58f2ecc36..000000000 --- a/source/plugins/ruby/health/health_monitor_utils.rb +++ /dev/null @@ -1,323 +0,0 @@ -# frozen_string_literal: true -require 'logger' -require 'digest' -require_relative 'health_model_constants' -require 'yajl/json_gem' -require_relative '../kubelet_utils' - -module HealthModel - # static class that provides a bunch of utility methods - class HealthMonitorUtils - - begin - if !Gem.win_platform? - require_relative '../KubernetesApiClient' - end - rescue => e - $log.info "Error loading KubernetesApiClient #{e.message}" - end - - @@nodeInventory = {} - - @log_path = "/var/opt/microsoft/docker-cimprov/log/health_monitors.log" - - if Gem.win_platform? #unit testing on windows dev machine - @log_path = "C:\Temp\health_monitors.log" - end - - @log = Logger.new(@log_path, 2, 10 * 1048576) #keep last 2 files, max log file size = 10M - @@last_refresh_time = '2019-01-01T00:00:00Z' - - class << self - # compute the percentage state given a value and a monitor configuration - #TODO : Add Unit Tests for this method - def compute_percentage_state(value, config) - if config.nil? || ( config['WarnIfGreaterThanPercentage'].nil? && config['WarnIfLesserThanPercentage'].nil? ) - warn_percentage = nil - else - warn_percentage = !config['WarnIfGreaterThanPercentage'].nil? ? config['WarnIfGreaterThanPercentage'].to_f : config['WarnIfLesserThanPercentage'].to_f - end - fail_percentage = !config['FailIfGreaterThanPercentage'].nil? ? config['FailIfGreaterThanPercentage'].to_f : config['FailIfLesserThanPercentage'].to_f - is_less_than_comparer = config['FailIfGreaterThanPercentage'].nil? ? true : false # Fail percentage config always present for percentage computation monitors - - if !config.nil? && is_less_than_comparer - if value < fail_percentage - return HealthMonitorStates::FAIL - elsif !warn_percentage.nil? && value < warn_percentage - return HealthMonitorStates::WARNING - else - return HealthMonitorStates::PASS - end - else - if value > fail_percentage - return HealthMonitorStates::FAIL - elsif !warn_percentage.nil? && value > warn_percentage - return HealthMonitorStates::WARNING - else - return HealthMonitorStates::PASS - end - end - end - - def is_node_monitor(monitor_id) - return (monitor_id == MonitorId::NODE_CPU_MONITOR_ID || monitor_id == MonitorId::NODE_MEMORY_MONITOR_ID || monitor_id == MonitorId::NODE_CONDITION_MONITOR_ID) - end - - def is_pods_ready_monitor(monitor_id) - return (monitor_id == MonitorId::USER_WORKLOAD_PODS_READY_MONITOR_ID || monitor_id == MonitorId::SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID) - end - - def is_cluster_health_model_enabled - enabled = ENV["AZMON_CLUSTER_ENABLE_HEALTH_MODEL"] - if !enabled.nil? 
&& enabled.casecmp("true") == 0 - return true - else - return false - end - end - - def get_pods_ready_hash(resources) - pods_ready_percentage_hash = {} - resources.pod_inventory['items'].each do |pod| - begin - workload_name = resources.get_workload_name(pod) - namespace = pod['metadata']['namespace'] - status = pod['status']['phase'] - owner_kind = resources.get_workload_kind(pod) - if owner_kind.casecmp('job') == 0 - next - end - if pods_ready_percentage_hash.key?(workload_name) - total_pods = pods_ready_percentage_hash[workload_name]['totalPods'] - pods_ready = pods_ready_percentage_hash[workload_name]['podsReady'] - else - total_pods = 0 - pods_ready = 0 - end - - total_pods += 1 - if status == 'Running' - pods_ready += 1 - end - - pods_ready_percentage_hash[workload_name] = {'totalPods' => total_pods, 'podsReady' => pods_ready, 'namespace' => namespace, 'workload_name' => workload_name, 'kind' => owner_kind} - rescue => e - @log.info "Error when processing pod #{pod['metadata']['name']} #{e.message}" - end - end - return pods_ready_percentage_hash - end - - def get_node_state_from_node_conditions(monitor_config, node_conditions) - pass = false - warn = false - fail = false - failtypes = ['outofdisk', 'networkunavailable'].to_set #default fail types - if !monitor_config.nil? && !monitor_config["NodeConditionTypesForFailedState"].nil? - failtypes = monitor_config["NodeConditionTypesForFailedState"] - if !failtypes.nil? - failtypes = failtypes.split(',').map{|x| x.downcase}.map{|x| x.gsub(" ","")}.to_set - end - end - log = get_log_handle - #log.info "Fail Types #{failtypes.inspect}" - node_conditions.each do |condition| - type = condition['type'] - status = condition['status'] - - #for each condition in the configuration, check if the type is not false. If yes, update state to fail - if (failtypes.include?(type.downcase) && (status == 'True' || status == 'Unknown')) - fail = true - elsif ((type == "DiskPressure" || type == "MemoryPressure" || type == "PIDPressure") && (status == 'True' || status == 'Unknown')) - warn = true - elsif type == "Ready" && status == 'True' - pass = true - end - end - - if fail - return HealthMonitorStates::FAIL - elsif warn - return HealthMonitorStates::WARNING - else - return HealthMonitorStates::PASS - end - end - - def get_resource_subscription(pod_inventory, metric_name, metric_capacity) - subscription = 0.0 - if !pod_inventory.empty? - pod_inventory['items'].each do |pod| - pod['spec']['containers'].each do |container| - if !container['resources']['requests'].nil? && !container['resources']['requests'][metric_name].nil? - subscription += KubernetesApiClient.getMetricNumericValue(metric_name, container['resources']['requests'][metric_name]) - end - end - end - end - #log.debug "#{metric_name} Subscription #{subscription}" - return subscription - end - - def get_cluster_cpu_memory_capacity(log, node_inventory: nil) - begin - if node_inventory.nil? - resourceUri = KubernetesApiClient.getNodesResourceUri("nodes") - node_inventory = JSON.parse(KubernetesApiClient.getKubeResourceInfo(resourceUri).body) - end - cluster_cpu_capacity = 0.0 - cluster_memory_capacity = 0.0 - if !node_inventory.empty? - cpu_capacity_json = KubernetesApiClient.parseNodeLimits(node_inventory, "capacity", "cpu", "cpuCapacityNanoCores") - if !cpu_capacity_json.nil? - cpu_capacity_json.each do |cpu_capacity_node| - metricVal = JSON.parse(cpu_capacity_node['json_Collections'])[0]['Value'] - if !metricVal.to_s.nil? 
- cluster_cpu_capacity += metricVal - end - end - else - log.info "Error getting cpu_capacity" - end - memory_capacity_json = KubernetesApiClient.parseNodeLimits(node_inventory, "capacity", "memory", "memoryCapacityBytes") - if !memory_capacity_json.nil? - memory_capacity_json.each do |memory_capacity_node| - metricVal = JSON.parse(memory_capacity_node['json_Collections'])[0]['Value'] - if !metricVal.to_s.nil? - cluster_memory_capacity += metricVal - end - end - else - log.info "Error getting memory_capacity" - end - else - log.info "Unable to get cpu and memory capacity" - return [0.0, 0.0] - end - return [cluster_cpu_capacity, cluster_memory_capacity] - rescue => e - log.info e - end - end - - def refresh_kubernetes_api_data(log, hostName, force: false) - #log.debug "refresh_kubernetes_api_data" - if ( ((Time.now.utc - Time.parse(@@last_refresh_time)) / 60 ) < 5.0 && !force) - log.debug "Less than 5 minutes since last refresh at #{@@last_refresh_time}" - return - end - if force - log.debug "Force Refresh" - end - - begin - resourceUri = KubernetesApiClient.getNodesResourceUri("nodes") - @@nodeInventory = JSON.parse(KubernetesApiClient.getKubeResourceInfo(resourceUri).body) - if !hostName.nil? - podInventory = JSON.parse(KubernetesApiClient.getKubeResourceInfo("pods?fieldSelector=spec.nodeName%3D#{hostName}").body) - else - podInventory = JSON.parse(KubernetesApiClient.getKubeResourceInfo("pods").body) - end - podInventory['items'].each do |pod| - has_owner = !pod['metadata']['ownerReferences'].nil? - if !has_owner - workload_name = pod['metadata']['name'] - else - workload_name = pod['metadata']['ownerReferences'][0]['name'] - end - namespace = pod['metadata']['namespace'] - #TODO: Figure this out for container cpu/memory - #@@controllerMapping[workload_name] = namespace - #log.debug "workload_name #{workload_name} namespace #{namespace}" - pod['spec']['containers'].each do |container| - key = [pod['metadata']['uid'], container['name']].join('/') - - if !container['resources'].empty? && !container['resources']['limits'].nil? && !container['resources']['limits']['cpu'].nil? - cpu_limit_value = KubernetesApiClient.getMetricNumericValue('cpu', container['resources']['limits']['cpu']) - else - log.info "CPU limit not set for container : #{container['name']}. Using Node Capacity" - #TODO: Send warning health event #bestpractices - cpu_limit_value = @cpu_capacity - end - - if !container['resources'].empty? && !container['resources']['limits'].nil? && !container['resources']['limits']['memory'].nil? - #log.info "Raw Memory Value #{container['resources']['limits']['memory']}" - memory_limit_value = KubernetesApiClient.getMetricNumericValue('memory', container['resources']['limits']['memory']) - else - log.info "Memory limit not set for container : #{container['name']}. 
Using Node Capacity" - memory_limit_value = @memory_capacity - end - - #TODO: Figure this out for container cpu/memory - #@@containerMetadata[key] = {"cpuLimit" => cpu_limit_value, "memoryLimit" => memory_limit_value, "controllerName" => workload_name, "namespace" => namespace} - end - end - rescue => e - log.info "Error Refreshing Container Resource Limits #{e.backtrace}" - end - # log.info "Controller Mapping #{@@controllerMapping}" - # log.info "Node Inventory #{@@nodeInventory}" - # log.info "Container Metadata #{@@containerMetadata}" - # log.info "------------------------------------" - @@last_refresh_time = Time.now.utc.iso8601 - end - - def get_monitor_instance_id(monitor_id, args = []) - string_to_hash = args.join("/") - return "#{monitor_id}-#{Digest::MD5.hexdigest(string_to_hash)}" - end - - def ensure_cpu_memory_capacity_set(log, cpu_capacity, memory_capacity, hostname) - log.info "ensure_cpu_memory_capacity_set cpu_capacity #{cpu_capacity} memory_capacity #{memory_capacity}" - if cpu_capacity != 1.0 && memory_capacity != 1.0 - log.info "CPU And Memory Capacity are already set" - return [cpu_capacity, memory_capacity] - end - log.info "CPU and Memory Capacity Not set" - return KubeletUtils.get_node_capacity - end - - def build_metrics_hash(metrics_to_collect) - metrics_to_collect_arr = metrics_to_collect.split(',').map(&:strip) - metrics_hash = metrics_to_collect_arr.map {|x| [x.downcase,true]}.to_h - return metrics_hash - end - - def get_health_monitor_config - health_monitor_config = {} - begin - file = File.open('/etc/opt/microsoft/docker-cimprov/health/healthmonitorconfig.json', "r") - if !file.nil? - fileContents = file.read - health_monitor_config = JSON.parse(fileContents) - file.close - end - rescue => e - log.info "Error when opening health config file #{e}" - end - return health_monitor_config - end - - def get_cluster_labels - labels = {} - cluster_id = KubernetesApiClient.getClusterId - region = KubernetesApiClient.getClusterRegion - labels['container.azm.ms/cluster-region'] = region - if !cluster_id.nil? - cluster_id_elements = cluster_id.split('/') - azure_sub_id = cluster_id_elements[2] - resource_group = cluster_id_elements[4] - cluster_name = cluster_id_elements[8] - labels['container.azm.ms/cluster-subscription-id'] = azure_sub_id - labels['container.azm.ms/cluster-resource-group'] = resource_group - labels['container.azm.ms/cluster-name'] = cluster_name - end - return labels - end - - def get_log_handle - return @log - end - end - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/health_signal_reducer.rb b/source/plugins/ruby/health/health_signal_reducer.rb deleted file mode 100644 index 4708c4ee5..000000000 --- a/source/plugins/ruby/health/health_signal_reducer.rb +++ /dev/null @@ -1,53 +0,0 @@ -# frozen_string_literal: true -require_relative 'health_model_constants' - -module HealthModel - # this class - # 1. dedupes daemonset signals and takes only the latest - # 2. removes signals for objects that are no longer in the inventory e.g. 
node might have sent signal before being scaled down - class HealthSignalReducer - def initialize - - end - - def reduce_signals(health_monitor_records, health_k8s_inventory) - nodes = health_k8s_inventory.get_nodes - workload_names = health_k8s_inventory.get_workload_names - reduced_signals_map = {} - reduced_signals = [] - health_monitor_records.each{|health_monitor_record| - monitor_instance_id = health_monitor_record.monitor_instance_id - monitor_id = health_monitor_record.monitor_id - if reduced_signals_map.key?(monitor_instance_id) - record = reduced_signals_map[monitor_instance_id] - if health_monitor_record.transition_date_time > record.transition_date_time # always take the latest record for a monitor instance id - reduced_signals_map[monitor_instance_id] = health_monitor_record - end - elsif HealthMonitorHelpers.is_node_monitor(monitor_id) - node_name = health_monitor_record.labels['kubernetes.io/hostname'] - if (node_name.nil? || !nodes.include?(node_name)) # only add daemon set records if node is present in the inventory - next - end - reduced_signals_map[monitor_instance_id] = health_monitor_record - elsif HealthMonitorHelpers.is_pods_ready_monitor(monitor_id) - workload_name = health_monitor_record.labels[HealthMonitorLabels::WORKLOAD_NAME] - namespace = health_monitor_record.labels[HealthMonitorLabels::NAMESPACE] - lookup = "#{namespace}~~#{workload_name}" - if (workload_name.nil? || !workload_names.include?(lookup)) #only add pod record if present in the inventory - next - end - reduced_signals_map[monitor_instance_id] = health_monitor_record - else - reduced_signals_map[monitor_instance_id] = health_monitor_record - end - } - - reduced_signals_map.each{|k,v| - reduced_signals.push(v) - } - - return reduced_signals - end - - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/monitor_factory.rb b/source/plugins/ruby/health/monitor_factory.rb deleted file mode 100644 index 1e4f6f5b8..000000000 --- a/source/plugins/ruby/health/monitor_factory.rb +++ /dev/null @@ -1,32 +0,0 @@ -# frozen_string_literal: true -require_relative 'aggregate_monitor' -require_relative 'unit_monitor' - -module HealthModel - class MonitorFactory - - def initialize - - end - - def create_unit_monitor(monitor_record) - return UnitMonitor.new(monitor_record.monitor_id, - monitor_record.monitor_instance_id, - monitor_record.state, - monitor_record.transition_date_time, - monitor_record.labels, - monitor_record.config, - monitor_record.details) - end - - def create_aggregate_monitor(monitor_id, monitor_instance_id, labels, aggregation_algorithm, aggregation_algorithm_params, child_monitor) - return AggregateMonitor.new(monitor_id, - monitor_instance_id, - child_monitor.state, - child_monitor.transition_date_time, - aggregation_algorithm, - aggregation_algorithm_params, - labels) - end - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/monitor_set.rb b/source/plugins/ruby/health/monitor_set.rb deleted file mode 100644 index 8d5994419..000000000 --- a/source/plugins/ruby/health/monitor_set.rb +++ /dev/null @@ -1,44 +0,0 @@ -# frozen_string_literal: true - -module HealthModel - class MonitorSet - attr_accessor :monitors - - #constructor - def initialize - @monitors = {} - end - - # checks if the monitor is present in the set - def contains?(monitor_instance_id) - @monitors.key?(monitor_instance_id) - end - - # adds or updates the monitor - def add_or_update(monitor) - @monitors[monitor.monitor_instance_id] = monitor - end - - # gets the monitor given the 
monitor instance id - def get_monitor(monitor_instance_id) - @monitors[monitor_instance_id] if @monitors.key?(monitor_instance_id) - end - - # deletes a monitor from the set - def delete(monitor_instance_id) - if @monitors.key?(monitor_instance_id) - @monitors.delete(monitor_instance_id) - end - end - - # gets the size of the monitor set - def get_size - @monitors.length - end - - # gets the map of monitor instance id to monitors - def get_map - @monitors - end - end -end diff --git a/source/plugins/ruby/health/node_monitor_hierarchy_reducer.rb b/source/plugins/ruby/health/node_monitor_hierarchy_reducer.rb deleted file mode 100644 index 0bad4517e..000000000 --- a/source/plugins/ruby/health/node_monitor_hierarchy_reducer.rb +++ /dev/null @@ -1,34 +0,0 @@ -# frozen_string_literal: true -require_relative 'health_model_constants' - -module HealthModel - class NodeMonitorHierarchyReducer - def initialize - end - - # Finalizes the Node Hierarchy. This removes node pools and node pool set from the hierarchy if they are not present. - def finalize(monitor_set) - monitors_to_reduce = [MonitorId::ALL_AGENT_NODE_POOLS, MonitorId::ALL_NODES] - # for the above monitors, which are constant per cluster, the monitor_id and monitor_instance_id are the same - monitors_to_reduce.each do |monitor_to_reduce| - monitor = monitor_set.get_monitor(monitor_to_reduce) - if !monitor.nil? - if monitor.is_aggregate_monitor && monitor.get_member_monitors.size == 1 - #copy the children of member monitor as children of parent - member_monitor_instance_id = monitor.get_member_monitors[0] #gets the only member monitor instance id - member_monitor = monitor_set.get_monitor(member_monitor_instance_id) - #reduce only if the aggregation algorithms are the same - if !member_monitor.aggregation_algorithm.nil? && member_monitor.aggregation_algorithm == AggregationAlgorithm::WORSTOF && monitor.aggregation_algorithm == member_monitor.aggregation_algorithm - member_monitor.get_member_monitors.each{|grandchild_monitor| - monitor.add_member_monitor(grandchild_monitor) - } - monitor.remove_member_monitor(member_monitor_instance_id) - # delete the member monitor from the monitor_set - monitor_set.delete(member_monitor_instance_id) - end - end - end - end - end - end -end diff --git a/source/plugins/ruby/health/parent_monitor_provider.rb b/source/plugins/ruby/health/parent_monitor_provider.rb deleted file mode 100644 index e5766ea1b..000000000 --- a/source/plugins/ruby/health/parent_monitor_provider.rb +++ /dev/null @@ -1,89 +0,0 @@ -# frozen_string_literal: true -require_relative 'health_model_constants' -module HealthModel - class ParentMonitorProvider - - attr_reader :health_model_definition, :parent_monitor_mapping, :parent_monitor_instance_mapping - - def initialize(definition) - @health_model_definition = definition - @parent_monitor_mapping = {} #monitorId --> parent_monitor_id mapping - @parent_monitor_instance_mapping = {} #child monitor id -- > parent monitor instance mapping. Used in instances when the node no longer exists and impossible to compute from kube api results - @log = HealthMonitorHelpers.get_log_handle - end - - # gets the parent monitor id given the state transition. 
It requires the monitor id and labels to determine the parent id - def get_parent_monitor_id(monitor) - monitor_id = monitor.monitor_id - - # cache the parent monitor id so it is not recomputed every time - if @parent_monitor_mapping.key?(monitor.monitor_instance_id) - return @parent_monitor_mapping[monitor.monitor_instance_id] - end - - if @health_model_definition.key?(monitor_id) - parent_monitor_id = @health_model_definition[monitor_id]['parent_monitor_id'] - # check parent_monitor_id is an array, then evaluate the conditions, else return the parent_monitor_id - if parent_monitor_id.is_a?(String) - @parent_monitor_mapping[monitor.monitor_instance_id] = parent_monitor_id - return parent_monitor_id - end - if parent_monitor_id.nil? - conditions = @health_model_definition[monitor_id]['conditions'] - if !conditions.nil? && conditions.is_a?(Array) - labels = monitor.labels - conditions.each{|condition| - left = "#{labels[condition['key']]}" - op = "#{condition['operator']}" - right = "#{condition['value']}" - cond = left.send(op.to_sym, right) - if cond - @parent_monitor_mapping[monitor.monitor_instance_id] = condition['parent_id'] - return condition['parent_id'] - end - } - end - return @health_model_definition[monitor_id]['default_parent_monitor_id'] - end - else - raise "Invalid Monitor Id #{monitor_id} in get_parent_monitor_id" - end - end - - def get_parent_monitor_labels(monitor_id, monitor_labels, parent_monitor_id) - labels_to_copy = @health_model_definition[monitor_id]['labels'] - if labels_to_copy.nil? - return {} - end - parent_monitor_labels = {} - labels_to_copy.each{|label| - parent_monitor_labels[label] = monitor_labels[label] - } - return parent_monitor_labels - end - - def get_parent_monitor_config(parent_monitor_id) - return @health_model_definition[parent_monitor_id] - end - - def get_parent_monitor_instance_id(monitor_instance_id, parent_monitor_id, parent_monitor_labels) - if @parent_monitor_instance_mapping.key?(monitor_instance_id) - return @parent_monitor_instance_mapping[monitor_instance_id] - end - - labels = AggregateMonitorInstanceIdLabels.get_labels_for(parent_monitor_id) - if !labels.is_a?(Array) - raise "Expected #{labels} to be an Array for #{parent_monitor_id}" - end - values = labels.map{|label| parent_monitor_labels[label]} - if values.nil? || values.empty? 
|| values.size == 0 - @parent_monitor_instance_mapping[monitor_instance_id] = parent_monitor_id - return parent_monitor_id - end - parent_monitor_instance_id = "#{parent_monitor_id}-#{values.join('-')}" - @parent_monitor_instance_mapping[monitor_instance_id] = parent_monitor_instance_id - @log.info "parent_monitor_instance_id for #{monitor_instance_id} => #{parent_monitor_instance_id}" - return parent_monitor_instance_id - end - end -end \ No newline at end of file diff --git a/source/plugins/ruby/health/unit_monitor.rb b/source/plugins/ruby/health/unit_monitor.rb deleted file mode 100644 index 8e2de210b..000000000 --- a/source/plugins/ruby/health/unit_monitor.rb +++ /dev/null @@ -1,27 +0,0 @@ -# frozen_string_literal: true -require_relative 'health_model_constants' -require 'yajl/json_gem' - -module HealthModel - class UnitMonitor - - attr_accessor :monitor_id, :monitor_instance_id, :state, :transition_date_time, :labels, :config, :details, :is_aggregate_monitor - - # constructor - def initialize(monitor_id, monitor_instance_id, state, transition_date_time, labels, config, details) - @monitor_id = monitor_id - @monitor_instance_id = monitor_instance_id - @transition_date_time = transition_date_time - @state = state - @labels = labels - @config = config - @details = details - @is_aggregate_monitor = false - end - - def get_member_monitors - return nil - end - - end -end \ No newline at end of file diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index aba24ecc2..901ecefab 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -27,8 +27,6 @@ def initialize config_param :run_interval, :time, :default => 60 config_param :tag, :string, :default => "oneagent.containerInsights.LINUX_PERF_BLOB" config_param :mdmtag, :string, :default => "mdm.cadvisorperf" - config_param :nodehealthtag, :string, :default => "kubehealth.DaemonSet.Node" - config_param :containerhealthtag, :string, :default => "kubehealth.DaemonSet.Container" config_param :insightsmetricstag, :string, :default => "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" def configure(conf) @@ -82,8 +80,6 @@ def enumerate() end router.emit_stream(@tag, eventStream) if eventStream router.emit_stream(@mdmtag, eventStream) if eventStream - router.emit_stream(@containerhealthtag, eventStream) if eventStream - router.emit_stream(@nodehealthtag, eventStream) if eventStream if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0 && eventStream.count > 0) $log.info("cAdvisorPerfEmitStreamSuccess @ #{Time.now.utc.iso8601}") diff --git a/source/plugins/ruby/in_kube_health.rb b/source/plugins/ruby/in_kube_health.rb deleted file mode 100644 index db981c53e..000000000 --- a/source/plugins/ruby/in_kube_health.rb +++ /dev/null @@ -1,370 +0,0 @@ -#!/usr/local/bin/ruby -# frozen_string_literal: true - -require 'fluent/plugin/input' - -require_relative "KubernetesApiClient" -require_relative "oms_common" -require_relative "omslog" -require_relative "ApplicationInsightsUtility" - -module Fluent::Plugin - Dir[File.join(__dir__, "./health", "*.rb")].each { |file| require file } - - class KubeHealthInput < Input - include HealthModel - Fluent::Plugin.register_input("kube_health", self) - - config_param :health_monitor_config_path, :default => "/etc/opt/microsoft/docker-cimprov/health/healthmonitorconfig.json" - - @@clusterCpuCapacity = 0.0 - @@clusterMemoryCapacity = 0.0 - @@cluster_health_model_enabled = HealthMonitorUtils.is_cluster_health_model_enabled - - def initialize - begin - super - require "yaml" - require "yajl/json_gem" - require "yajl" - require "time" - - @@cluster_id = KubernetesApiClient.getClusterId - @resources = HealthKubernetesResources.instance - @provider = HealthMonitorProvider.new(@@cluster_id, HealthMonitorUtils.get_cluster_labels, @resources, @health_monitor_config_path) - @@ApiGroupApps = "apps" - @@KubeInfraNamespace = "kube-system" - rescue => e - ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) - end - end - - config_param :run_interval, :time, :default => 60 - config_param :tag, :string, :default => "kubehealth.ReplicaSet" - - def configure(conf) - super - end - - def start - begin - super - if @run_interval - @finished = false - @condition = ConditionVariable.new - @mutex = Mutex.new - @thread = Thread.new(&method(:run_periodic)) - - @@hmlog = HealthMonitorUtils.get_log_handle - @@clusterName = KubernetesApiClient.getClusterName - @@clusterRegion = KubernetesApiClient.getClusterRegion - cluster_capacity = HealthMonitorUtils.get_cluster_cpu_memory_capacity(@@hmlog) - @@clusterCpuCapacity = cluster_capacity[0] - @@clusterMemoryCapacity = cluster_capacity[1] - @@hmlog.info "Cluster CPU Capacity: #{@@clusterCpuCapacity} Memory Capacity: #{@@clusterMemoryCapacity}" - initialize_inventory - if @@cluster_health_model_enabled - ApplicationInsightsUtility.sendCustomEvent("in_kube_health Plugin Start", {}) - end - end - rescue => e - ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) - end - end - - def shutdown - if @run_interval - @mutex.synchronize { - @finished = true - @condition.signal - } - @thread.join - super # This super must be at the end of shutdown method - end - end - - def enumerate - if !@@cluster_health_model_enabled - @@hmlog.info "Cluster Health Model disabled in in_kube_health" - return Fluent::MultiEventStream.new - end - begin - currentTime = Time.now - emitTime = Fluent::Engine.now - batchTime = currentTime.utc.iso8601 - health_monitor_records = [] - eventStream = Fluent::MultiEventStream.new - - #HealthMonitorUtils.refresh_kubernetes_api_data(@@hmlog, nil) - # we do this so that if the call fails, we get a response code/header etc. - resourceUri = KubernetesApiClient.getNodesResourceUri("nodes") - node_inventory_response = KubernetesApiClient.getKubeResourceInfo(resourceUri) - if !node_inventory_response.nil? && !node_inventory_response.body.nil? 
- node_inventory = Yajl::Parser.parse(StringIO.new(node_inventory_response.body)) - @resources.node_inventory = node_inventory - end - - pod_inventory_response = KubernetesApiClient.getKubeResourceInfo("pods?fieldSelector=metadata.namespace%3D#{@@KubeInfraNamespace}") - if !pod_inventory_response.nil? && !pod_inventory_response.body.nil? - pod_inventory = Yajl::Parser.parse(StringIO.new(pod_inventory_response.body)) - @resources.pod_inventory = pod_inventory - @resources.build_pod_uid_lookup - end - - replicaset_inventory_response = KubernetesApiClient.getKubeResourceInfo("replicasets?fieldSelector=metadata.namespace%3D#{@@KubeInfraNamespace}", api_group: @@ApiGroupApps) - if !replicaset_inventory_response.nil? && !replicaset_inventory_response.body.nil? - replicaset_inventory = Yajl::Parser.parse(StringIO.new(replicaset_inventory_response.body)) - @resources.set_replicaset_inventory(replicaset_inventory) - end - - if !node_inventory_response.nil? && !node_inventory_response.code.nil? - if node_inventory_response.code.to_i != 200 - record = process_kube_api_up_monitor("fail", node_inventory_response) - health_monitor_records.push(record) if record - else - record = process_kube_api_up_monitor("pass", node_inventory_response) - health_monitor_records.push(record) if record - end - end - - if !pod_inventory.nil? - record = process_cpu_oversubscribed_monitor(pod_inventory, node_inventory) - health_monitor_records.push(record) if record - record = process_memory_oversubscribed_monitor(pod_inventory, node_inventory) - health_monitor_records.push(record) if record - pods_ready_hash = HealthMonitorUtils.get_pods_ready_hash(@resources) - - system_pods = pods_ready_hash.keep_if { |k, v| v["namespace"] == @@KubeInfraNamespace } - workload_pods = Hash.new # pods_ready_hash.select{ |k, v| v["namespace"] != @@KubeInfraNamespace } - - system_pods_ready_percentage_records = process_pods_ready_percentage(system_pods, MonitorId::SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID) - system_pods_ready_percentage_records.each do |record| - health_monitor_records.push(record) if record - end - - workload_pods_ready_percentage_records = process_pods_ready_percentage(workload_pods, MonitorId::USER_WORKLOAD_PODS_READY_MONITOR_ID) - workload_pods_ready_percentage_records.each do |record| - health_monitor_records.push(record) if record - end - else - @@hmlog.info "POD INVENTORY IS NIL" - end - - if !node_inventory.nil? - node_condition_records = process_node_condition_monitor(node_inventory) - node_condition_records.each do |record| - health_monitor_records.push(record) if record - end - else - @@hmlog.info "NODE INVENTORY IS NIL" - end - - health_monitor_records.each do |record| - eventStream.add(emitTime, record) - end - router.emit_stream(@tag, eventStream) if eventStream - rescue => errorStr - @@hmlog.warn("error in_kube_health: #{errorStr.to_s}") - @@hmlog.debug "backtrace Input #{errorStr.backtrace}" - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) - end - end - - def process_cpu_oversubscribed_monitor(pod_inventory, node_inventory) - timestamp = Time.now.utc.iso8601 - @@clusterCpuCapacity = HealthMonitorUtils.get_cluster_cpu_memory_capacity(@@hmlog, node_inventory: node_inventory)[0] - subscription = HealthMonitorUtils.get_resource_subscription(pod_inventory, "cpu", @@clusterCpuCapacity) - @@hmlog.info "Refreshed Cluster CPU Capacity #{@@clusterCpuCapacity}" - state = subscription > @@clusterCpuCapacity ? 
"fail" : "pass" - - #CPU - monitor_id = MonitorId::WORKLOAD_CPU_OVERSUBSCRIBED_MONITOR_ID - health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"clusterCpuCapacity" => @@clusterCpuCapacity / 1000000.to_f, "clusterCpuRequests" => subscription / 1000000.to_f}} - # @@hmlog.info health_monitor_record - - monitor_instance_id = HealthMonitorUtils.get_monitor_instance_id(monitor_id, [@@cluster_id]) - #hmlog.info "Monitor Instance Id: #{monitor_instance_id}" - health_record = {} - time_now = Time.now.utc.iso8601 - health_record[HealthMonitorRecordFields::MONITOR_ID] = monitor_id - health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id - health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record - health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now - health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now - health_record[HealthMonitorRecordFields::CLUSTER_ID] = @@cluster_id - #@@hmlog.info "Successfully processed process_cpu_oversubscribed_monitor" - return health_record - end - - def process_memory_oversubscribed_monitor(pod_inventory, node_inventory) - timestamp = Time.now.utc.iso8601 - @@clusterMemoryCapacity = HealthMonitorUtils.get_cluster_cpu_memory_capacity(@@hmlog, node_inventory: node_inventory)[1] - @@hmlog.info "Refreshed Cluster Memory Capacity #{@@clusterMemoryCapacity}" - subscription = HealthMonitorUtils.get_resource_subscription(pod_inventory, "memory", @@clusterMemoryCapacity) - state = subscription > @@clusterMemoryCapacity ? "fail" : "pass" - #@@hmlog.debug "Memory Oversubscribed Monitor State : #{state}" - - #CPU - monitor_id = MonitorId::WORKLOAD_MEMORY_OVERSUBSCRIBED_MONITOR_ID - health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"clusterMemoryCapacity" => @@clusterMemoryCapacity.to_f, "clusterMemoryRequests" => subscription.to_f}} - hmlog = HealthMonitorUtils.get_log_handle - - monitor_instance_id = HealthMonitorUtils.get_monitor_instance_id(monitor_id, [@@cluster_id]) - health_record = {} - time_now = Time.now.utc.iso8601 - health_record[HealthMonitorRecordFields::MONITOR_ID] = monitor_id - health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id - health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record - health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now - health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now - health_record[HealthMonitorRecordFields::CLUSTER_ID] = @@cluster_id - #@@hmlog.info "Successfully processed process_memory_oversubscribed_monitor" - return health_record - end - - def process_kube_api_up_monitor(state, response) - timestamp = Time.now.utc.iso8601 - - monitor_id = MonitorId::KUBE_API_STATUS - details = response.each_header.to_h - details["ResponseCode"] = response.code - health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => details} - hmlog = HealthMonitorUtils.get_log_handle - #hmlog.info health_monitor_record - - monitor_instance_id = MonitorId::KUBE_API_STATUS - #hmlog.info "Monitor Instance Id: #{monitor_instance_id}" - health_record = {} - time_now = Time.now.utc.iso8601 - health_record[HealthMonitorRecordFields::MONITOR_ID] = monitor_id - health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id - health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record - health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now - 
health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now - health_record[HealthMonitorRecordFields::CLUSTER_ID] = @@cluster_id - #@@hmlog.info "Successfully processed process_kube_api_up_monitor" - return health_record - end - - def process_pods_ready_percentage(pods_hash, config_monitor_id) - monitor_config = @provider.get_config(config_monitor_id) - hmlog = HealthMonitorUtils.get_log_handle - - records = [] - pods_hash.keys.each do |key| - workload_name = key - total_pods = pods_hash[workload_name]["totalPods"] - pods_ready = pods_hash[workload_name]["podsReady"] - namespace = pods_hash[workload_name]["namespace"] - workload_kind = pods_hash[workload_name]["kind"] - percent = pods_ready / total_pods * 100 - timestamp = Time.now.utc.iso8601 - - state = HealthMonitorUtils.compute_percentage_state(percent, monitor_config) - health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"totalPods" => total_pods, "podsReady" => pods_ready, "workload_name" => workload_name, "namespace" => namespace, "workload_kind" => workload_kind}} - monitor_instance_id = HealthMonitorUtils.get_monitor_instance_id(config_monitor_id, [@@cluster_id, namespace, workload_name]) - health_record = {} - time_now = Time.now.utc.iso8601 - health_record[HealthMonitorRecordFields::MONITOR_ID] = config_monitor_id - health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id - health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record - health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now - health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now - health_record[HealthMonitorRecordFields::CLUSTER_ID] = @@cluster_id - records.push(health_record) - end - #@@hmlog.info "Successfully processed pods_ready_percentage for #{config_monitor_id} #{records.size}" - return records - end - - def process_node_condition_monitor(node_inventory) - monitor_id = MonitorId::NODE_CONDITION_MONITOR_ID - timestamp = Time.now.utc.iso8601 - monitor_config = @provider.get_config(monitor_id) - node_condition_monitor_records = [] - if !node_inventory.nil? 
- node_inventory["items"].each do |node| - node_name = node["metadata"]["name"] - conditions = node["status"]["conditions"] - node_state = HealthMonitorUtils.get_node_state_from_node_conditions(monitor_config, conditions) - details = {} - conditions.each do |condition| - condition_state = HealthMonitorStates::PASS - if condition["type"].downcase != "ready" - if (condition["status"].downcase == "true" || condition["status"].downcase == "unknown") - condition_state = HealthMonitorStates::FAIL - end - else #Condition == READY - if condition["status"].downcase != "true" - condition_state = HealthMonitorStates::FAIL - end - end - details[condition["type"]] = {"Reason" => condition["reason"], "Message" => condition["message"], "State" => condition_state} - end - health_monitor_record = {"timestamp" => timestamp, "state" => node_state, "details" => details} - monitor_instance_id = HealthMonitorUtils.get_monitor_instance_id(monitor_id, [@@cluster_id, node_name]) - health_record = {} - time_now = Time.now.utc.iso8601 - health_record[HealthMonitorRecordFields::MONITOR_ID] = monitor_id - health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id - health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record - health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now - health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now - health_record[HealthMonitorRecordFields::CLUSTER_ID] = @@cluster_id - health_record[HealthMonitorRecordFields::NODE_NAME] = node_name - node_condition_monitor_records.push(health_record) - end - end - #@@hmlog.info "Successfully processed process_node_condition_monitor #{node_condition_monitor_records.size}" - return node_condition_monitor_records - end - - def initialize_inventory - #this is required because there are other components, like the container cpu memory aggregator, that depends on the mapping being initialized - resourceUri = KubernetesApiClient.getNodesResourceUri("nodes") - node_inventory_response = KubernetesApiClient.getKubeResourceInfo(resourceUri) - node_inventory = Yajl::Parser.parse(StringIO.new(node_inventory_response.body)) - pod_inventory_response = KubernetesApiClient.getKubeResourceInfo("pods?fieldSelector=metadata.namespace%3D#{@@KubeInfraNamespace}") - pod_inventory = Yajl::Parser.parse(StringIO.new(pod_inventory_response.body)) - replicaset_inventory_response = KubernetesApiClient.getKubeResourceInfo("replicasets?fieldSelector=metadata.namespace%3D#{@@KubeInfraNamespace}", api_group: @@ApiGroupApps) - replicaset_inventory = Yajl::Parser.parse(StringIO.new(replicaset_inventory_response.body)) - - @resources.node_inventory = node_inventory - @resources.pod_inventory = pod_inventory - @resources.set_replicaset_inventory(replicaset_inventory) - @resources.build_pod_uid_lookup - end - - def run_periodic - @mutex.lock - done = @finished - @nextTimeToRun = Time.now - @waitTimeout = @run_interval - until done - @nextTimeToRun = @nextTimeToRun + @run_interval - @now = Time.now - if @nextTimeToRun <= @now - @waitTimeout = 1 - @nextTimeToRun = @now - else - @waitTimeout = @nextTimeToRun - @now - end - @condition.wait(@mutex, @waitTimeout) - done = @finished - @mutex.unlock - if !done - begin - @@hmlog.info("in_kube_health::run_periodic.enumerate.start @ #{Time.now.utc.iso8601}") - enumerate - @@hmlog.info("in_kube_health::run_periodic.enumerate.end @ #{Time.now.utc.iso8601}") - rescue => errorStr - @@hmlog.warn "in_kube_health::run_periodic: enumerate Failed for kubeapi sourced data health: 
#{errorStr}" - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) - end - end - @mutex.lock - end - @mutex.unlock - end - end -end diff --git a/source/plugins/ruby/out_health_forward.rb b/source/plugins/ruby/out_health_forward.rb deleted file mode 100644 index 59eed97da..000000000 --- a/source/plugins/ruby/out_health_forward.rb +++ /dev/null @@ -1,838 +0,0 @@ -# frozen_string_literal: true -# -# Fluentd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -require 'fluent/output' -require 'fluent/config/error' -require 'fluent/clock' -require 'fluent/tls' -require 'base64' -require 'forwardable' - -require 'fluent/compat/socket_util' -require 'fluent/plugin/out_forward/handshake_protocol' -require 'fluent/plugin/out_forward/load_balancer' -require 'fluent/plugin/out_forward/socket_cache' -require 'fluent/plugin/out_forward/failure_detector' -require 'fluent/plugin/out_forward/error' -require 'fluent/plugin/out_forward/connection_manager' -require 'fluent/plugin/out_forward/ack_handler' - -module Fluent::Plugin - class HealthForwardOutput < Output - Fluent::Plugin.register_output('health_forward', self) - - helpers :socket, :server, :timer, :thread, :compat_parameters, :service_discovery - - LISTEN_PORT = 25227 - - desc 'The transport protocol.' - config_param :transport, :enum, list: [:tcp, :tls], default: :tcp - # TODO: TLS session cache/tickets - - desc 'The timeout time when sending event logs.' - config_param :send_timeout, :time, default: 60 - desc 'The timeout time for socket connect' - config_param :connect_timeout, :time, default: nil - # TODO: add linger_timeout, recv_timeout - - desc 'The protocol to use for heartbeats (default is the same with "transport").' - config_param :heartbeat_type, :enum, list: [:transport, :tcp, :udp, :none], default: :transport - desc 'The interval of the heartbeat packer.' - config_param :heartbeat_interval, :time, default: 1 - desc 'The wait time before accepting a server fault recovery.' - config_param :recover_wait, :time, default: 10 - desc 'The hard timeout used to detect server failure.' - config_param :hard_timeout, :time, default: 60 - desc 'The threshold parameter used to detect server faults.' - config_param :phi_threshold, :integer, default: 16 - desc 'Use the "Phi accrual failure detector" to detect server failure.' - config_param :phi_failure_detector, :bool, default: true - - desc 'Change the protocol to at-least-once.' - config_param :require_ack_response, :bool, default: false # require in_forward to respond with ack - - ## The reason of default value of :ack_response_timeout: - # Linux default tcp_syn_retries is 5 (in many environment) - # 3 + 6 + 12 + 24 + 48 + 96 -> 189 (sec) - desc 'This option is used when require_ack_response is true.' 
- config_param :ack_response_timeout, :time, default: 190 - - desc 'The interval while reading data from server' - config_param :read_interval_msec, :integer, default: 50 # 50ms - desc 'Reading data size from server' - config_param :read_length, :size, default: 512 # 512bytes - - desc 'Set TTL to expire DNS cache in seconds.' - config_param :expire_dns_cache, :time, default: nil # 0 means disable cache - desc 'Enable client-side DNS round robin.' - config_param :dns_round_robin, :bool, default: false # heartbeat_type 'udp' is not available for this - - desc 'Ignore DNS resolution and errors at startup time.' - config_param :ignore_network_errors_at_startup, :bool, default: false - - desc 'Verify that a connection can be made with one of out_forward nodes at the time of startup.' - config_param :verify_connection_at_startup, :bool, default: false - - desc 'Compress buffered data.' - config_param :compress, :enum, list: [:text, :gzip], default: :text - - desc 'The default version of TLS transport.' - config_param :tls_version, :enum, list: Fluent::TLS::SUPPORTED_VERSIONS, default: Fluent::TLS::DEFAULT_VERSION - desc 'The cipher configuration of TLS transport.' - config_param :tls_ciphers, :string, default: Fluent::TLS::CIPHERS_DEFAULT - desc 'Skip all verification of certificates or not.' - config_param :tls_insecure_mode, :bool, default: false - desc 'Allow self signed certificates or not.' - config_param :tls_allow_self_signed_cert, :bool, default: false - desc 'Verify hostname of servers and certificates or not in TLS transport.' - config_param :tls_verify_hostname, :bool, default: true - desc 'The additional CA certificate path for TLS.' - config_param :tls_ca_cert_path, :array, value_type: :string, default: nil - desc 'The additional certificate path for TLS.' - config_param :tls_cert_path, :array, value_type: :string, default: nil - desc 'The client certificate path for TLS.' - config_param :tls_client_cert_path, :string, default: nil - desc 'The client private key path for TLS.' - config_param :tls_client_private_key_path, :string, default: nil - desc 'The client private key passphrase for TLS.' - config_param :tls_client_private_key_passphrase, :string, default: nil, secret: true - desc 'The certificate thumbprint for searching from Windows system certstore.' - config_param :tls_cert_thumbprint, :string, default: nil, secret: true - desc 'The certificate logical store name on Windows system certstore.' - config_param :tls_cert_logical_store_name, :string, default: nil - desc 'Enable to use certificate enterprise store on Windows system certstore.' - config_param :tls_cert_use_enterprise_store, :bool, default: true - desc "Enable keepalive connection." - config_param :keepalive, :bool, default: false - desc "Expired time of keepalive. Default value is nil, which means to keep connection as long as possible" - config_param :keepalive_timeout, :time, default: nil - - config_section :security, required: false, multi: false do - desc 'The hostname' - config_param :self_hostname, :string - desc 'Shared key for authentication' - config_param :shared_key, :string, secret: true - end - - config_section :server, param_name: :servers do - desc "The IP address or host name of the server." - config_param :host, :string - desc "The name of the server. Used for logging and certificate verification in TLS transport (when host is address)." - config_param :name, :string, default: nil - desc "The port number of the host." 
- config_param :port, :integer, default: LISTEN_PORT - desc "The shared key per server." - config_param :shared_key, :string, default: nil, secret: true - desc "The username for authentication." - config_param :username, :string, default: '' - desc "The password for authentication." - config_param :password, :string, default: '', secret: true - desc "Marks a node as the standby node for an Active-Standby model between Fluentd nodes." - config_param :standby, :bool, default: false - desc "The load balancing weight." - config_param :weight, :integer, default: 60 - end - - attr_reader :nodes - - config_param :port, :integer, default: LISTEN_PORT, obsoleted: "User section instead." - config_param :host, :string, default: nil, obsoleted: "Use section instead." - - config_section :buffer do - config_set_default :chunk_keys, ["tag"] - end - - attr_reader :read_interval, :recover_sample_size - - def initialize - super - - @nodes = [] #=> [Node] - @loop = nil - @thread = nil - - @usock = nil - @keep_alive_watcher_interval = 5 # TODO - @suspend_flush = false - end - - def configure(conf) - compat_parameters_convert(conf, :buffer, default_chunk_key: 'tag') - - super - - unless @chunk_key_tag - raise Fluent::ConfigError, "buffer chunk key must include 'tag' for forward output" - end - - @read_interval = @read_interval_msec / 1000.0 - @recover_sample_size = @recover_wait / @heartbeat_interval - - if @heartbeat_type == :tcp - log.warn "'heartbeat_type tcp' is deprecated. use 'transport' instead." - @heartbeat_type = :transport - end - - if @dns_round_robin && @heartbeat_type == :udp - raise Fluent::ConfigError, "forward output heartbeat type must be 'transport' or 'none' to use dns_round_robin option" - end - - if @transport == :tls - # socket helper adds CA cert or signed certificate to same cert store internally so unify it in this place. - if @tls_cert_path && !@tls_cert_path.empty? - @tls_ca_cert_path = @tls_cert_path - end - if @tls_ca_cert_path && !@tls_ca_cert_path.empty? - @tls_ca_cert_path.each do |path| - raise Fluent::ConfigError, "specified cert path does not exist:#{path}" unless File.exist?(path) - raise Fluent::ConfigError, "specified cert path is not readable:#{path}" unless File.readable?(path) - end - end - - if @tls_insecure_mode - log.warn "TLS transport is configured in insecure way" - @tls_verify_hostname = false - @tls_allow_self_signed_cert = true - end - - if Fluent.windows? - if (@tls_cert_path || @tls_ca_cert_path) && @tls_cert_logical_store_name - raise Fluent::ConfigError, "specified both cert path and tls_cert_logical_store_name is not permitted" - end - else - raise Fluent::ConfigError, "This parameter is for only Windows" if @tls_cert_logical_store_name - raise Fluent::ConfigError, "This parameter is for only Windows" if @tls_cert_thumbprint - end - end - - @ack_handler = @require_ack_response ? AckHandler.new(timeout: @ack_response_timeout, log: @log, read_length: @read_length) : nil - socket_cache = @keepalive ? SocketCache.new(@keepalive_timeout, @log) : nil - @connection_manager = Fluent::Plugin::ForwardOutput::ConnectionManager.new( - log: @log, - secure: !!@security, - connection_factory: method(:create_transfer_socket), - socket_cache: socket_cache, - ) - - configs = [] - - # rewrite for using server as sd_static - conf.elements(name: 'server').each do |s| - s.name = 'service' - end - - unless conf.elements(name: 'service').empty? 
- # To copy `services` element only - new_elem = Fluent::Config::Element.new('static_service_discovery', {}, {}, conf.elements(name: 'service')) - configs << { type: :static, conf: new_elem } - end - - conf.elements(name: 'service_discovery').each_with_index do |c, i| - configs << { type: @service_discovery[i][:@type], conf: c } - end - - service_discovery_create_manager( - :out_forward_service_discovery_watcher, - configurations: configs, - load_balancer: Fluent::Plugin::ForwardOutput::LoadBalancer.new(log), - custom_build_method: method(:build_node), - ) - - discovery_manager.services.each do |server| - # it's only for test - @nodes << server - unless @heartbeat_type == :none - begin - server.validate_host_resolution! - rescue => e - raise unless @ignore_network_errors_at_startup - log.warn "failed to resolve node name when configured", server: (server.name || server.host), error: e - server.disable! - end - end - end - - unless @as_secondary - if @compress == :gzip && @buffer.compress == :text - @buffer.compress = :gzip - elsif @compress == :text && @buffer.compress == :gzip - log.info "buffer is compressed. If you also want to save the bandwidth of a network, Add `compress` configuration in " - end - end - - if discovery_manager.services.empty? - raise Fluent::ConfigError, "forward output plugin requires at least one node is required. Add or " - end - - if !@keepalive && @keepalive_timeout - log.warn('The value of keepalive_timeout is ignored. if you want to use keepalive, please add `keepalive true` to your conf.') - end - - raise Fluent::ConfigError, "ack_response_timeout must be a positive integer" if @ack_response_timeout < 1 - end - - def multi_workers_ready? - true - end - - def prefer_delayed_commit - @require_ack_response - end - - def overwrite_delayed_commit_timeout - # Output#start sets @delayed_commit_timeout by @buffer_config.delayed_commit_timeout - # But it should be overwritten by ack_response_timeout to rollback chunks after timeout - if @delayed_commit_timeout != @ack_response_timeout - log.info "delayed_commit_timeout is overwritten by ack_response_timeout" - @delayed_commit_timeout = @ack_response_timeout + 2 # minimum ack_reader IO.select interval is 1s - end - end - - def start - super - - unless @heartbeat_type == :none - if @heartbeat_type == :udp - @usock = socket_create_udp(discovery_manager.services.first.host, discovery_manager.services.first.port, nonblock: true) - server_create_udp(:out_forward_heartbeat_receiver, 0, socket: @usock, max_bytes: @read_length, &method(:on_udp_heatbeat_response_recv)) - end - timer_execute(:out_forward_heartbeat_request, @heartbeat_interval, &method(:on_heartbeat_timer)) - end - - if @require_ack_response - overwrite_delayed_commit_timeout - thread_create(:out_forward_receiving_ack, &method(:ack_reader)) - end - - if @verify_connection_at_startup - discovery_manager.services.each do |node| - begin - node.verify_connection - rescue StandardError => e - log.fatal "forward's connection setting error: #{e.message}" - raise Fluent::UnrecoverableError, e.message - end - end - end - - if @keepalive - timer_execute(:out_forward_keep_alived_socket_watcher, @keep_alive_watcher_interval, &method(:on_purge_obsolete_socks)) - end - end - - def close - if @usock - # close socket and ignore errors: this socket will not be used anyway. 
- @usock.close rescue nil - end - - super - end - - def stop - super - - if @keepalive - @connection_manager.stop - end - end - - def before_shutdown - super - @suspend_flush = true - end - - def after_shutdown - last_ack if @require_ack_response - super - end - - def try_flush - return if @require_ack_response && @suspend_flush - super - end - - def last_ack - overwrite_delayed_commit_timeout - ack_check(ack_select_interval) - end - - def write(chunk) - return if chunk.empty? - tag = chunk.metadata.tag - - discovery_manager.select_service { |node| node.send_data(tag, chunk) } - end - - def try_write(chunk) - log.trace "writing a chunk to destination", chunk_id: dump_unique_id_hex(chunk.unique_id) - if chunk.empty? - commit_write(chunk.unique_id) - return - end - tag = chunk.metadata.tag - discovery_manager.select_service { |node| node.send_data(tag, chunk) } - last_ack if @require_ack_response && @suspend_flush - end - - def create_transfer_socket(host, port, hostname, &block) - case @transport - when :tls - socket_create_tls( - host, port, - version: @tls_version, - ciphers: @tls_ciphers, - insecure: @tls_insecure_mode, - verify_fqdn: @tls_verify_hostname, - fqdn: hostname, - allow_self_signed_cert: @tls_allow_self_signed_cert, - cert_paths: @tls_ca_cert_path, - cert_path: @tls_client_cert_path, - private_key_path: @tls_client_private_key_path, - private_key_passphrase: @tls_client_private_key_passphrase, - cert_thumbprint: @tls_cert_thumbprint, - cert_logical_store_name: @tls_cert_logical_store_name, - cert_use_enterprise_store: @tls_cert_use_enterprise_store, - - # Enabling SO_LINGER causes tcp port exhaustion on Windows. - # This is because dynamic ports are only 16384 (from 49152 to 65535) and - # expiring SO_LINGER enabled ports should wait 4 minutes - # where set by TcpTimeDelay. Its default value is 4 minutes. - # So, we should disable SO_LINGER on Windows to prevent flood of waiting ports. - linger_timeout: Fluent.windows? ? nil : @send_timeout, - send_timeout: @send_timeout, - recv_timeout: @ack_response_timeout, - connect_timeout: @connect_timeout, - &block - ) - when :tcp - socket_create_tcp( - host, port, - linger_timeout: @send_timeout, - send_timeout: @send_timeout, - recv_timeout: @ack_response_timeout, - connect_timeout: @connect_timeout, - &block - ) - else - raise "BUG: unknown transport protocol #{@transport}" - end - end - - def statistics - stats = super - services = discovery_manager.services - healthy_nodes_count = 0 - registed_nodes_count = services.size - services.each do |s| - if s.available? 
- healthy_nodes_count += 1 - end - end - - stats.merge( - 'healthy_nodes_count' => healthy_nodes_count, - 'registered_nodes_count' => registed_nodes_count, - ) - end - - # MessagePack FixArray length is 3 - FORWARD_HEADER = [0x93].pack('C').freeze - def forward_header - FORWARD_HEADER - end - - private - - def build_node(server) - name = server.name || "#{server.host}:#{server.port}" - log.info "adding forwarding server '#{name}'", host: server.host, port: server.port, weight: server.weight, plugin_id: plugin_id - - failure = Fluent::Plugin::ForwardOutput::FailureDetector.new(@heartbeat_interval, @hard_timeout, Time.now.to_i.to_f) - if @heartbeat_type == :none - NoneHeartbeatNode.new(self, server, failure: failure, connection_manager: @connection_manager, ack_handler: @ack_handler) - else - Node.new(self, server, failure: failure, connection_manager: @connection_manager, ack_handler: @ack_handler) - end - end - - def on_heartbeat_timer - need_rebuild = false - discovery_manager.services.each do |n| - begin - log.trace "sending heartbeat", host: n.host, port: n.port, heartbeat_type: @heartbeat_type - n.usock = @usock if @usock - need_rebuild = n.send_heartbeat || need_rebuild - rescue Errno::EAGAIN, Errno::EWOULDBLOCK, Errno::EINTR, Errno::ECONNREFUSED, Errno::ETIMEDOUT => e - log.debug "failed to send heartbeat packet", host: n.host, port: n.port, heartbeat_type: @heartbeat_type, error: e - rescue => e - log.debug "unexpected error happen during heartbeat", host: n.host, port: n.port, heartbeat_type: @heartbeat_type, error: e - end - - need_rebuild = n.tick || need_rebuild - end - - if need_rebuild - discovery_manager.rebalance - end - end - - def on_udp_heatbeat_response_recv(data, sock) - sockaddr = Socket.pack_sockaddr_in(sock.remote_port, sock.remote_host) - if node = discovery_manager.services.find { |n| n.sockaddr == sockaddr } - # log.trace "heartbeat arrived", name: node.name, host: node.host, port: node.port - if node.heartbeat - discovery_manager.rebalance - end - else - log.warn("Unknown heartbeat response received from #{sock.remote_host}:#{sock.remote_port}. It may service out") - end - end - - def on_purge_obsolete_socks - @connection_manager.purge_obsolete_socks - end - - def ack_select_interval - if @delayed_commit_timeout > 3 - 1 - else - @delayed_commit_timeout / 3.0 - end - end - - def ack_reader - select_interval = ack_select_interval - - while thread_current_running? - ack_check(select_interval) - end - end - - def ack_check(select_interval) - @ack_handler.collect_response(select_interval) do |chunk_id, node, sock, result| - @connection_manager.close(sock) - - case result - when AckHandler::Result::SUCCESS - commit_write(chunk_id) - when AckHandler::Result::FAILED - node.disable! 
- rollback_write(chunk_id, update_retry: false) - when AckHandler::Result::CHUNKID_UNMATCHED - rollback_write(chunk_id, update_retry: false) - else - log.warn("BUG: invalid status #{result} #{chunk_id}") - - if chunk_id - rollback_write(chunk_id, update_retry: false) - end - end - end - end - - class Node - extend Forwardable - def_delegators :@server, :discovery_id, :host, :port, :name, :weight, :standby - - # @param connection_manager [Fluent::Plugin::ForwardOutput::ConnectionManager] - # @param ack_handler [Fluent::Plugin::ForwardOutput::AckHandler] - def initialize(sender, server, failure:, connection_manager:, ack_handler:) - @sender = sender - @log = sender.log - @compress = sender.compress - @server = server - - @name = server.name - @host = server.host - @port = server.port - @weight = server.weight - @standby = server.standby - @failure = failure - @available = true - - # @hostname is used for certificate verification & TLS SNI - host_is_hostname = !(IPAddr.new(@host) rescue false) - @hostname = case - when host_is_hostname then @host - when @name then @name - else nil - end - - @usock = nil - - @handshake = Fluent::Plugin::ForwardOutput::HandshakeProtocol.new( - log: @log, - hostname: sender.security && sender.security.self_hostname, - shared_key: server.shared_key || (sender.security && sender.security.shared_key) || '', - password: server.password || '', - username: server.username || '', - ) - - @unpacker = Fluent::MessagePackFactory.msgpack_unpacker - - @resolved_host = nil - @resolved_time = 0 - @resolved_once = false - - @connection_manager = connection_manager - @ack_handler = ack_handler - end - - attr_accessor :usock - - attr_reader :state - attr_reader :sockaddr # used by on_udp_heatbeat_response_recv - attr_reader :failure # for test - - def validate_host_resolution! - resolved_host - end - - def available? - @available - end - - def disable! - @available = false - end - - def standby? - @standby - end - - def verify_connection - connect do |sock, ri| - ensure_established_connection(sock, ri) - end - end - - def establish_connection(sock, ri) - while ri.state != :established - begin - # TODO: On Ruby 2.2 or earlier, read_nonblock doesn't work expectedly. - # We need rewrite around here using new socket/server plugin helper. - buf = sock.read_nonblock(@sender.read_length) - if buf.empty? - sleep @sender.read_interval - next - end - @unpacker.feed_each(buf) do |data| - if @handshake.invoke(sock, ri, data) == :established - @log.debug "connection established", host: @host, port: @port - end - end - rescue IO::WaitReadable - # If the exception is Errno::EWOULDBLOCK or Errno::EAGAIN, it is extended by IO::WaitReadable. - # So IO::WaitReadable can be used to rescue the exceptions for retrying read_nonblock. - # https//docs.ruby-lang.org/en/2.3.0/IO.html#method-i-read_nonblock - sleep @sender.read_interval unless ri.state == :established - rescue SystemCallError => e - @log.warn "disconnected by error", host: @host, port: @port, error: e - disable! - break - rescue EOFError - @log.warn "disconnected", host: @host, port: @port - disable! - break - rescue HeloError => e - @log.warn "received invalid helo message from #{@name}" - disable! - break - rescue PingpongError => e - @log.warn "connection refused to #{@name || @host}: #{e.message}" - disable! 
- break - end - end - end - - def send_data_actual(sock, tag, chunk) - option = { 'size' => chunk.size, 'compressed' => @compress } - option['chunk'] = Base64.encode64(chunk.unique_id) if @ack_handler - - # https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1#packedforward-mode - # out_forward always uses str32 type for entries. - # str16 can store only 64kbytes, and it should be much smaller than buffer chunk size. - - tag = tag.dup.force_encoding(Encoding::UTF_8) - - sock.write @sender.forward_header # array, size=3 - sock.write tag.to_msgpack # 1. tag: String (str) - chunk.open(compressed: @compress) do |chunk_io| - entries = [0xdb, chunk_io.size].pack('CN') - sock.write entries.force_encoding(Encoding::UTF_8) # 2. entries: String (str32) - IO.copy_stream(chunk_io, sock) # writeRawBody(packed_es) - end - sock.write option.to_msgpack # 3. option: Hash(map) - - # TODO: use bin32 for non-utf8 content(entries) when old msgpack-ruby (0.5.x or earlier) not supported - end - - def send_data(tag, chunk) - ack = @ack_handler && @ack_handler.create_ack(chunk.unique_id, self) - connect(nil, ack: ack) do |sock, ri| - ensure_established_connection(sock, ri) - send_data_actual(sock, tag, chunk) - end - - heartbeat(false) - nil - end - - # FORWARD_TCP_HEARTBEAT_DATA = FORWARD_HEADER + ''.to_msgpack + [].to_msgpack - # - # @return [Boolean] return true if it needs to rebuild nodes - def send_heartbeat - begin - dest_addr = resolved_host - @resolved_once = true - rescue ::SocketError => e - if !@resolved_once && @sender.ignore_network_errors_at_startup - @log.warn "failed to resolve node name in heartbeating", server: @name || @host, error: e - return false - end - raise - end - - case @sender.heartbeat_type - when :transport - connect(dest_addr) do |sock, ri| - ensure_established_connection(sock, ri) - - ## don't send any data to not cause a compatibility problem - # sock.write FORWARD_TCP_HEARTBEAT_DATA - - # successful tcp connection establishment is considered as valid heartbeat. - # When heartbeat is succeeded after detached, return true. It rebuilds weight array. - heartbeat(true) - end - when :udp - @usock.send "\0", 0, Socket.pack_sockaddr_in(@port, dest_addr) - # response is going to receive at on_udp_heatbeat_response_recv - false - when :none # :none doesn't use this class - raise "BUG: heartbeat_type none must not use Node" - else - raise "BUG: unknown heartbeat_type '#{@sender.heartbeat_type}'" - end - end - - def resolved_host - case @sender.expire_dns_cache - when 0 - # cache is disabled - resolve_dns! - - when nil - # persistent cache - @resolved_host ||= resolve_dns! - - else - now = Fluent::EventTime.now - rh = @resolved_host - if !rh || now - @resolved_time >= @sender.expire_dns_cache - rh = @resolved_host = resolve_dns! - @resolved_time = now - end - rh - end - end - - def resolve_dns! - addrinfo_list = Socket.getaddrinfo(@host, @port, nil, Socket::SOCK_STREAM) - addrinfo = @sender.dns_round_robin ? addrinfo_list.sample : addrinfo_list.first - @sockaddr = Socket.pack_sockaddr_in(addrinfo[1], addrinfo[3]) # used by on_udp_heatbeat_response_recv - addrinfo[3] - end - private :resolve_dns! - - def tick - now = Time.now.to_f - unless available? - if @failure.hard_timeout?(now) - @failure.clear - end - return nil - end - - if @failure.hard_timeout?(now) - @log.warn "detached forwarding server '#{@name}'", host: @host, port: @port, hard_timeout: true - disable! 
- @resolved_host = nil # expire cached host - @failure.clear - return true - end - - if @sender.phi_failure_detector - phi = @failure.phi(now) - if phi > @sender.phi_threshold - @log.warn "detached forwarding server '#{@name}'", host: @host, port: @port, phi: phi, phi_threshold: @sender.phi_threshold - disable! - @resolved_host = nil # expire cached host - @failure.clear - return true - end - end - false - end - - def heartbeat(detect=true) - now = Time.now.to_f - @failure.add(now) - if detect && !available? && @failure.sample_size > @sender.recover_sample_size - @available = true - @log.warn "recovered forwarding server '#{@name}'", host: @host, port: @port - true - else - nil - end - end - - private - - def ensure_established_connection(sock, request_info) - if request_info.state != :established - establish_connection(sock, request_info) - - if request_info.state != :established - raise ConnectionClosedError, "failed to establish connection with node #{@name}" - end - end - end - - def connect(host = nil, ack: false, &block) - @connection_manager.connect(host: host || resolved_host, port: port, hostname: @hostname, ack: ack, &block) - end - end - - # Override Node to disable heartbeat - class NoneHeartbeatNode < Node - def available? - true - end - - def tick - false - end - - def heartbeat(detect=true) - true - end - end - end -end diff --git a/test/unit-tests/plugins/filter_health_model_builder_test.rb b/test/unit-tests/plugins/filter_health_model_builder_test.rb deleted file mode 100644 index c5b17306a..000000000 --- a/test/unit-tests/plugins/filter_health_model_builder_test.rb +++ /dev/null @@ -1,54 +0,0 @@ -# frozen_string_literal: true - -require 'test/unit' -require 'json' -# require_relative '../../../source/plugins/ruby/health' - -Dir[File.join(__dir__, '../../../source/plugins/ruby/health', '*.rb')].each { |file| require file } - -class FilterHealthModelBuilderTest < Test::Unit::TestCase - include HealthModel - - def test_event_stream - health_definition_path = 'C:\AzureMonitor\ContainerInsights\Docker-Provider\installer\conf\health_model_definition.json' - health_model_definition = ParentMonitorProvider.new(HealthModelDefinitionParser.new(health_definition_path).parse_file) - monitor_factory = MonitorFactory.new - hierarchy_builder = HealthHierarchyBuilder.new(health_model_definition, monitor_factory) - # TODO: Figure out if we need to add NodeMonitorHierarchyReducer to the list of finalizers. 
For now, dont compress/optimize, since it becomes impossible to construct the model on the UX side - state_finalizers = [AggregateMonitorStateFinalizer.new] - monitor_set = MonitorSet.new - model_builder = HealthModelBuilder.new(hierarchy_builder, state_finalizers, monitor_set) - - i = 1 - loop do - mock_data_path = "C:/AzureMonitor/ContainerInsights/Docker-Provider/source/plugins/ruby/mock_data-#{i}.json" - file = File.read(mock_data_path) - data = JSON.parse(file) - - health_monitor_records = [] - data.each do |record| - health_monitor_record = HealthMonitorRecord.new( - record[HealthMonitorRecordFields::MONITOR_ID], - record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID], - record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED], - record[HealthMonitorRecordFields::DETAILS]["state"], - record[HealthMonitorRecordFields::MONITOR_LABELS], - record[HealthMonitorRecordFields::MONITOR_CONFIG], - record[HealthMonitorRecordFields::DETAILS] - ) - state_transitions.push(state_transition) - end - - model_builder.process_state_transitions(state_transitions) - changed_monitors = model_builder.finalize_model - changed_monitors.keys.each{|key| - puts key - } - i = i + 1 - if i == 6 - break - end - end - puts "Done" - end -end diff --git a/test/unit-tests/plugins/health/aggregate_monitor_spec.rb b/test/unit-tests/plugins/health/aggregate_monitor_spec.rb deleted file mode 100644 index a12a0aa7f..000000000 --- a/test/unit-tests/plugins/health/aggregate_monitor_spec.rb +++ /dev/null @@ -1,256 +0,0 @@ -require_relative '../test_helpers' - -Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/plugins/ruby/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each { |file| require file } -include HealthModel - -describe "AggregateMonitor Spec" do - it "is_aggregate_monitor is true for AggregateMonitor" do - # Arrange/Act - monitor = AggregateMonitor.new(:monitor_id, :monitor_instance_id, :pass, :time, "worstOf", [], {}) - # Assert - assert_equal monitor.is_aggregate_monitor, true - end - - it "add_member_monitor tests -- adds a member monitor as a child monitor" do - # Arrange - monitor = AggregateMonitor.new(:monitor_id, :monitor_instance_id, :pass, :time, "worstOf", [], {}) - #Act - monitor.add_member_monitor("child_monitor_1") - #Assert - assert_equal monitor.get_member_monitors.include?("child_monitor_1"), true - - #Act - monitor.add_member_monitor("child_monitor_1") - #Assert - assert_equal monitor.get_member_monitors.size, 1 - end - - it "remove_member_monitor tests -- removes a member monitor as a child monitor" do - # Arrange - monitor = AggregateMonitor.new(:monitor_id, :monitor_instance_id, :pass, :time, "worstOf", [], {}) - monitor.add_member_monitor("child_monitor_1") - monitor.add_member_monitor("child_monitor_2") - - #Act - monitor.remove_member_monitor("child_monitor_1") - #Assert - assert_equal monitor.get_member_monitors.size, 1 - - #Act - monitor.remove_member_monitor("unknown_child") - #Assert - assert_equal monitor.get_member_monitors.size, 1 - end - - it "calculate_details tests -- calculates rollup details based on member monitor states" do - # Arrange - monitor = AggregateMonitor.new(:monitor_id, :monitor_instance_id, :pass, :time, "worstOf", [], {}) - - child_monitor_1 = UnitMonitor.new("monitor_1", "child_monitor_1", "pass", "time", {}, {}, {}) - child_monitor_2 = UnitMonitor.new("monitor_2", "child_monitor_2", "fail", "time", {}, {}, {}) - - monitor_set = MonitorSet.new - monitor_set.add_or_update(child_monitor_1) - 
monitor_set.add_or_update(child_monitor_2) - - monitor.add_member_monitor("child_monitor_1") - monitor.add_member_monitor("child_monitor_2") - - #Act - monitor.calculate_details(monitor_set) - #Assert - assert_equal monitor.details["details"], {"pass"=>["child_monitor_1"], "fail"=>["child_monitor_2"]} - - #Arrange - child_monitor_3 = UnitMonitor.new("monitor_3", "child_monitor_3", "pass", "time", {}, {}, {}) - monitor_set.add_or_update(child_monitor_3) - monitor.add_member_monitor("child_monitor_3") - - #Act - monitor.calculate_details(monitor_set) - #Assert - assert_equal monitor.details["details"], {"pass"=>["child_monitor_1", "child_monitor_3"], "fail"=>["child_monitor_2"]} - end - - it "calculate_state tests -- raises when right aggregation_algorithm NOT specified" do - # Arrange - monitor = AggregateMonitor.new(:monitor_id, :monitor_instance_id, :pass, :time, "", [], {}) - #Assert - assert_raises do - monitor.calculate_state(monitor_set) - end - end - - it "calculate_state tests -- calculate_worst_of_state " do - # Arrange -- pass, fail = fail - monitor = AggregateMonitor.new(:monitor_id, :monitor_instance_id, :pass, :time, "worstOf", [], {}) - - child_monitor_1 = UnitMonitor.new("monitor_1", "child_monitor_1", "pass", "time", {}, {}, {}) - child_monitor_2 = UnitMonitor.new("monitor_2", "child_monitor_2", "fail", "time", {}, {}, {}) - - monitor_set = MonitorSet.new - monitor_set.add_or_update(child_monitor_1) - monitor_set.add_or_update(child_monitor_2) - - monitor.add_member_monitor("child_monitor_1") - monitor.add_member_monitor("child_monitor_2") - #Act - monitor.calculate_state(monitor_set) - #Assert - assert_equal monitor.state, "fail" - - #Arrange -- pass, pass = pass - child_monitor_2 = UnitMonitor.new("monitor_2", "child_monitor_2", "pass", "time", {}, {}, {}) - monitor_set.add_or_update(child_monitor_2) - #Act - monitor.calculate_state(monitor_set) - #Assert - assert_equal monitor.state, "pass" - - #Arrange -- pass, warn = warn - child_monitor_2 = UnitMonitor.new("monitor_2", "child_monitor_2", "warn", "time", {}, {}, {}) - monitor_set.add_or_update(child_monitor_2) - #Act - monitor.calculate_state(monitor_set) - #Assert - assert_equal monitor.state, "warn" - - #Arrange -- warn, fail = fail - child_monitor_1 = UnitMonitor.new("monitor_1", "child_monitor_1", "warn", "time", {}, {}, {}) - child_monitor_2 = UnitMonitor.new("monitor_2", "child_monitor_2", "fail", "time", {}, {}, {}) - monitor_set.add_or_update(child_monitor_1) - monitor_set.add_or_update(child_monitor_2) - - #Act - monitor.calculate_state(monitor_set) - #Assert - assert_equal monitor.state, "fail" - - #Arrange -- warn, unknown = unknown - child_monitor_1 = UnitMonitor.new("monitor_1", "child_monitor_1", "warn", "time", {}, {}, {}) - child_monitor_2 = UnitMonitor.new("monitor_2", "child_monitor_2", "unknown", "time", {}, {}, {}) - monitor_set.add_or_update(child_monitor_1) - monitor_set.add_or_update(child_monitor_2) - - #Act - monitor.calculate_state(monitor_set) - #Assert - assert_equal monitor.state, "warn" - - #Arrange -- pass, unknown = unknown - child_monitor_1 = UnitMonitor.new("monitor_1", "child_monitor_1", "pass", "time", {}, {}, {}) - child_monitor_2 = UnitMonitor.new("monitor_2", "child_monitor_2", "unknown", "time", {}, {}, {}) - monitor_set.add_or_update(child_monitor_1) - monitor_set.add_or_update(child_monitor_2) - - #Act - monitor.calculate_state(monitor_set) - #Assert - assert_equal monitor.state, "unknown" - end - - it "calculate_state tests -- calculate_percentage_state " do - # Arrange - 
monitor = AggregateMonitor.new(:monitor_id, :monitor_instance_id, :pass, :time, "percentage", {"state_threshold" => 90.0}, {}) - - child_monitor_1 = UnitMonitor.new("monitor_1", "child_monitor_1", "pass", "time", {}, {}, {}) - child_monitor_2 = UnitMonitor.new("monitor_2", "child_monitor_2", "fail", "time", {}, {}, {}) - - monitor_set = MonitorSet.new - monitor_set.add_or_update(child_monitor_1) - monitor_set.add_or_update(child_monitor_2) - - monitor.add_member_monitor("child_monitor_1") - monitor.add_member_monitor("child_monitor_2") - #Act - monitor.calculate_state(monitor_set) - #Assert - assert_equal monitor.state, "fail" - - #Arrange - monitor = AggregateMonitor.new(:monitor_id, :monitor_instance_id, :pass, :time, "percentage", {"state_threshold" => 50.0}, {}) - child_monitor_1 = UnitMonitor.new("monitor_1", "child_monitor_1", "pass", "time", {}, {}, {}) - child_monitor_2 = UnitMonitor.new("monitor_2", "child_monitor_2", "fail", "time", {}, {}, {}) - - monitor_set = MonitorSet.new - monitor_set.add_or_update(child_monitor_1) - monitor_set.add_or_update(child_monitor_2) - - monitor.add_member_monitor("child_monitor_1") - monitor.add_member_monitor("child_monitor_2") - #Act - monitor.calculate_state(monitor_set) - #Assert - assert_equal monitor.state, "pass" - - #Arrange -- single child monitor - monitor = AggregateMonitor.new(:monitor_id, :monitor_instance_id, :pass, :time, "percentage", {"state_threshold" => 33.3}, {}) - child_monitor_1 = UnitMonitor.new("monitor_1", "child_monitor_1", "pass", "time", {}, {}, {}) - monitor_set = MonitorSet.new - monitor_set.add_or_update(child_monitor_1) - monitor.add_member_monitor("child_monitor_1") - #Act - monitor.calculate_state(monitor_set) - #Assert - assert_equal monitor.state, "pass" - - - #Arrange -- remove none state - monitor = AggregateMonitor.new(:monitor_id, :monitor_instance_id, :none, :time, "percentage", {"state_threshold" => 100.0}, {}) - child_monitor_1 = UnitMonitor.new("monitor_1", "child_monitor_1", "pass", "time", {}, {}, {}) - child_monitor_2 = UnitMonitor.new("monitor_2", "child_monitor_2", "none", "time", {}, {}, {}) - - monitor_set = MonitorSet.new - monitor_set.add_or_update(child_monitor_1) - monitor_set.add_or_update(child_monitor_2) - - monitor.add_member_monitor("child_monitor_1") - monitor.add_member_monitor("child_monitor_2") - #Act - monitor.calculate_state(monitor_set) - #Assert - assert_equal monitor.state, "pass" - - - # Arrange - monitor = AggregateMonitor.new(:monitor_id, :monitor_instance_id, :pass, :time, "percentage", {"state_threshold" => 50.0}, {}) - - child_monitor_1 = UnitMonitor.new("monitor_1", "child_monitor_1", "pass", "time", {}, {}, {}) - child_monitor_2 = UnitMonitor.new("monitor_2", "child_monitor_2", "fail", "time", {}, {}, {}) - child_monitor_3 = UnitMonitor.new("monitor_3", "child_monitor_3", "fail", "time", {}, {}, {}) - - monitor_set = MonitorSet.new - monitor_set.add_or_update(child_monitor_1) - monitor_set.add_or_update(child_monitor_2) - monitor_set.add_or_update(child_monitor_3) - - monitor.add_member_monitor("child_monitor_1") - monitor.add_member_monitor("child_monitor_2") - monitor.add_member_monitor("child_monitor_3") - #Act - monitor.calculate_state(monitor_set) - #Assert - assert_equal monitor.state, "fail" - - - # Arrange - monitor = AggregateMonitor.new(:monitor_id, :monitor_instance_id, :pass, :time, "percentage", {"state_threshold" => 90.0}, {}) - - child_monitor_1 = UnitMonitor.new("monitor_1", "child_monitor_1", "pass", "time", {}, {}, {}) - child_monitor_2 = 
UnitMonitor.new("monitor_2", "child_monitor_2", "pass", "time", {}, {}, {}) - child_monitor_3 = UnitMonitor.new("monitor_3", "child_monitor_3", "pass", "time", {}, {}, {}) - - monitor_set = MonitorSet.new - monitor_set.add_or_update(child_monitor_1) - monitor_set.add_or_update(child_monitor_2) - monitor_set.add_or_update(child_monitor_3) - - monitor.add_member_monitor("child_monitor_1") - monitor.add_member_monitor("child_monitor_2") - monitor.add_member_monitor("child_monitor_3") - #Act - monitor.calculate_state(monitor_set) - #Assert - assert_equal monitor.state, "pass" - end -end \ No newline at end of file diff --git a/test/unit-tests/plugins/health/aggregate_monitor_state_finalizer_spec.rb b/test/unit-tests/plugins/health/aggregate_monitor_state_finalizer_spec.rb deleted file mode 100644 index 71e4aa16a..000000000 --- a/test/unit-tests/plugins/health/aggregate_monitor_state_finalizer_spec.rb +++ /dev/null @@ -1,59 +0,0 @@ -require_relative '../test_helpers' -Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/plugins/ruby/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each { |file| require file } -include HealthModel -include Minitest - -describe "AggregateMonitorStateFinalizer spec" do - it 'computes the right state and details' do - #arrange - monitor_set = Mock.new - - #mock unit monitors - child1 = Mock.new - def child1.state; "pass"; end - def child1.monitor_id; "child1";end - def child1.monitor_instance_id; "child1"; end - def child1.nil?; false; end - def child1.is_aggregate_monitor; false; end - - child2 = Mock.new - def child2.state; "fail"; end - def child2.monitor_id; "child2";end - def child2.monitor_instance_id; "child2"; end - def child2.nil?; false; end - def child2.is_aggregate_monitor; false; end - - parent_monitor = AggregateMonitor.new("parent_monitor", "parent_monitor", :none, :time, "worstOf", nil, {}) - parent_monitor.add_member_monitor("child1") - parent_monitor.add_member_monitor("child2") - - top_level_monitor = AggregateMonitor.new("cluster", "cluster", :none, :time, "worstOf", nil, {}) - top_level_monitor.add_member_monitor("parent_monitor") - - monitor_set.expect(:get_map, {"cluster" => top_level_monitor, "parent_monitor" => parent_monitor, "child1" => child1, "child2" => child2}) - monitor_set.expect(:get_monitor, top_level_monitor, ["cluster"]) - monitor_set.expect(:get_monitor, parent_monitor, ["parent_monitor"]) - monitor_set.expect(:get_monitor, child1, ["child1"]) - monitor_set.expect(:get_monitor, child2, ["child2"]) - monitor_set.expect(:get_monitor, child1, ["child1"]) - monitor_set.expect(:get_monitor, child2, ["child2"]) - monitor_set.expect(:get_monitor, parent_monitor, ["parent_monitor"]) - - - monitor_set.expect(:get_monitor, parent_monitor, ["parent_monitor"]) - monitor_set.expect(:get_monitor, child1, ["child1"]) - monitor_set.expect(:get_monitor, child2, ["child2"]) - - #act - finalizer = AggregateMonitorStateFinalizer.new - finalizer.finalize(monitor_set) - #assert - - assert_equal parent_monitor.state, "fail" - assert_equal parent_monitor.details, {"details"=>{"pass"=>["child1"], "fail"=>["child2"]}, "state"=>"fail", "timestamp"=>:time} - - assert_equal top_level_monitor.state, "fail" - assert_equal top_level_monitor.details, {"details"=>{"fail"=>["parent_monitor"]}, "state"=>"fail", "timestamp"=>:time} - - end -end \ No newline at end of file diff --git a/test/unit-tests/plugins/health/ca.crt b/test/unit-tests/plugins/health/ca.crt deleted file mode 100644 index 9daeafb98..000000000 --- 
a/test/unit-tests/plugins/health/ca.crt +++ /dev/null @@ -1 +0,0 @@ -test diff --git a/test/unit-tests/plugins/health/cadvisor_perf.json b/test/unit-tests/plugins/health/cadvisor_perf.json deleted file mode 100644 index 35eae32b6..000000000 --- a/test/unit-tests/plugins/health/cadvisor_perf.json +++ /dev/null @@ -1,2540 +0,0 @@ -[ - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:39Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/sidecar", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 14061568 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:44Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/dnsmasq", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 7249920 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:45Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/kubedns", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 14442496 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:49Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/healthz", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 5988352 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:43Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/f65e6a62-c5c8-11e9-8736-86290fd7dd1f/kube-proxy", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 40284160 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:41Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/69e68b21-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 101965824 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": 
"2019-08-23T22:13:37Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/e690309f-a742-11e9-a38a-22d1c75c4357/redirector", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 3203072 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:42Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/e690309f-a742-11e9-a38a-22d1c75c4357/azureproxy", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 9658368 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:42Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/9543dbb7-a1f2-11e9-8b08-d602e29755d5/metrics-server", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 21491712 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:50Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/sidecar", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1562639906 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:50Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/dnsmasq", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1562639899 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:50Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/kubedns", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1562639895 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:50Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/healthz", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 
1562639903 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:50Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/f65e6a62-c5c8-11e9-8736-86290fd7dd1f/kube-proxy", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1566580259 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:50Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/69e68b21-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1566589936 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:50Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/e690309f-a742-11e9-a38a-22d1c75c4357/redirector", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1563224142 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:50Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/e690309f-a742-11e9-a38a-22d1c75c4357/azureproxy", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1563224144 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:50Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/9543dbb7-a1f2-11e9-8b08-d602e29755d5/metrics-server", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1562639893 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:39Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/sidecar", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 349987 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:44Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": 
"/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/dnsmasq", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 773186 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:45Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/kubedns", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 2718196 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:49Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/healthz", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 2007695 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:43Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/f65e6a62-c5c8-11e9-8736-86290fd7dd1f/kube-proxy", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 674463 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:41Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/69e68b21-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 2159553 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:37Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/e690309f-a742-11e9-a38a-22d1c75c4357/redirector", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 3575667 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:42Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/e690309f-a742-11e9-a38a-22d1c75c4357/azureproxy", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 0 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": 
"2019-08-23T22:13:42Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/9543dbb7-a1f2-11e9-8b08-d602e29755d5/metrics-server", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 633968 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:39Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/sidecar", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 11546624 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:39Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/sidecar", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 11546624 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:44Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/dnsmasq", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 5652480 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:45Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/kubedns", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 10981376 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:49Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/healthz", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 2875392 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:43Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/f65e6a62-c5c8-11e9-8736-86290fd7dd1f/kube-proxy", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 20627456 - } - ] - } - ], - "DataType": 
"LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:41Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/69e68b21-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 69353472 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:37Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/e690309f-a742-11e9-a38a-22d1c75c4357/redirector", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 462848 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:42Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/e690309f-a742-11e9-a38a-22d1c75c4357/azureproxy", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 8212480 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:42Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/9543dbb7-a1f2-11e9-8b08-d602e29755d5/metrics-server", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 16543744 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:45Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SNode", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-1", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 814518272 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:45Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SNode", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-1", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 82091339.40983607 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:45Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SNode", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-1", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 
2089115648 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:50Z", - "Host": "aks-nodepool1-19574989-1", - "ObjectName": "K8SNode", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-1", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1552408751.22 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:56Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b1e04e1c-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 85528576 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:54Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/49e373c8-c5c9-11e9-8736-86290fd7dd1f/kube-proxy", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 25415680 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:53Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/65a6f978-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 111738880 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:55Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/24ab7e32-c5c9-11e9-8736-86290fd7dd1f/heapster-nanny", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 8417280 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:01Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/24ab7e32-c5c9-11e9-8736-86290fd7dd1f/heapster", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 19492864 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:57Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": 
"/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/9583b2ab-a1f2-11e9-8b08-d602e29755d5/main", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 12918784 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:46Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/bb3d3ef2-a742-11e9-a38a-22d1c75c4357/redirector", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 3379200 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:57Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/bb3d3ef2-a742-11e9-a38a-22d1c75c4357/azureproxy", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 9818112 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:03Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b1e04e1c-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1566590024 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:03Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/49e373c8-c5c9-11e9-8736-86290fd7dd1f/kube-proxy", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1566580398 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:03Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/65a6f978-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1566589942 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:03Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/24ab7e32-c5c9-11e9-8736-86290fd7dd1f/heapster-nanny", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1566580342 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - 
{ - "Timestamp": "2019-08-23T22:13:03Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/24ab7e32-c5c9-11e9-8736-86290fd7dd1f/heapster", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1566580337 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:03Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/9583b2ab-a1f2-11e9-8b08-d602e29755d5/main", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1562639936 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:03Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/bb3d3ef2-a742-11e9-a38a-22d1c75c4357/redirector", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1563224072 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:03Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/bb3d3ef2-a742-11e9-a38a-22d1c75c4357/azureproxy", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1563224077 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:56Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b1e04e1c-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 4447595 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:54Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/49e373c8-c5c9-11e9-8736-86290fd7dd1f/kube-proxy", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 2765529 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:53Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/65a6f978-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 
5565414 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:55Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/24ab7e32-c5c9-11e9-8736-86290fd7dd1f/heapster-nanny", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 863810 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:01Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/24ab7e32-c5c9-11e9-8736-86290fd7dd1f/heapster", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 886196 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:57Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/9583b2ab-a1f2-11e9-8b08-d602e29755d5/main", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 855014 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:46Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/bb3d3ef2-a742-11e9-a38a-22d1c75c4357/redirector", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 1794634 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:57Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/bb3d3ef2-a742-11e9-a38a-22d1c75c4357/azureproxy", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 0 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:56Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b1e04e1c-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 76308480 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:54Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": 
"/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/49e373c8-c5c9-11e9-8736-86290fd7dd1f/kube-proxy", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 21319680 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:53Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/65a6f978-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 78180352 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:55Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/24ab7e32-c5c9-11e9-8736-86290fd7dd1f/heapster-nanny", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 7909376 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:01Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/24ab7e32-c5c9-11e9-8736-86290fd7dd1f/heapster", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 18968576 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:57Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/9583b2ab-a1f2-11e9-8b08-d602e29755d5/main", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 9871360 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:46Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/bb3d3ef2-a742-11e9-a38a-22d1c75c4357/redirector", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 462848 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:57Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/bb3d3ef2-a742-11e9-a38a-22d1c75c4357/azureproxy", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 8212480 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:57Z", 
- "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SNode", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-0", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 865943552 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:57Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SNode", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-0", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 95432166.25 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:12:57Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SNode", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-0", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 2191216640 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:03Z", - "Host": "aks-nodepool1-19574989-0", - "ObjectName": "K8SNode", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-0", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1552408749.66 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:07Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b2a0e1b3-bd3f-11e9-b2a7-d61658c73830/tunnel-front", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 17743872 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:12Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/114f7246-c5c9-11e9-8736-86290fd7dd1f/kube-proxy", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 24162304 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:07Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/44a61692-b945-11e9-a1b6-127094e7fd94/azureproxy", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 11472896 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - 
"Timestamp": "2019-08-23T22:14:06Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/44a61692-b945-11e9-a1b6-127094e7fd94/redirector", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 3821568 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:15Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/8dbd5e8b-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 92057600 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:15Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b2a0e1b3-bd3f-11e9-b2a7-d61658c73830/tunnel-front", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1565641691 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:15Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/114f7246-c5c9-11e9-8736-86290fd7dd1f/kube-proxy", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1566580300 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:15Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/44a61692-b945-11e9-a1b6-127094e7fd94/azureproxy", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1565204288 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:15Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/44a61692-b945-11e9-a1b6-127094e7fd94/redirector", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1565204284 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:15Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/8dbd5e8b-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - 
"Value": 1566589995 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:07Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b2a0e1b3-bd3f-11e9-b2a7-d61658c73830/tunnel-front", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 35140951 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:12Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/114f7246-c5c9-11e9-8736-86290fd7dd1f/kube-proxy", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 983407 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:07Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/44a61692-b945-11e9-a1b6-127094e7fd94/azureproxy", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 0 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:06Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/44a61692-b945-11e9-a1b6-127094e7fd94/redirector", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 4221562 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:15Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/8dbd5e8b-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 1881274 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:07Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b2a0e1b3-bd3f-11e9-b2a7-d61658c73830/tunnel-front", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 4161536 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:12Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": 
"/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/114f7246-c5c9-11e9-8736-86290fd7dd1f/kube-proxy", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 18952192 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:07Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/44a61692-b945-11e9-a1b6-127094e7fd94/azureproxy", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 8224768 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:06Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/44a61692-b945-11e9-a1b6-127094e7fd94/redirector", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 483328 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:15Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/8dbd5e8b-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 74915840 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:14Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SNode", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-3", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 554704896 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:14Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SNode", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-3", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 88981130.86666666 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:14Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": "K8SNode", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-3", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 1633976320 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:14:15Z", - "Host": "aks-nodepool1-19574989-3", - "ObjectName": 
"K8SNode", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-3", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1565204130.6 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:37Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/be78d7f6-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 92954624 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:33Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/dnsmasq", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 7446528 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:22Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/sidecar", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 14811136 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:31Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/kubedns", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 15114240 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:35Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/healthz", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 5406720 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:32Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/94e52ab1-a1f2-11e9-8b08-d602e29755d5/autoscaler", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 10043392 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - 
"DataItems": [ - { - "Timestamp": "2019-08-23T22:13:37Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/2c3de48d-c5c9-11e9-8736-86290fd7dd1f/kube-proxy", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 58052608 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:31Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/06fef5f6-a743-11e9-a38a-22d1c75c4357/azureproxy", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 9904128 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:31Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/06fef5f6-a743-11e9-a38a-22d1c75c4357/redirector", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 3645440 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:40Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/be78d7f6-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1566590079 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:40Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/dnsmasq", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1562639920 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:40Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/sidecar", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1562639940 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:40Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/kubedns", - "Collections": [ - { - "CounterName": 
"restartTimeEpoch", - "Value": 1562639904 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:40Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/healthz", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1562639932 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:40Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/94e52ab1-a1f2-11e9-8b08-d602e29755d5/autoscaler", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1562639909 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:40Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/2c3de48d-c5c9-11e9-8736-86290fd7dd1f/kube-proxy", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1566580349 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:40Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/06fef5f6-a743-11e9-a38a-22d1c75c4357/azureproxy", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1563224204 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:40Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/06fef5f6-a743-11e9-a38a-22d1c75c4357/redirector", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1563224199 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:37Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/be78d7f6-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 3004849 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:33Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": 
"/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/dnsmasq", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 796842 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:22Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/sidecar", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 708906 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:31Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/kubedns", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 3451625 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:35Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/healthz", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 2572419 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:32Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/94e52ab1-a1f2-11e9-8b08-d602e29755d5/autoscaler", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 548275 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:37Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/2c3de48d-c5c9-11e9-8736-86290fd7dd1f/kube-proxy", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 1740316 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:31Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/06fef5f6-a743-11e9-a38a-22d1c75c4357/azureproxy", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 0 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": 
"2019-08-23T22:13:31Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/06fef5f6-a743-11e9-a38a-22d1c75c4357/redirector", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 3156661 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:37Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/be78d7f6-c5df-11e9-8736-86290fd7dd1f/omsagent", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 66428928 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:33Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/dnsmasq", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 5611520 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:22Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/sidecar", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 11833344 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:31Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/kubedns", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 11063296 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:35Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/healthz", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 2551808 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:32Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/94e52ab1-a1f2-11e9-8b08-d602e29755d5/autoscaler", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 9244672 - } - ] - } - ], - "DataType": 
"LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:37Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/2c3de48d-c5c9-11e9-8736-86290fd7dd1f/kube-proxy", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 20402176 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:31Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/06fef5f6-a743-11e9-a38a-22d1c75c4357/azureproxy", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 8216576 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:31Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/06fef5f6-a743-11e9-a38a-22d1c75c4357/redirector", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 462848 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:30Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SNode", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-2", - "Collections": [ - { - "CounterName": "memoryRssBytes", - "Value": 853344256 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:30Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SNode", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-2", - "Collections": [ - { - "CounterName": "cpuUsageNanoCores", - "Value": 114265842.16 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:30Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SNode", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-2", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 1892982784 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }, - { - "DataItems": [ - { - "Timestamp": "2019-08-23T22:13:40Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SNode", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-2", - "Collections": [ - { - "CounterName": "restartTimeEpoch", - "Value": 1561082409.36 - } - ] - } - ], - 
"DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - } -] \ No newline at end of file diff --git a/test/unit-tests/plugins/health/cluster_health_state_spec.rb b/test/unit-tests/plugins/health/cluster_health_state_spec.rb deleted file mode 100644 index fd13213b1..000000000 --- a/test/unit-tests/plugins/health/cluster_health_state_spec.rb +++ /dev/null @@ -1,37 +0,0 @@ -require_relative '../test_helpers' -Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/plugins/ruby/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each { |file| require file } -require 'time' -include HealthModel -include Minitest - -describe "Cluster Health State Spec" do - - it "ClusterHealthState.new throws if cert file is NOT present" do - state = { - "m1" => { - "state" => "pass", - "time" => Time.now.utc.iso8601 - } - } - - token_file_path = 'token' - cert_file_path = '/var/ca.crt' - - proc {ClusterHealthState.new(token_file_path, cert_file_path)}.must_raise - - end - - it "ClusterHealthState.new returns nil if token is NOT present" do - state = { - "m1" => { - "state" => "pass", - "time" => Time.now.utc.iso8601 - } - } - token_file_path = 'token' - cert_file_path = File.join(File.expand_path(File.dirname(__FILE__)), "ca.crt") - - chs = ClusterHealthState.new(token_file_path, cert_file_path) - chs.token.must_be_nil - end -end diff --git a/test/unit-tests/plugins/health/deployments.json b/test/unit-tests/plugins/health/deployments.json deleted file mode 100644 index 75586db04..000000000 --- a/test/unit-tests/plugins/health/deployments.json +++ /dev/null @@ -1,1385 +0,0 @@ -{ - "apiVersion": "v1", - "items": [ - { - "apiVersion": "extensions/v1beta1", - "kind": "Deployment", - "metadata": { - "annotations": { - "deployment.kubernetes.io/revision": "2" - }, - "creationTimestamp": "2019-08-23T17:12:00Z", - "generation": 2, - "labels": { - "addonmanager.kubernetes.io/mode": "EnsureExists", - "k8s-app": "heapster", - "kubernetes.io/cluster-service": "true" - }, - "name": "heapster", - "namespace": "kube-system", - "resourceVersion": "19048928", - "selfLink": "/apis/extensions/v1beta1/namespaces/kube-system/deployments/heapster", - "uid": "1e98c3d1-c5c9-11e9-8736-86290fd7dd1f" - }, - "spec": { - "progressDeadlineSeconds": 2147483647, - "replicas": 1, - "revisionHistoryLimit": 10, - "selector": { - "matchLabels": { - "k8s-app": "heapster" - } - }, - "strategy": { - "rollingUpdate": { - "maxSurge": 1, - "maxUnavailable": 1 - }, - "type": "RollingUpdate" - }, - "template": { - "metadata": { - "creationTimestamp": null, - "labels": { - "k8s-app": "heapster" - } - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - } - }, - "containers": [ - { - "command": [ - "/heapster", - "--source=kubernetes.summary_api:\"\"" - ], - "image": "aksrepos.azurecr.io/mirror/heapster-amd64:v1.5.3", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "failureThreshold": 3, - "httpGet": { - "path": "/healthz", - "port": 8082, - "scheme": "HTTP" - }, - "initialDelaySeconds": 180, - "periodSeconds": 10, - "successThreshold": 1, - "timeoutSeconds": 5 - }, - "name": "heapster", - "resources": { - "limits": { - "cpu": "88m", - "memory": "204Mi" - }, - "requests": { - "cpu": "88m", - "memory": "204Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - }, - { - 
"command": [ - "/pod_nanny", - "--config-dir=/etc/config", - "--cpu=80m", - "--extra-cpu=0.5m", - "--memory=140Mi", - "--extra-memory=4Mi", - "--threshold=5", - "--deployment=heapster", - "--container=heapster", - "--poll-period=300000", - "--estimator=exponential" - ], - "env": [ - { - "name": "MY_POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "MY_POD_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - } - ], - "image": "aksrepos.azurecr.io/mirror/addon-resizer:1.8.1", - "imagePullPolicy": "IfNotPresent", - "name": "heapster-nanny", - "resources": { - "limits": { - "cpu": "50m", - "memory": "90Mi" - }, - "requests": { - "cpu": "50m", - "memory": "90Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/config", - "name": "heapster-config-volume" - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "heapster", - "serviceAccountName": "heapster", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - } - ], - "volumes": [ - { - "configMap": { - "defaultMode": 420, - "name": "heapster-config" - }, - "name": "heapster-config-volume" - } - ] - } - } - }, - "status": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2019-08-23T17:12:00Z", - "lastUpdateTime": "2019-08-23T17:12:00Z", - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - } - ], - "observedGeneration": 2, - "readyReplicas": 1, - "replicas": 1, - "updatedReplicas": 1 - } - }, - { - "apiVersion": "extensions/v1beta1", - "kind": "Deployment", - "metadata": { - "annotations": { - "deployment.kubernetes.io/revision": "5", - "kubectl.kubernetes.io/last-applied-configuration": 
"{\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"k8s-app\":\"kube-dns-autoscaler\",\"kubernetes.io/cluster-service\":\"true\"},\"name\":\"kube-dns-autoscaler\",\"namespace\":\"kube-system\"},\"spec\":{\"selector\":{\"matchLabels\":{\"k8s-app\":\"kube-dns-autoscaler\"}},\"template\":{\"metadata\":{\"annotations\":{\"scheduler.alpha.kubernetes.io/critical-pod\":\"\",\"seccomp.security.alpha.kubernetes.io/pod\":\"docker/default\"},\"labels\":{\"k8s-app\":\"kube-dns-autoscaler\"}},\"spec\":{\"containers\":[{\"command\":[\"/cluster-proportional-autoscaler\",\"--namespace=kube-system\",\"--configmap=kube-dns-autoscaler\",\"--target=deployment/kube-dns-v20\",\"--default-params={\\\"ladder\\\":{\\\"coresToReplicas\\\":[[1,2],[512,3],[1024,4],[2048,5]],\\\"nodesToReplicas\\\":[[1,2],[8,3],[16,4],[32,5]]}}\",\"--logtostderr=true\",\"--v=2\"],\"image\":\"aksrepos.azurecr.io/mirror/cluster-proportional-autoscaler-amd64:1.1.2-r2\",\"name\":\"autoscaler\",\"resources\":{\"requests\":{\"cpu\":\"20m\",\"memory\":\"10Mi\"}}}],\"dnsPolicy\":\"Default\",\"imagePullSecrets\":[{\"name\":\"emptyacrsecret\"}],\"priorityClassName\":\"system-node-critical\",\"serviceAccountName\":\"kube-dns-autoscaler\",\"tolerations\":[{\"key\":\"CriticalAddonsOnly\",\"operator\":\"Exists\"}]}}}}\n" - }, - "creationTimestamp": "2019-03-12T16:38:30Z", - "generation": 5, - "labels": { - "addonmanager.kubernetes.io/mode": "Reconcile", - "k8s-app": "kube-dns-autoscaler", - "kubernetes.io/cluster-service": "true" - }, - "name": "kube-dns-autoscaler", - "namespace": "kube-system", - "resourceVersion": "15144046", - "selfLink": "/apis/extensions/v1beta1/namespaces/kube-system/deployments/kube-dns-autoscaler", - "uid": "4509acaf-44e5-11e9-9920-423525a6b683" - }, - "spec": { - "progressDeadlineSeconds": 2147483647, - "replicas": 1, - "revisionHistoryLimit": 10, - "selector": { - "matchLabels": { - "k8s-app": "kube-dns-autoscaler" - } - }, - "strategy": { - "rollingUpdate": { - "maxSurge": 1, - "maxUnavailable": 1 - }, - "type": "RollingUpdate" - }, - "template": { - "metadata": { - "annotations": { - "scheduler.alpha.kubernetes.io/critical-pod": "", - "seccomp.security.alpha.kubernetes.io/pod": "docker/default" - }, - "creationTimestamp": null, - "labels": { - "k8s-app": "kube-dns-autoscaler" - } - }, - "spec": { - "containers": [ - { - "command": [ - "/cluster-proportional-autoscaler", - "--namespace=kube-system", - "--configmap=kube-dns-autoscaler", - "--target=deployment/kube-dns-v20", - "--default-params={\"ladder\":{\"coresToReplicas\":[[1,2],[512,3],[1024,4],[2048,5]],\"nodesToReplicas\":[[1,2],[8,3],[16,4],[32,5]]}}", - "--logtostderr=true", - "--v=2" - ], - "image": "aksrepos.azurecr.io/mirror/cluster-proportional-autoscaler-amd64:1.1.2-r2", - "imagePullPolicy": "IfNotPresent", - "name": "autoscaler", - "resources": { - "requests": { - "cpu": "20m", - "memory": "10Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "dnsPolicy": "Default", - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "kube-dns-autoscaler", - "serviceAccountName": "kube-dns-autoscaler", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - } - ] - } - } - 
}, - "status": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2019-03-12T16:38:30Z", - "lastUpdateTime": "2019-03-12T16:38:30Z", - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - } - ], - "observedGeneration": 5, - "readyReplicas": 1, - "replicas": 1, - "updatedReplicas": 1 - } - }, - { - "apiVersion": "extensions/v1beta1", - "kind": "Deployment", - "metadata": { - "annotations": { - "deployment.kubernetes.io/revision": "6", - "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"k8s-app\":\"kube-dns\",\"kubernetes.io/cluster-service\":\"true\",\"version\":\"v20\"},\"name\":\"kube-dns-v20\",\"namespace\":\"kube-system\"},\"spec\":{\"selector\":{\"matchLabels\":{\"k8s-app\":\"kube-dns\",\"version\":\"v20\"}},\"template\":{\"metadata\":{\"annotations\":{\"prometheus.io/port\":\"10055\",\"prometheus.io/scrape\":\"true\"},\"labels\":{\"k8s-app\":\"kube-dns\",\"kubernetes.io/cluster-service\":\"true\",\"version\":\"v20\"}},\"spec\":{\"affinity\":{\"nodeAffinity\":{\"requiredDuringSchedulingIgnoredDuringExecution\":{\"nodeSelectorTerms\":[{\"labelSelector\":null,\"matchExpressions\":[{\"key\":\"kubernetes.azure.com/cluster\",\"operator\":\"Exists\"}]}]}},\"podAntiAffinity\":{\"preferredDuringSchedulingIgnoredDuringExecution\":[{\"podAffinityTerm\":{\"labelSelector\":{\"matchExpressions\":[{\"key\":\"k8s-app\",\"operator\":\"In\",\"values\":[\"kube-dns\"]}]},\"topologyKey\":\"kubernetes.io/hostname\"},\"weight\":100}]}},\"containers\":[{\"args\":[\"--kubecfg-file=/config/kubeconfig\",\"--config-dir=/kube-dns-config\",\"--domain=cluster.local.\",\"--dns-port=10053\",\"--v=2\"],\"env\":[{\"name\":\"PROMETHEUS_PORT\",\"value\":\"10055\"}],\"image\":\"aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64:1.14.13\",\"livenessProbe\":{\"failureThreshold\":5,\"httpGet\":{\"path\":\"/healthcheck/kubedns\",\"port\":10054,\"scheme\":\"HTTP\"},\"initialDelaySeconds\":60,\"successThreshold\":1,\"timeoutSeconds\":5},\"name\":\"kubedns\",\"ports\":[{\"containerPort\":10053,\"name\":\"dns-local\",\"protocol\":\"UDP\"},{\"containerPort\":10053,\"name\":\"dns-tcp-local\",\"protocol\":\"TCP\"},{\"containerPort\":10055,\"name\":\"metrics\",\"protocol\":\"TCP\"}],\"readinessProbe\":{\"httpGet\":{\"path\":\"/readiness\",\"port\":8081,\"scheme\":\"HTTP\"},\"initialDelaySeconds\":30,\"timeoutSeconds\":5},\"resources\":{\"limits\":{\"memory\":\"170Mi\"},\"requests\":{\"cpu\":\"100m\",\"memory\":\"70Mi\"}},\"volumeMounts\":[{\"mountPath\":\"/kube-dns-config\",\"name\":\"kube-dns-config\"},{\"mountPath\":\"/config\",\"name\":\"kubedns-kubecfg\",\"readOnly\":true}]},{\"args\":[\"-v=2\",\"-logtostderr\",\"-configDir=/kube-dns-config\",\"-restartDnsmasq=true\",\"--\",\"-k\",\"--cache-size=1000\",\"--no-negcache\",\"--no-resolv\",\"--server=127.0.0.1#10053\",\"--server=/cluster.local/127.0.0.1#10053\",\"--server=/in-addr.arpa/127.0.0.1#10053\",\"--server=/ip6.arpa/127.0.0.1#10053\",\"--log-facility=-\"],\"image\":\"aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64:1.14.10\",\"name\":\"dnsmasq\",\"ports\":[{\"containerPort\":53,\"name\":\"dns\",\"protocol\":\"UDP\"},{\"containerPort\":53,\"name\":\"dns-tcp\",\"protocol\":\"TCP\"}],\"volumeMounts\":[{\"mountPath\":\"/kube-dns-config\",\"name\":\"kube-dns-config\"}]},{\"args\":[\"--cmd=for d in 
$PROBE_DOMAINS; do nslookup $d 127.0.0.1 \\u003e/dev/null || exit 1; done\",\"--url=/healthz-dnsmasq\",\"--cmd=for d in $PROBE_DOMAINS; do nslookup $d 127.0.0.1:10053 \\u003e/dev/null || exit 1; done\",\"--url=/healthz-kubedns\",\"--port=8080\",\"--quiet\"],\"env\":[{\"name\":\"PROBE_DOMAINS\",\"value\":\"bing.com kubernetes.default.svc.cluster.local\"}],\"image\":\"aksrepos.azurecr.io/mirror/exechealthz-amd64:1.2\",\"livenessProbe\":{\"failureThreshold\":5,\"httpGet\":{\"path\":\"/healthz-dnsmasq\",\"port\":8080,\"scheme\":\"HTTP\"},\"initialDelaySeconds\":60,\"successThreshold\":1,\"timeoutSeconds\":5},\"name\":\"healthz\",\"ports\":[{\"containerPort\":8080,\"protocol\":\"TCP\"}],\"resources\":{\"limits\":{\"memory\":\"50Mi\"},\"requests\":{\"cpu\":\"10m\",\"memory\":\"50Mi\"}}},{\"args\":[\"--v=2\",\"--logtostderr\",\"--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV\",\"--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV\"],\"image\":\"aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64:1.14.10\",\"livenessProbe\":{\"httpGet\":{\"path\":\"/metrics\",\"port\":10054,\"scheme\":\"HTTP\"},\"initialDelaySeconds\":60,\"successThreshold\":1,\"timeoutSeconds\":5},\"name\":\"sidecar\",\"ports\":[{\"containerPort\":10054,\"name\":\"metrics\",\"protocol\":\"TCP\"}],\"resources\":{\"requests\":{\"cpu\":\"10m\",\"memory\":\"20Mi\"}}}],\"dnsPolicy\":\"Default\",\"imagePullSecrets\":[{\"name\":\"emptyacrsecret\"}],\"nodeSelector\":{\"beta.kubernetes.io/os\":\"linux\"},\"priorityClassName\":\"system-node-critical\",\"serviceAccountName\":\"kube-dns\",\"tolerations\":[{\"key\":\"CriticalAddonsOnly\",\"operator\":\"Exists\"}],\"volumes\":[{\"configMap\":{\"name\":\"kube-dns\",\"optional\":true},\"name\":\"kube-dns-config\"},{\"configMap\":{\"name\":\"kubedns-kubecfg\"},\"name\":\"kubedns-kubecfg\"}]}}}}\n" - }, - "creationTimestamp": "2019-03-12T16:38:30Z", - "generation": 7, - "labels": { - "addonmanager.kubernetes.io/mode": "Reconcile", - "k8s-app": "kube-dns", - "kubernetes.io/cluster-service": "true", - "version": "v20" - }, - "name": "kube-dns-v20", - "namespace": "kube-system", - "resourceVersion": "15144054", - "selfLink": "/apis/extensions/v1beta1/namespaces/kube-system/deployments/kube-dns-v20", - "uid": "4523fcd7-44e5-11e9-9920-423525a6b683" - }, - "spec": { - "progressDeadlineSeconds": 2147483647, - "replicas": 2, - "revisionHistoryLimit": 10, - "selector": { - "matchLabels": { - "k8s-app": "kube-dns", - "version": "v20" - } - }, - "strategy": { - "rollingUpdate": { - "maxSurge": 1, - "maxUnavailable": 1 - }, - "type": "RollingUpdate" - }, - "template": { - "metadata": { - "annotations": { - "prometheus.io/port": "10055", - "prometheus.io/scrape": "true" - }, - "creationTimestamp": null, - "labels": { - "k8s-app": "kube-dns", - "kubernetes.io/cluster-service": "true", - "version": "v20" - } - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - }, - "podAntiAffinity": { - "preferredDuringSchedulingIgnoredDuringExecution": [ - { - "podAffinityTerm": { - "labelSelector": { - "matchExpressions": [ - { - "key": "k8s-app", - "operator": "In", - "values": [ - "kube-dns" - ] - } - ] - }, - "topologyKey": "kubernetes.io/hostname" - }, - "weight": 100 - } - ] - } - }, - "containers": [ - { - "args": [ - "--kubecfg-file=/config/kubeconfig", - 
"--config-dir=/kube-dns-config", - "--domain=cluster.local.", - "--dns-port=10053", - "--v=2" - ], - "env": [ - { - "name": "PROMETHEUS_PORT", - "value": "10055" - } - ], - "image": "aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64:1.14.13", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "failureThreshold": 5, - "httpGet": { - "path": "/healthcheck/kubedns", - "port": 10054, - "scheme": "HTTP" - }, - "initialDelaySeconds": 60, - "periodSeconds": 10, - "successThreshold": 1, - "timeoutSeconds": 5 - }, - "name": "kubedns", - "ports": [ - { - "containerPort": 10053, - "name": "dns-local", - "protocol": "UDP" - }, - { - "containerPort": 10053, - "name": "dns-tcp-local", - "protocol": "TCP" - }, - { - "containerPort": 10055, - "name": "metrics", - "protocol": "TCP" - } - ], - "readinessProbe": { - "failureThreshold": 3, - "httpGet": { - "path": "/readiness", - "port": 8081, - "scheme": "HTTP" - }, - "initialDelaySeconds": 30, - "periodSeconds": 10, - "successThreshold": 1, - "timeoutSeconds": 5 - }, - "resources": { - "limits": { - "memory": "170Mi" - }, - "requests": { - "cpu": "100m", - "memory": "70Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/kube-dns-config", - "name": "kube-dns-config" - }, - { - "mountPath": "/config", - "name": "kubedns-kubecfg", - "readOnly": true - } - ] - }, - { - "args": [ - "-v=2", - "-logtostderr", - "-configDir=/kube-dns-config", - "-restartDnsmasq=true", - "--", - "-k", - "--cache-size=1000", - "--no-negcache", - "--no-resolv", - "--server=127.0.0.1#10053", - "--server=/cluster.local/127.0.0.1#10053", - "--server=/in-addr.arpa/127.0.0.1#10053", - "--server=/ip6.arpa/127.0.0.1#10053", - "--log-facility=-" - ], - "image": "aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64:1.14.10", - "imagePullPolicy": "IfNotPresent", - "name": "dnsmasq", - "ports": [ - { - "containerPort": 53, - "name": "dns", - "protocol": "UDP" - }, - { - "containerPort": 53, - "name": "dns-tcp", - "protocol": "TCP" - } - ], - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/kube-dns-config", - "name": "kube-dns-config" - } - ] - }, - { - "args": [ - "--cmd=for d in $PROBE_DOMAINS; do nslookup $d 127.0.0.1 \u003e/dev/null || exit 1; done", - "--url=/healthz-dnsmasq", - "--cmd=for d in $PROBE_DOMAINS; do nslookup $d 127.0.0.1:10053 \u003e/dev/null || exit 1; done", - "--url=/healthz-kubedns", - "--port=8080", - "--quiet" - ], - "env": [ - { - "name": "PROBE_DOMAINS", - "value": "bing.com kubernetes.default.svc.cluster.local" - } - ], - "image": "aksrepos.azurecr.io/mirror/exechealthz-amd64:1.2", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "failureThreshold": 5, - "httpGet": { - "path": "/healthz-dnsmasq", - "port": 8080, - "scheme": "HTTP" - }, - "initialDelaySeconds": 60, - "periodSeconds": 10, - "successThreshold": 1, - "timeoutSeconds": 5 - }, - "name": "healthz", - "ports": [ - { - "containerPort": 8080, - "protocol": "TCP" - } - ], - "resources": { - "limits": { - "memory": "50Mi" - }, - "requests": { - "cpu": "10m", - "memory": "50Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - }, - { - "args": [ - "--v=2", - "--logtostderr", - "--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV", - "--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV" - ], - "image": 
"aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64:1.14.10", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "failureThreshold": 3, - "httpGet": { - "path": "/metrics", - "port": 10054, - "scheme": "HTTP" - }, - "initialDelaySeconds": 60, - "periodSeconds": 10, - "successThreshold": 1, - "timeoutSeconds": 5 - }, - "name": "sidecar", - "ports": [ - { - "containerPort": 10054, - "name": "metrics", - "protocol": "TCP" - } - ], - "resources": { - "requests": { - "cpu": "10m", - "memory": "20Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "dnsPolicy": "Default", - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "kube-dns", - "serviceAccountName": "kube-dns", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - } - ], - "volumes": [ - { - "configMap": { - "defaultMode": 420, - "name": "kube-dns", - "optional": true - }, - "name": "kube-dns-config" - }, - { - "configMap": { - "defaultMode": 420, - "name": "kubedns-kubecfg" - }, - "name": "kubedns-kubecfg" - } - ] - } - } - }, - "status": { - "availableReplicas": 2, - "conditions": [ - { - "lastTransitionTime": "2019-07-23T14:46:03Z", - "lastUpdateTime": "2019-07-23T14:46:03Z", - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - } - ], - "observedGeneration": 7, - "readyReplicas": 2, - "replicas": 2, - "updatedReplicas": 2 - } - }, - { - "apiVersion": "extensions/v1beta1", - "kind": "Deployment", - "metadata": { - "annotations": { - "deployment.kubernetes.io/revision": "6", - "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"k8s-app\":\"kubernetes-dashboard\",\"kubernetes.io/cluster-service\":\"true\"},\"name\":\"kubernetes-dashboard\",\"namespace\":\"kube-system\"},\"spec\":{\"replicas\":1,\"strategy\":{\"rollingUpdate\":{\"maxSurge\":0,\"maxUnavailable\":1},\"type\":\"RollingUpdate\"},\"template\":{\"metadata\":{\"labels\":{\"k8s-app\":\"kubernetes-dashboard\",\"kubernetes.io/cluster-service\":\"true\"}},\"spec\":{\"affinity\":{\"nodeAffinity\":{\"requiredDuringSchedulingIgnoredDuringExecution\":{\"nodeSelectorTerms\":[{\"labelSelector\":null,\"matchExpressions\":[{\"key\":\"kubernetes.azure.com/cluster\",\"operator\":\"Exists\"}]}]}}},\"containers\":[{\"image\":\"aksrepos.azurecr.io/mirror/kubernetes-dashboard-amd64:v1.10.1\",\"livenessProbe\":{\"failureThreshold\":3,\"httpGet\":{\"path\":\"/\",\"port\":9090,\"scheme\":\"HTTP\"},\"initialDelaySeconds\":30,\"periodSeconds\":10,\"successThreshold\":1,\"timeoutSeconds\":30},\"name\":\"main\",\"ports\":[{\"containerPort\":9090,\"name\":\"http\",\"protocol\":\"TCP\"}],\"resources\":{\"limits\":{\"cpu\":\"100m\",\"memory\":\"500Mi\"},\"requests\":{\"cpu\":\"100m\",\"memory\":\"50Mi\"}}}],\"imagePullSecrets\":[{\"name\":\"emptyacrsecret\"}],\"nodeSelector\":{\"beta.kubernetes.io/os\":\"linux\"},\"priorityClassName\":\"system-node-critical\",\"serviceAccountName\":\"kubernetes-dashboard\",\"tolerations\":[{\"key\":\"CriticalAddonsOnly\",\"operator\":\"Exists\"}]}}}}\n" - }, - 
"creationTimestamp": "2019-03-12T16:38:31Z", - "generation": 6, - "labels": { - "addonmanager.kubernetes.io/mode": "Reconcile", - "k8s-app": "kubernetes-dashboard", - "kubernetes.io/cluster-service": "true" - }, - "name": "kubernetes-dashboard", - "namespace": "kube-system", - "resourceVersion": "15831521", - "selfLink": "/apis/extensions/v1beta1/namespaces/kube-system/deployments/kubernetes-dashboard", - "uid": "45b9cc8d-44e5-11e9-9920-423525a6b683" - }, - "spec": { - "progressDeadlineSeconds": 2147483647, - "replicas": 1, - "revisionHistoryLimit": 10, - "selector": { - "matchLabels": { - "k8s-app": "kubernetes-dashboard", - "kubernetes.io/cluster-service": "true" - } - }, - "strategy": { - "rollingUpdate": { - "maxSurge": 0, - "maxUnavailable": 1 - }, - "type": "RollingUpdate" - }, - "template": { - "metadata": { - "creationTimestamp": null, - "labels": { - "k8s-app": "kubernetes-dashboard", - "kubernetes.io/cluster-service": "true" - } - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - } - }, - "containers": [ - { - "image": "aksrepos.azurecr.io/mirror/kubernetes-dashboard-amd64:v1.10.1", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "failureThreshold": 3, - "httpGet": { - "path": "/", - "port": 9090, - "scheme": "HTTP" - }, - "initialDelaySeconds": 30, - "periodSeconds": 10, - "successThreshold": 1, - "timeoutSeconds": 30 - }, - "name": "main", - "ports": [ - { - "containerPort": 9090, - "name": "http", - "protocol": "TCP" - } - ], - "resources": { - "limits": { - "cpu": "100m", - "memory": "500Mi" - }, - "requests": { - "cpu": "100m", - "memory": "50Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "dnsPolicy": "ClusterFirst", - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "kubernetes-dashboard", - "serviceAccountName": "kubernetes-dashboard", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - } - ] - } - } - }, - "status": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2019-03-12T16:38:32Z", - "lastUpdateTime": "2019-03-12T16:38:32Z", - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - } - ], - "observedGeneration": 6, - "readyReplicas": 1, - "replicas": 1, - "updatedReplicas": 1 - } - }, - { - "apiVersion": "extensions/v1beta1", - "kind": "Deployment", - "metadata": { - "annotations": { - "deployment.kubernetes.io/revision": "5", - "kubectl.kubernetes.io/last-applied-configuration": 
"{\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"k8s-app\":\"metrics-server\",\"kubernetes.io/cluster-service\":\"true\"},\"name\":\"metrics-server\",\"namespace\":\"kube-system\"},\"spec\":{\"selector\":{\"matchLabels\":{\"k8s-app\":\"metrics-server\"}},\"template\":{\"metadata\":{\"labels\":{\"k8s-app\":\"metrics-server\"},\"name\":\"metrics-server\"},\"spec\":{\"affinity\":{\"nodeAffinity\":{\"requiredDuringSchedulingIgnoredDuringExecution\":{\"nodeSelectorTerms\":[{\"labelSelector\":null,\"matchExpressions\":[{\"key\":\"kubernetes.azure.com/cluster\",\"operator\":\"Exists\"}]}]}}},\"containers\":[{\"command\":[\"/metrics-server\",\"--source=kubernetes.summary_api:''\"],\"image\":\"aksrepos.azurecr.io/mirror/metrics-server-amd64:v0.2.1\",\"imagePullPolicy\":\"IfNotPresent\",\"name\":\"metrics-server\"}],\"imagePullSecrets\":[{\"name\":\"emptyacrsecret\"}],\"nodeSelector\":{\"beta.kubernetes.io/os\":\"linux\"},\"priorityClassName\":\"system-node-critical\",\"serviceAccountName\":\"metrics-server\",\"tolerations\":[{\"key\":\"CriticalAddonsOnly\",\"operator\":\"Exists\"}]}}}}\n" - }, - "creationTimestamp": "2019-03-12T16:38:31Z", - "generation": 5, - "labels": { - "addonmanager.kubernetes.io/mode": "Reconcile", - "k8s-app": "metrics-server", - "kubernetes.io/cluster-service": "true" - }, - "name": "metrics-server", - "namespace": "kube-system", - "resourceVersion": "15144043", - "selfLink": "/apis/extensions/v1beta1/namespaces/kube-system/deployments/metrics-server", - "uid": "45556857-44e5-11e9-9920-423525a6b683" - }, - "spec": { - "progressDeadlineSeconds": 2147483647, - "replicas": 1, - "revisionHistoryLimit": 10, - "selector": { - "matchLabels": { - "k8s-app": "metrics-server" - } - }, - "strategy": { - "rollingUpdate": { - "maxSurge": 1, - "maxUnavailable": 1 - }, - "type": "RollingUpdate" - }, - "template": { - "metadata": { - "creationTimestamp": null, - "labels": { - "k8s-app": "metrics-server" - }, - "name": "metrics-server" - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - } - }, - "containers": [ - { - "command": [ - "/metrics-server", - "--source=kubernetes.summary_api:''" - ], - "image": "aksrepos.azurecr.io/mirror/metrics-server-amd64:v0.2.1", - "imagePullPolicy": "IfNotPresent", - "name": "metrics-server", - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "dnsPolicy": "ClusterFirst", - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "metrics-server", - "serviceAccountName": "metrics-server", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - } - ] - } - } - }, - "status": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2019-03-12T16:38:31Z", - "lastUpdateTime": "2019-03-12T16:38:31Z", - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - } - ], - "observedGeneration": 5, - "readyReplicas": 1, - 
"replicas": 1, - "updatedReplicas": 1 - } - }, - { - "apiVersion": "extensions/v1beta1", - "kind": "Deployment", - "metadata": { - "annotations": { - "deployment.kubernetes.io/revision": "7", - "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"name\":\"omsagent-rs\",\"namespace\":\"kube-system\"},\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"rsName\":\"omsagent-rs\"}},\"strategy\":{\"type\":\"RollingUpdate\"},\"template\":{\"metadata\":{\"annotations\":{\"agentVersion\":\"1.10.0.1\",\"dockerProviderVersion\":\"6.0.0-0\",\"schema-versions\":\"v1\"},\"labels\":{\"rsName\":\"omsagent-rs\"}},\"spec\":{\"containers\":[{\"env\":[{\"name\":\"AKS_RESOURCE_ID\",\"value\":\"/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test\"},{\"name\":\"AKS_REGION\",\"value\":\"eastus\"},{\"name\":\"CONTROLLER_TYPE\",\"value\":\"ReplicaSet\"},{\"name\":\"NODE_IP\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"status.hostIP\"}}}],\"image\":\"mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019\",\"imagePullPolicy\":\"IfNotPresent\",\"livenessProbe\":{\"exec\":{\"command\":[\"/bin/bash\",\"-c\",\"/opt/livenessprobe.sh\"]},\"initialDelaySeconds\":60,\"periodSeconds\":60},\"name\":\"omsagent\",\"ports\":[{\"containerPort\":25225,\"protocol\":\"TCP\"},{\"containerPort\":25224,\"protocol\":\"UDP\"},{\"containerPort\":25227,\"name\":\"in-rs-tcp\",\"protocol\":\"TCP\"}],\"resources\":{\"limits\":{\"cpu\":\"150m\",\"memory\":\"500Mi\"},\"requests\":{\"cpu\":\"110m\",\"memory\":\"250Mi\"}},\"securityContext\":{\"privileged\":true},\"volumeMounts\":[{\"mountPath\":\"/var/run/host\",\"name\":\"docker-sock\"},{\"mountPath\":\"/var/log\",\"name\":\"host-log\"},{\"mountPath\":\"/var/lib/docker/containers\",\"name\":\"containerlog-path\"},{\"mountPath\":\"/etc/kubernetes/host\",\"name\":\"azure-json-path\"},{\"mountPath\":\"/etc/omsagent-secret\",\"name\":\"omsagent-secret\",\"readOnly\":true},{\"mountPath\":\"/etc/config\",\"name\":\"omsagent-rs-config\"},{\"mountPath\":\"/etc/config/settings\",\"name\":\"settings-vol-config\",\"readOnly\":true}]}],\"nodeSelector\":{\"beta.kubernetes.io/os\":\"linux\",\"kubernetes.io/role\":\"agent\"},\"serviceAccountName\":\"omsagent\",\"volumes\":[{\"hostPath\":{\"path\":\"/var/run\"},\"name\":\"docker-sock\"},{\"hostPath\":{\"path\":\"/etc/hostname\"},\"name\":\"container-hostname\"},{\"hostPath\":{\"path\":\"/var/log\"},\"name\":\"host-log\"},{\"hostPath\":{\"path\":\"/var/lib/docker/containers\"},\"name\":\"containerlog-path\"},{\"hostPath\":{\"path\":\"/etc/kubernetes\"},\"name\":\"azure-json-path\"},{\"name\":\"omsagent-secret\",\"secret\":{\"secretName\":\"omsagent-secret\"}},{\"configMap\":{\"name\":\"omsagent-rs-config\"},\"name\":\"omsagent-rs-config\"},{\"configMap\":{\"name\":\"container-azm-ms-agentconfig\",\"optional\":true},\"name\":\"settings-vol-config\"}]}}}}\n" - }, - "creationTimestamp": "2019-08-19T22:44:22Z", - "generation": 7, - "labels": { - "rsName": "omsagent-rs" - }, - "name": "omsagent-rs", - "namespace": "kube-system", - "resourceVersion": "19063500", - "selfLink": "/apis/extensions/v1beta1/namespaces/kube-system/deployments/omsagent-rs", - "uid": "e32d7e82-c2d2-11e9-8736-86290fd7dd1f" - }, - "spec": { - "progressDeadlineSeconds": 2147483647, - "replicas": 1, - "revisionHistoryLimit": 10, - "selector": { - "matchLabels": { - 
"rsName": "omsagent-rs" - } - }, - "strategy": { - "rollingUpdate": { - "maxSurge": 1, - "maxUnavailable": 1 - }, - "type": "RollingUpdate" - }, - "template": { - "metadata": { - "annotations": { - "agentVersion": "1.10.0.1", - "dockerProviderVersion": "6.0.0-0", - "schema-versions": "v1" - }, - "creationTimestamp": null, - "labels": { - "rsName": "omsagent-rs" - } - }, - "spec": { - "containers": [ - { - "env": [ - { - "name": "AKS_RESOURCE_ID", - "value": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test" - }, - { - "name": "AKS_REGION", - "value": "eastus" - }, - { - "name": "CONTROLLER_TYPE", - "value": "ReplicaSet" - }, - { - "name": "NODE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.hostIP" - } - } - } - ], - "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "exec": { - "command": [ - "/bin/bash", - "-c", - "/opt/livenessprobe.sh" - ] - }, - "failureThreshold": 3, - "initialDelaySeconds": 60, - "periodSeconds": 60, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "name": "omsagent", - "ports": [ - { - "containerPort": 25225, - "protocol": "TCP" - }, - { - "containerPort": 25224, - "protocol": "UDP" - }, - { - "containerPort": 25227, - "name": "in-rs-tcp", - "protocol": "TCP" - } - ], - "resources": { - "limits": { - "cpu": "150m", - "memory": "500Mi" - }, - "requests": { - "cpu": "110m", - "memory": "250Mi" - } - }, - "securityContext": { - "privileged": true - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/host", - "name": "docker-sock" - }, - { - "mountPath": "/var/log", - "name": "host-log" - }, - { - "mountPath": "/var/lib/docker/containers", - "name": "containerlog-path" - }, - { - "mountPath": "/etc/kubernetes/host", - "name": "azure-json-path" - }, - { - "mountPath": "/etc/omsagent-secret", - "name": "omsagent-secret", - "readOnly": true - }, - { - "mountPath": "/etc/config", - "name": "omsagent-rs-config" - }, - { - "mountPath": "/etc/config/settings", - "name": "settings-vol-config", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "nodeSelector": { - "beta.kubernetes.io/os": "linux", - "kubernetes.io/role": "agent" - }, - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "omsagent", - "serviceAccountName": "omsagent", - "terminationGracePeriodSeconds": 30, - "volumes": [ - { - "hostPath": { - "path": "/var/run", - "type": "" - }, - "name": "docker-sock" - }, - { - "hostPath": { - "path": "/etc/hostname", - "type": "" - }, - "name": "container-hostname" - }, - { - "hostPath": { - "path": "/var/log", - "type": "" - }, - "name": "host-log" - }, - { - "hostPath": { - "path": "/var/lib/docker/containers", - "type": "" - }, - "name": "containerlog-path" - }, - { - "hostPath": { - "path": "/etc/kubernetes", - "type": "" - }, - "name": "azure-json-path" - }, - { - "name": "omsagent-secret", - "secret": { - "defaultMode": 420, - "secretName": "omsagent-secret" - } - }, - { - "configMap": { - "defaultMode": 420, - "name": "omsagent-rs-config" - }, - "name": "omsagent-rs-config" - }, - { - "configMap": { - "defaultMode": 420, - "name": "container-azm-ms-agentconfig", - "optional": true - }, - "name": "settings-vol-config" - } - ] - } - } - }, - "status": { - 
"availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2019-08-19T22:44:22Z", - "lastUpdateTime": "2019-08-19T22:44:22Z", - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - } - ], - "observedGeneration": 7, - "readyReplicas": 1, - "replicas": 1, - "updatedReplicas": 1 - } - }, - { - "apiVersion": "extensions/v1beta1", - "kind": "Deployment", - "metadata": { - "annotations": { - "deployment.kubernetes.io/revision": "9", - "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"component\":\"tunnel\",\"kubernetes.io/cluster-service\":\"true\",\"tier\":\"node\"},\"name\":\"tunnelfront\",\"namespace\":\"kube-system\"},\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"component\":\"tunnel\"}},\"template\":{\"metadata\":{\"labels\":{\"component\":\"tunnel\"}},\"spec\":{\"affinity\":{\"nodeAffinity\":{\"requiredDuringSchedulingIgnoredDuringExecution\":{\"nodeSelectorTerms\":[{\"labelSelector\":null,\"matchExpressions\":[{\"key\":\"kubernetes.azure.com/cluster\",\"operator\":\"Exists\"}]}]}}},\"containers\":[{\"env\":[{\"name\":\"OVERRIDE_TUNNEL_SERVER_NAME\",\"value\":\"t_dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io\"},{\"name\":\"TUNNEL_CLUSTERUSER_NAME\",\"value\":\"28957308\"},{\"name\":\"TUNNELGATEWAY_SERVER_NAME\",\"value\":\"dilipr-hea-dilipr-health-te-72c8e8-0b16acad.tun.eastus.azmk8s.io\"},{\"name\":\"TUNNELGATEWAY_SSH_PORT\",\"value\":\"22\"},{\"name\":\"TUNNELGATEWAY_TLS_PORT\",\"value\":\"443\"},{\"name\":\"KUBE_CONFIG\",\"value\":\"/etc/kubernetes/kubeconfig/kubeconfig\"}],\"image\":\"aksrepos.azurecr.io/prod/hcp-tunnel-front:v1.9.2-v4.0.7\",\"imagePullPolicy\":\"IfNotPresent\",\"livenessProbe\":{\"exec\":{\"command\":[\"/lib/tunnel-front/check-tunnel-connection.sh\"]},\"failureThreshold\":12,\"initialDelaySeconds\":10,\"periodSeconds\":60},\"name\":\"tunnel-front\",\"resources\":{\"requests\":{\"cpu\":\"10m\",\"memory\":\"64Mi\"}},\"securityContext\":{\"privileged\":true},\"volumeMounts\":[{\"mountPath\":\"/etc/kubernetes/kubeconfig\",\"name\":\"kubeconfig\",\"readOnly\":true},{\"mountPath\":\"/etc/kubernetes/certs\",\"name\":\"certificates\",\"readOnly\":true}]}],\"dnsPolicy\":\"Default\",\"imagePullSecrets\":[{\"name\":\"emptyacrsecret\"}],\"nodeSelector\":{\"beta.kubernetes.io/os\":\"linux\"},\"priorityClassName\":\"system-node-critical\",\"serviceAccountName\":\"tunnelfront\",\"tolerations\":[{\"key\":\"CriticalAddonsOnly\",\"operator\":\"Exists\"}],\"volumes\":[{\"configMap\":{\"name\":\"tunnelfront-kubecfg\",\"optional\":true},\"name\":\"kubeconfig\"},{\"hostPath\":{\"path\":\"/etc/kubernetes/certs\"},\"name\":\"certificates\"}]}}}}\n" - }, - "creationTimestamp": "2019-03-12T16:38:32Z", - "generation": 9, - "labels": { - "addonmanager.kubernetes.io/mode": "Reconcile", - "component": "tunnel", - "kubernetes.io/cluster-service": "true", - "tier": "node" - }, - "name": "tunnelfront", - "namespace": "kube-system", - "resourceVersion": "17628811", - "selfLink": "/apis/extensions/v1beta1/namespaces/kube-system/deployments/tunnelfront", - "uid": "45e524e6-44e5-11e9-9920-423525a6b683" - }, - "spec": { - "progressDeadlineSeconds": 2147483647, - "replicas": 1, - "revisionHistoryLimit": 10, - "selector": { - "matchLabels": { - "component": "tunnel" - } - }, - "strategy": { - "rollingUpdate": { - "maxSurge": 
1, - "maxUnavailable": 1 - }, - "type": "RollingUpdate" - }, - "template": { - "metadata": { - "creationTimestamp": null, - "labels": { - "component": "tunnel" - } - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - } - }, - "containers": [ - { - "env": [ - { - "name": "OVERRIDE_TUNNEL_SERVER_NAME", - "value": "t_dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "TUNNEL_CLUSTERUSER_NAME", - "value": "28957308" - }, - { - "name": "TUNNELGATEWAY_SERVER_NAME", - "value": "dilipr-hea-dilipr-health-te-72c8e8-0b16acad.tun.eastus.azmk8s.io" - }, - { - "name": "TUNNELGATEWAY_SSH_PORT", - "value": "22" - }, - { - "name": "TUNNELGATEWAY_TLS_PORT", - "value": "443" - }, - { - "name": "KUBE_CONFIG", - "value": "/etc/kubernetes/kubeconfig/kubeconfig" - } - ], - "image": "aksrepos.azurecr.io/prod/hcp-tunnel-front:v1.9.2-v4.0.7", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "exec": { - "command": [ - "/lib/tunnel-front/check-tunnel-connection.sh" - ] - }, - "failureThreshold": 12, - "initialDelaySeconds": 10, - "periodSeconds": 60, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "name": "tunnel-front", - "resources": { - "requests": { - "cpu": "10m", - "memory": "64Mi" - } - }, - "securityContext": { - "privileged": true - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/kubernetes/kubeconfig", - "name": "kubeconfig", - "readOnly": true - }, - { - "mountPath": "/etc/kubernetes/certs", - "name": "certificates", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "Default", - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "tunnelfront", - "serviceAccountName": "tunnelfront", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - } - ], - "volumes": [ - { - "configMap": { - "defaultMode": 420, - "name": "tunnelfront-kubecfg", - "optional": true - }, - "name": "kubeconfig" - }, - { - "hostPath": { - "path": "/etc/kubernetes/certs", - "type": "" - }, - "name": "certificates" - } - ] - } - } - }, - "status": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2019-03-12T16:38:32Z", - "lastUpdateTime": "2019-03-12T16:38:32Z", - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - } - ], - "observedGeneration": 9, - "readyReplicas": 1, - "replicas": 1, - "updatedReplicas": 1 - } - } - ], - "kind": "List", - "metadata": { - "resourceVersion": "", - "selfLink": "" - } -} diff --git a/test/unit-tests/plugins/health/health_container_cpu_memory_aggregator_spec.rb b/test/unit-tests/plugins/health/health_container_cpu_memory_aggregator_spec.rb deleted file mode 100644 index 89eebb509..000000000 --- a/test/unit-tests/plugins/health/health_container_cpu_memory_aggregator_spec.rb +++ /dev/null @@ -1,190 +0,0 @@ -require_relative '../test_helpers' -Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/plugins/ruby/health/*.rb")].reject{|f| 
f.include?('health_monitor_utils')}.each { |file| require file } -include HealthModel - -describe 'HealthContainerCpuMemoryAggregator spec' do - - it 'dedupes and drops older records' do - formatted_records = JSON.parse'[{ - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/sidecar", - "CounterName": "memoryRssBytes", - "CounterValue": 14061568, - "Timestamp": "2019-08-23T23:13:39Z" - }, - { - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/sidecar", - "CounterName": "memoryRssBytes", - "CounterValue": 14061568, - "Timestamp": "2019-08-23T22:13:39Z" - }]' - - resources = HealthKubernetesResources.instance - nodes = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'nodes.json'))) - pods = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'pods.json'))) - deployments = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'deployments.json'))) - - resources.pod_inventory = pods - resources.node_inventory = nodes - resources.set_replicaset_inventory(deployments) - resources.build_pod_uid_lookup #call this in in_kube_health every min - - cluster_labels = { - 'container.azm.ms/cluster-region' => 'eastus', - 'container.azm.ms/cluster-subscription-id' => '72c8e8ca-dc16-47dc-b65c-6b5875eb600a', - 'container.azm.ms/cluster-resource-group' => 'dilipr-health-test', - 'container.azm.ms/cluster-name' => 'dilipr-health-test' - } - cluster_id = 'fake_cluster_id' - provider = HealthMonitorProvider.new(cluster_id, cluster_labels, resources, File.join(__dir__, "../../../../installer/conf/healthmonitorconfig.json")) - aggregator = HealthContainerCpuMemoryAggregator.new(resources, provider) - deduped_records = aggregator.dedupe_records(formatted_records) - deduped_records.size.must_equal 1 - deduped_records[0]["Timestamp"].must_equal "2019-08-23T23:13:39Z" - end - - it 'aggregates based on container name' do - file = File.read(File.join(File.expand_path(File.dirname(__FILE__)),'cadvisor_perf.json')) - records = JSON.parse(file) - records = records.select{|record| record['DataItems'][0]['ObjectName'] == 'K8SContainer'} - formatted_records = [] - formatter = HealthContainerCpuMemoryRecordFormatter.new - records.each{|record| - formatted_record = formatter.get_record_from_cadvisor_record(record) - formatted_records.push(formatted_record) - } - - resources = HealthKubernetesResources.instance - nodes = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'nodes.json'))) - pods = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'pods.json'))) - deployments = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'deployments.json'))) - - resources.pod_inventory = pods - resources.node_inventory = nodes - resources.set_replicaset_inventory(deployments) - resources.build_pod_uid_lookup #call this in in_kube_health every min - - cluster_labels = { - 'container.azm.ms/cluster-region' => 'eastus', - 'container.azm.ms/cluster-subscription-id' => '72c8e8ca-dc16-47dc-b65c-6b5875eb600a', - 'container.azm.ms/cluster-resource-group' => 'dilipr-health-test', - 'container.azm.ms/cluster-name' => 'dilipr-health-test' - } - - cluster_id = 'fake_cluster_id' - - provider = 
HealthMonitorProvider.new(cluster_id, cluster_labels, resources, File.join(__dir__, "../../../../installer/conf/healthmonitorconfig.json")) - - aggregator = HealthContainerCpuMemoryAggregator.new(resources, provider) - deduped_records = aggregator.dedupe_records(formatted_records) - aggregator.aggregate(deduped_records) - aggregator.compute_state - records = aggregator.get_records - records.size.must_equal 30 - #records have all the required details - records.each{|record| - record["Details"]["details"]["container"].wont_be_nil - record["Details"]["details"]["workload_name"].wont_be_nil - record["Details"]["details"]["workload_kind"].wont_be_nil - record["Details"]["details"]["namespace"].wont_be_nil - record["Details"]["timestamp"].wont_be_nil - record["Details"]["state"].wont_be_nil - record["MonitorTypeId"].wont_be_nil - record["MonitorInstanceId"].wont_be_nil - record["TimeFirstObserved"].wont_be_nil - record["TimeGenerated"].wont_be_nil - } - end - - it "calculates the state correctly" do - file = File.read(File.join(File.expand_path(File.dirname(__FILE__)),'cadvisor_perf.json')) - records = JSON.parse(file) - records = records.select{|record| record['DataItems'][0]['ObjectName'] == 'K8SContainer'} - formatted_records = [] - formatter = HealthContainerCpuMemoryRecordFormatter.new - records.each{|record| - formatted_record = formatter.get_record_from_cadvisor_record(record) - formatted_records.push(formatted_record) - } - - resources = HealthKubernetesResources.instance - nodes = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'nodes.json'))) - pods = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'pods.json'))) - deployments = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'deployments.json'))) - - resources.pod_inventory = pods - resources.node_inventory = nodes - resources.set_replicaset_inventory(deployments) - resources.build_pod_uid_lookup #call this in in_kube_health every min - - cluster_labels = { - 'container.azm.ms/cluster-region' => 'eastus', - 'container.azm.ms/cluster-subscription-id' => '72c8e8ca-dc16-47dc-b65c-6b5875eb600a', - 'container.azm.ms/cluster-resource-group' => 'dilipr-health-test', - 'container.azm.ms/cluster-name' => 'dilipr-health-test' - } - - cluster_id = 'fake_cluster_id' - - provider = HealthMonitorProvider.new(cluster_id, cluster_labels, resources, File.join(__dir__, "../../../../installer/conf/healthmonitorconfig.json")) - - aggregator = HealthContainerCpuMemoryAggregator.new(resources, provider) - deduped_records = aggregator.dedupe_records(formatted_records) - aggregator.aggregate(deduped_records) - aggregator.compute_state - records = aggregator.get_records - - #omsagent has limit set. So its state should be set to pass. - #sidecar has no limit set. 
its state should be set to warning - omsagent_record = records.select{|r| r["MonitorTypeId"] == MonitorId::CONTAINER_CPU_MONITOR_ID && r["Details"]["details"]["container"] == "omsagent"}[0] - sidecar_record = records.select{|r| r["MonitorTypeId"] == MonitorId::CONTAINER_CPU_MONITOR_ID && r["Details"]["details"]["container"] == "sidecar"}[0] - omsagent_record['Details']['state'].must_equal HealthMonitorStates::PASS #limit is set - sidecar_record['Details']['state'].must_equal HealthMonitorStates::PASS - end - - - it "calculates the state as unknown when signals are missing" do - file = File.read(File.join(File.expand_path(File.dirname(__FILE__)),'cadvisor_perf.json')) - records = JSON.parse(file) - records = records.select{|record| record['DataItems'][0]['ObjectName'] == 'K8SContainer'} - formatted_records = [] - formatter = HealthContainerCpuMemoryRecordFormatter.new - records.each{|record| - formatted_record = formatter.get_record_from_cadvisor_record(record) - formatted_records.push(formatted_record) - } - - formatted_records = formatted_records.reject{|r| r["InstanceName"] == "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/69e68b21-c5df-11e9-8736-86290fd7dd1f/omsagent" && r["CounterName"] == "cpuUsageNanoCores"} - formatted_records = formatted_records.reject{|r| r["InstanceName"] == "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b1e04e1c-c5df-11e9-8736-86290fd7dd1f/omsagent" && r["CounterName"] == "cpuUsageNanoCores"} - - resources = HealthKubernetesResources.instance - nodes = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'nodes.json'))) - pods = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'pods.json'))) - deployments = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'deployments.json'))) - - resources.pod_inventory = pods - resources.node_inventory = nodes - resources.set_replicaset_inventory(deployments) - resources.build_pod_uid_lookup #call this in in_kube_health every min - - cluster_labels = { - 'container.azm.ms/cluster-region' => 'eastus', - 'container.azm.ms/cluster-subscription-id' => '72c8e8ca-dc16-47dc-b65c-6b5875eb600a', - 'container.azm.ms/cluster-resource-group' => 'dilipr-health-test', - 'container.azm.ms/cluster-name' => 'dilipr-health-test' - } - - cluster_id = 'fake_cluster_id' - - provider = HealthMonitorProvider.new(cluster_id, cluster_labels, resources, File.join(__dir__, "../../../../installer/conf/healthmonitorconfig.json")) - - aggregator = HealthContainerCpuMemoryAggregator.new(resources, provider) - deduped_records = aggregator.dedupe_records(formatted_records) - aggregator.aggregate(deduped_records) - aggregator.compute_state - records = aggregator.get_records - - #removed(missed) omsagent records should result in state being unknown - omsagent_record = records.select{|r| r["MonitorTypeId"] == MonitorId::CONTAINER_CPU_MONITOR_ID && r["Details"]["details"]["container"] == "omsagent" && !r["Details"]["details"]["workload_name"].include?("omsagent-rs") }[0] - omsagent_record['Details']['state'].must_equal HealthMonitorStates::UNKNOWN #limit is set - end -end \ No newline at end of file diff --git a/test/unit-tests/plugins/health/health_container_cpu_memory_record_formatter_spec.rb b/test/unit-tests/plugins/health/health_container_cpu_memory_record_formatter_spec.rb 
deleted file mode 100644 index e19eb15dc..000000000 --- a/test/unit-tests/plugins/health/health_container_cpu_memory_record_formatter_spec.rb +++ /dev/null @@ -1,58 +0,0 @@ -require_relative '../test_helpers' -Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/plugins/ruby/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each { |file| require file } -include HealthModel -include Minitest - -describe "HealthContainerCpuMemoryRecordFormatter spec" do - it 'returns the record in expected format when cadvisor record is well formed' do - formatter = HealthContainerCpuMemoryRecordFormatter.new - cadvisor_record = JSON.parse('{ - "DataItems": [ - { - "Timestamp": "2019-08-01T23:19:19Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourceGroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/6708e4ac-b49a-11e9-8a49-52a94e80d897/omsagent", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 85143552 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }') - record = formatter.get_record_from_cadvisor_record(cadvisor_record) - record.wont_equal nil - record["InstanceName"].must_equal "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourceGroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/6708e4ac-b49a-11e9-8a49-52a94e80d897/omsagent" - record["CounterName"].must_equal "memoryWorkingSetBytes" - record["CounterValue"].must_equal 85143552 - record["Timestamp"].must_equal "2019-08-01T23:19:19Z" - end - - it 'returns nil for invalid cadvisor record' do - formatter = HealthContainerCpuMemoryRecordFormatter.new - cadvisor_record = JSON.parse('{ - "DataItms": [ - { - "Timestamp": "2019-08-01T23:19:19Z", - "Host": "aks-nodepool1-19574989-2", - "ObjectName": "K8SContainer", - "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourceGroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/6708e4ac-b49a-11e9-8a49-52a94e80d897/omsagent", - "Collections": [ - { - "CounterName": "memoryWorkingSetBytes", - "Value": 85143552 - } - ] - } - ], - "DataType": "LINUX_PERF_BLOB", - "IPName": "LogManagement" - }') - record = formatter.get_record_from_cadvisor_record(cadvisor_record) - record.must_be_nil - end -end \ No newline at end of file diff --git a/test/unit-tests/plugins/health/health_hierarchy_builder_spec.rb b/test/unit-tests/plugins/health/health_hierarchy_builder_spec.rb deleted file mode 100644 index 615826c03..000000000 --- a/test/unit-tests/plugins/health/health_hierarchy_builder_spec.rb +++ /dev/null @@ -1,11 +0,0 @@ -require_relative '../test_helpers' -Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/plugins/ruby/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each { |file| require file } -include HealthModel -include Minitest - -describe "HealthHierarchyBuilder spec" do - it 'builds right hierarchy given a child monitor and a parent monitor provider' do - - end - -end \ No newline at end of file diff --git a/test/unit-tests/plugins/health/health_kubernetes_resource_spec.rb b/test/unit-tests/plugins/health/health_kubernetes_resource_spec.rb deleted file mode 100644 index a1a013052..000000000 --- a/test/unit-tests/plugins/health/health_kubernetes_resource_spec.rb +++ /dev/null @@ -1,222 +0,0 @@ -require_relative '../test_helpers' 
-Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/plugins/ruby/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each { |file| require file } -include HealthModel - -describe "HealthKubernetesResources spec" do - it "returns the right set of nodes and workloads given node and pod inventory" do - - #arrange - nodes_json = '{ - "items": [ - { - "metadata": { - "name": "aks-nodepool1-19574989-0" - } - }, - { - "metadata": { - "name": "aks-nodepool1-19574989-1" - } - } - ] - }' - - pods_json = '{ - "items": [ - { - "metadata": { - "name": "diliprdeploymentnodeapps-c4fdfb446-mzcsr", - "generateName": "diliprdeploymentnodeapps-c4fdfb446-", - "namespace": "default", - "selfLink": "/api/v1/namespaces/default/pods/diliprdeploymentnodeapps-c4fdfb446-mzcsr", - "uid": "ee31a9ce-526e-11e9-a899-6a5520730c61", - "resourceVersion": "4597573", - "creationTimestamp": "2019-03-29T22:06:40Z", - "labels": { - "app": "diliprsnodeapppod", - "diliprPodLabel1": "p1", - "diliprPodLabel2": "p2", - "pod-template-hash": "709896002" - }, - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "kind": "ReplicaSet", - "name": "diliprdeploymentnodeapps-c4fdfb446", - "uid": "ee1e78e0-526e-11e9-a899-6a5520730c61", - "controller": true, - "blockOwnerDeletion": true - } - ] - }, - "apiVersion": "v1", - "kind": "Pod" - }, - { - "metadata": { - "name": "pi-m8ccw", - "generateName": "pi-", - "namespace": "default", - "selfLink": "/api/v1/namespaces/default/pods/pi-m8ccw", - "uid": "9fb16aaa-7ccc-11e9-8d23-32c49ee6f300", - "resourceVersion": "7940877", - "creationTimestamp": "2019-05-22T20:03:10Z", - "labels": { - "controller-uid": "9fad836f-7ccc-11e9-8d23-32c49ee6f300", - "job-name": "pi" - }, - "ownerReferences": [ - { - "apiVersion": "batch/v1", - "kind": "Job", - "name": "pi", - "uid": "9fad836f-7ccc-11e9-8d23-32c49ee6f300", - "controller": true, - "blockOwnerDeletion": true - } - ] - }, - "apiVersion": "v1", - "kind": "Pod" - }, - { - "metadata": { - "name": "rss-site", - "namespace": "default", - "selfLink": "/api/v1/namespaces/default/pods/rss-site", - "uid": "68a34ea4-7ce4-11e9-8d23-32c49ee6f300", - "resourceVersion": "7954135", - "creationTimestamp": "2019-05-22T22:53:26Z", - "labels": { - "app": "web" - }, - "annotations": { - "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"web\"},\"name\":\"rss-site\",\"namespace\":\"default\"},\"spec\":{\"containers\":[{\"image\":\"nginx\",\"name\":\"front-end\",\"ports\":[{\"containerPort\":80}]},{\"image\":\"nickchase/rss-php-nginx:v1\",\"name\":\"rss-reader\",\"ports\":[{\"containerPort\":88}]}]}}\n" - } - }, - "apiVersion": "v1", - "kind": "Pod" - }, - { - "metadata": { - "name": "kube-proxy-4hjws", - "generateName": "kube-proxy-", - "namespace": "kube-system", - "selfLink": "/api/v1/namespaces/kube-system/pods/kube-proxy-4hjws", - "uid": "8cf7c410-88f4-11e9-b1b0-5eb4a3e9de7d", - "resourceVersion": "9661065", - "creationTimestamp": "2019-06-07T07:19:12Z", - "labels": { - "component": "kube-proxy", - "controller-revision-hash": "1271944371", - "pod-template-generation": "16", - "tier": "node" - }, - "annotations": { - "aks.microsoft.com/release-time": "seconds:1559735217 nanos:797729016 ", - "remediator.aks.microsoft.com/kube-proxy-restart": "7" - }, - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "kind": "DaemonSet", - "name": "kube-proxy", - "uid": "45640bf6-44e5-11e9-9920-423525a6b683", - "controller": true, - "blockOwnerDeletion": 
true - } - ] - }, - "apiVersion": "v1", - "kind": "Pod" - } - ] - }' - deployments_json = '{ - "items": [ - { - "metadata": { - "name": "diliprdeploymentnodeapps", - "namespace": "default", - "selfLink": "/apis/extensions/v1beta1/namespaces/default/deployments/diliprdeploymentnodeapps", - "uid": "ee1b111d-526e-11e9-a899-6a5520730c61", - "resourceVersion": "4597575", - "generation": 1, - "creationTimestamp": "2019-03-29T22:06:40Z", - "labels": { - "diliprdeploymentLabel1": "d1", - "diliprdeploymentLabel2": "d2" - }, - "annotations": { - "deployment.kubernetes.io/revision": "1", - "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"apps/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"diliprdeploymentLabel1\":\"d1\",\"diliprdeploymentLabel2\":\"d2\"},\"name\":\"diliprdeploymentnodeapps\",\"namespace\":\"default\"},\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"app\":\"diliprsnodeapppod\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"diliprsnodeapppod\",\"diliprPodLabel1\":\"p1\",\"diliprPodLabel2\":\"p2\"}},\"spec\":{\"containers\":[{\"image\":\"rdilip83/logeverysecond:v2\",\"name\":\"diliprcontainerhelloapp\"}]}}}}\n" - } - }, - "spec": { - "replicas": 1, - "selector": { - "matchLabels": { - "app": "diliprsnodeapppod" - } - }, - "template": { - "metadata": { - "creationTimestamp": null, - "labels": { - "app": "diliprsnodeapppod", - "diliprPodLabel1": "p1", - "diliprPodLabel2": "p2" - } - }, - "spec": { - "containers": [ - { - "name": "diliprcontainerhelloapp", - "image": "rdilip83/logeverysecond:v2", - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "imagePullPolicy": "IfNotPresent" - } - ], - "restartPolicy": "Always", - "terminationGracePeriodSeconds": 30, - "dnsPolicy": "ClusterFirst", - "securityContext": {}, - "schedulerName": "default-scheduler" - } - }, - "strategy": { - "type": "RollingUpdate", - "rollingUpdate": { - "maxUnavailable": "25%", - "maxSurge": "25%" - } - }, - "revisionHistoryLimit": 2, - "progressDeadlineSeconds": 600 - }, - "apiVersion": "extensions/v1beta1", - "kind": "Deployment" - } - ] - }' - nodes = JSON.parse(nodes_json) - pods = JSON.parse(pods_json) - deployments = JSON.parse(deployments_json) - resources = HealthKubernetesResources.instance - resources.node_inventory = nodes - resources.pod_inventory = pods - resources.set_replicaset_inventory(deployments) - #act - parsed_nodes = resources.get_nodes - parsed_workloads = resources.get_workload_names - - #assert - assert_equal parsed_nodes.size, 2 - assert_equal parsed_workloads.size, 3 - - assert_equal parsed_nodes, ['aks-nodepool1-19574989-0', 'aks-nodepool1-19574989-1'] - parsed_workloads.sort.must_equal ['default~~diliprdeploymentnodeapps-c4fdfb446', 'default~~rss-site', 'kube-system~~kube-proxy'].sort - end -end \ No newline at end of file diff --git a/test/unit-tests/plugins/health/health_missing_signal_generator_spec.rb b/test/unit-tests/plugins/health/health_missing_signal_generator_spec.rb deleted file mode 100644 index 125d02fe0..000000000 --- a/test/unit-tests/plugins/health/health_missing_signal_generator_spec.rb +++ /dev/null @@ -1,79 +0,0 @@ -require_relative '../test_helpers' -Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/plugins/ruby/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each {|file| require file} -include HealthModel -include Minitest - -describe "HealthMissingSignalGenerator spec" do - it 'generates missing node 
signals' do - #arrange - resources = Mock.new - resources.expect(:get_nodes, ["node1"]) - resources.expect(:get_workload_names, ["default~~workload1"]) - - provider = Mock.new - provider.expect(:get_node_labels, {HealthMonitorLabels::HOSTNAME => "node1"}, ["node1"]) - - node1_cpu_record = Mock.new - def node1_cpu_record.monitor_id; "node_cpu_utilization"; end - def node1_cpu_record.monitor_instance_id; "node_cpu_utilization"; end - def node1_cpu_record.labels; {HealthMonitorLabels::HOSTNAME => "node1"}; end - def node1_cpu_record.config; {}; end - def node1_cpu_record.state; "pass"; end - - node1_memory_record = Mock.new - def node1_memory_record.monitor_id; "node_memory_utilization"; end - def node1_memory_record.monitor_instance_id; "node_memory_utilization"; end - def node1_memory_record.labels; {HealthMonitorLabels::HOSTNAME => "node1"}; end - def node1_memory_record.config; {}; end - def node1_memory_record.state; "pass"; end - - node1_condition_record = Mock.new - def node1_condition_record.monitor_id; "node_condition"; end - def node1_condition_record.monitor_instance_id; "node_condition-0c593682737a955dc8e0947ad12754fe"; end - def node1_condition_record.labels; {HealthMonitorLabels::HOSTNAME => "node1"}; end - def node1_condition_record.config; {}; end - def node1_condition_record.state; "pass"; end - - - workload1_pods_ready_record = Mock.new - def workload1_pods_ready_record.monitor_id; "user_workload_pods_ready"; end - def workload1_pods_ready_record.monitor_instance_id; "user_workload_pods_ready-workload1"; end - def workload1_pods_ready_record.labels; {HealthMonitorLabels::NAMESPACE => "default", HealthMonitorLabels::WORKLOAD_NAME => "workload1"}; end - def workload1_pods_ready_record.config; {}; end - def workload1_pods_ready_record.state; "pass"; end - - generator = HealthMissingSignalGenerator.new - generator.update_last_received_records([node1_cpu_record, node1_memory_record, node1_condition_record, workload1_pods_ready_record]) - - #act - missing = generator.get_missing_signals('fake_cluster_id', [node1_cpu_record, node1_memory_record], resources, provider) - - #assert - assert_equal missing.size, 2 - - assert_equal missing[0].monitor_id, "node_condition" - assert_equal missing[0].state, "unknown" - assert_equal missing[0].monitor_instance_id, "node_condition-0c593682737a955dc8e0947ad12754fe" - - assert_equal missing[1].monitor_id, "user_workload_pods_ready" - assert_equal missing[1].state, "unknown" - assert_equal missing[1].monitor_instance_id, "user_workload_pods_ready-workload1" - - #arrange - resources.expect(:get_nodes, ["node1"]) - resources.expect(:get_workload_names, ["default~~workload1"]) - provider.expect(:get_node_labels, {HealthMonitorLabels::HOSTNAME => "node1"}, ["node1"]) - generator.update_last_received_records([node1_cpu_record, node1_memory_record]) - #act - missing = generator.get_missing_signals('fake_cluster_id', [node1_cpu_record, node1_memory_record], resources, provider) - #assert - assert_equal missing.size, 2 - assert_equal missing[0].monitor_id, "node_condition" - assert_equal missing[0].state, "unknown" - assert_equal missing[0].monitor_instance_id, "node_condition-0c593682737a955dc8e0947ad12754fe" - - assert_equal missing[1].monitor_id, "user_workload_pods_ready" - assert_equal missing[1].state, "none" - assert_equal missing[1].monitor_instance_id, "user_workload_pods_ready-workload1" - end -end \ No newline at end of file diff --git a/test/unit-tests/plugins/health/health_model_buffer_spec.rb 
b/test/unit-tests/plugins/health/health_model_buffer_spec.rb deleted file mode 100644 index a19969082..000000000 --- a/test/unit-tests/plugins/health/health_model_buffer_spec.rb +++ /dev/null @@ -1,25 +0,0 @@ -require_relative '../../../../source/plugins/ruby/health/health_model_buffer' -require_relative '../test_helpers' - -include HealthModel - -describe "HealthModelBuffer Spec" do - it "get_buffer returns the correct buffer data" do - # Arrange - buffer = HealthModelBuffer.new - # Act - buffer.add_to_buffer(['mockRecord']) - # Assert - assert_equal buffer.get_buffer.length, 1 - - #Act - buffer.add_to_buffer(['mockRecord1', 'mockRecord2']) - #Assert - assert_equal buffer.get_buffer.length, 3 - - #Act - buffer.reset_buffer - #Assert - assert_equal buffer.get_buffer.length, 0 - end -end \ No newline at end of file diff --git a/test/unit-tests/plugins/health/health_model_builder_spec.rb b/test/unit-tests/plugins/health/health_model_builder_spec.rb deleted file mode 100644 index c21524982..000000000 --- a/test/unit-tests/plugins/health/health_model_builder_spec.rb +++ /dev/null @@ -1,37 +0,0 @@ -require_relative '../test_helpers' -# consider doing this in test_helpers.rb so that this code is common -Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/plugins/ruby/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each { |file| require file } -include HealthModel -include Minitest - -describe "HealthModelBuilder spec" do - it "Verify hierarchy builder and finalizer public methods are called" do - #arrange - mock_hierarchy_builder = Mock::new - health_record = Mock::new - mock_monitor_set = Mock::new - mock_state_finalizer = Mock::new - mock_hierarchy_builder.expect(:process_record, nil, [health_record, mock_monitor_set]) - mock_state_finalizer.expect(:finalize, {}, [mock_monitor_set]) - def mock_monitor_set.get_map; {}; end - - #act - builder = HealthModelBuilder.new(mock_hierarchy_builder, [mock_state_finalizer], mock_monitor_set) - builder.process_records([health_record]) - builder.finalize_model - #assert - assert mock_hierarchy_builder.verify - assert mock_state_finalizer.verify - end - - it "Verify finalize_model raises if state_finalizers is empty" do - #arrange - mock_hierarchy_builder = Mock.new - mock_monitor_set = Mock.new - builder = HealthModelBuilder.new(mock_hierarchy_builder, [], mock_monitor_set) - #act and assert - assert_raises do - builder.finalize_model - end - end -end \ No newline at end of file diff --git a/test/unit-tests/plugins/health/health_model_builder_test.rb b/test/unit-tests/plugins/health/health_model_builder_test.rb deleted file mode 100644 index 42f1b60a8..000000000 --- a/test/unit-tests/plugins/health/health_model_builder_test.rb +++ /dev/null @@ -1,516 +0,0 @@ -require 'test/unit' -require 'json' -# require_relative '../../../source/plugins/ruby/health' - -Dir[File.join(__dir__, '../../../../source/plugins/ruby/health', '*.rb')].each { |file| require file } - -class FilterHealthModelBuilderTest < Test::Unit::TestCase - include HealthModel - - # def test_event_stream - # #setup - # health_definition_path = File.join(__dir__, '../../../../installer/conf/health_model_definition.json') - # health_model_definition = ParentMonitorProvider.new(HealthModelDefinitionParser.new(health_definition_path).parse_file) - # monitor_factory = MonitorFactory.new - # hierarchy_builder = HealthHierarchyBuilder.new(health_model_definition, monitor_factory) - # # TODO: Figure out if we need to add NodeMonitorHierarchyReducer to the list of 
finalizers. For now, dont compress/optimize, since it becomes impossible to construct the model on the UX side - # state_finalizers = [AggregateMonitorStateFinalizer.new] - # monitor_set = MonitorSet.new - # model_builder = HealthModelBuilder.new(hierarchy_builder, state_finalizers, monitor_set) - - # nodes_file_map = { - # #"extra" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/extra_nodes.json", - # "first" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/nodes.json", - # #"first-nosecondnode" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/nodes.json", - # "second" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/nodes.json", - # "third" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/nodes.json", - # #"fourth" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/nodes.json", - # #"missing" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/nodes.json", - # #"kube_api_down" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/nodes.json", - # } - - # pods_file_map = { - # #"extra" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/extra_pods.json", - # "first" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/pods.json", - # #"first-nosecondnode" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/pods.json", - # "second" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/pods.json", - # "third" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/pods.json", - # #"fourth" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/pods.json", - # #"missing" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/pods.json", - # #"kube_api_down" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/pods.json", - # } - - # cluster_labels = { - # 'container.azm.ms/cluster-region' => 'eastus', - # 'container.azm.ms/cluster-subscription-id' => '72c8e8ca-dc16-47dc-b65c-6b5875eb600a', - # 'container.azm.ms/cluster-resource-group' => 'dilipr-health-test', - # 'container.azm.ms/cluster-name' => 'dilipr-health-test' - # } - - # cluster_id = 'fake_cluster_id' - - # #test - # state = HealthMonitorState.new() - # generator = HealthMissingSignalGenerator.new - - # for scenario in ["first", "second", "third"] - # mock_data_path = File.join(__dir__, "../../../../health_records/#{scenario}_daemon_set_signals.json") - # file = File.read(mock_data_path) - # records = JSON.parse(file) - - # node_inventory = JSON.parse(File.read(nodes_file_map[scenario])) - # pod_inventory = JSON.parse(File.read(pods_file_map[scenario])) - # deployment_inventory = JSON.parse(File.read(File.join(__dir__, "../../../../inventory/deployments.json"))) - # resources = HealthKubernetesResources.instance - # resources.node_inventory = node_inventory - # resources.pod_inventory = pod_inventory - # resources.set_replicaset_inventory(deployment_inventory) - - # workload_names = resources.get_workload_names - # provider = HealthMonitorProvider.new(cluster_id, cluster_labels, resources, File.join(__dir__, "../../../../installer/conf/healthmonitorconfig.json")) - - # health_monitor_records = [] - # records.each do |record| - # monitor_instance_id = record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] - # monitor_id = record[HealthMonitorRecordFields::MONITOR_ID] - # health_monitor_record = HealthMonitorRecord.new( - # record[HealthMonitorRecordFields::MONITOR_ID], - # record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID], - # 
record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED], - # record[HealthMonitorRecordFields::DETAILS]["state"], - # provider.get_labels(record), - # provider.get_config(monitor_id), - # record[HealthMonitorRecordFields::DETAILS] - # ) - - # state.update_state(health_monitor_record, - # provider.get_config(health_monitor_record.monitor_id) - # ) - - # # get the health state based on the monitor's operational state - # # update state calls updates the state of the monitor based on configuration and history of the the monitor records - # health_monitor_record.state = state.get_state(monitor_instance_id).new_state - # health_monitor_records.push(health_monitor_record) - # instance_state = state.get_state(monitor_instance_id) - # #puts "#{monitor_instance_id} #{instance_state.new_state} #{instance_state.old_state} #{instance_state.should_send}" - # end - - - # #handle kube api down - # kube_api_down_handler = HealthKubeApiDownHandler.new - # health_monitor_records = kube_api_down_handler.handle_kube_api_down(health_monitor_records) - - # # Dedupe daemonset signals - # # Remove unit monitor signals for “gone” objects - # reducer = HealthSignalReducer.new() - # reduced_records = reducer.reduce_signals(health_monitor_records, resources) - - # cluster_id = 'fake_cluster_id' - - # #get the list of 'none' and 'unknown' signals - # missing_signals = generator.get_missing_signals(cluster_id, reduced_records, resources, provider) - # #update state for missing signals - # missing_signals.each{|signal| - # state.update_state(signal, - # provider.get_config(signal.monitor_id) - # ) - # } - # generator.update_last_received_records(reduced_records) - # reduced_records.push(*missing_signals) - - # # build the health model - # all_records = reduced_records - # model_builder.process_records(all_records) - # all_monitors = model_builder.finalize_model - - # # update the state for aggregate monitors (unit monitors are updated above) - # all_monitors.each{|monitor_instance_id, monitor| - # if monitor.is_aggregate_monitor - # state.update_state(monitor, - # provider.get_config(monitor.monitor_id) - # ) - # end - - # instance_state = state.get_state(monitor_instance_id) - # #puts "#{monitor_instance_id} #{instance_state.new_state} #{instance_state.old_state} #{instance_state.should_send}" - # should_send = instance_state.should_send - - # # always send cluster monitor as a heartbeat - # if !should_send && monitor_instance_id != MonitorId::CLUSTER - # all_monitors.delete(monitor_instance_id) - # end - # } - - # records_to_send = [] - # all_monitors.keys.each{|key| - # record = provider.get_record(all_monitors[key], state) - # #puts "#{record["MonitorInstanceId"]} #{record["OldState"]} #{record["NewState"]}" - # } - - # if scenario == "first" - # assert_equal 50, all_monitors.size - # elsif scenario == "second" - # assert_equal 34, all_monitors.size - # elsif scenario == "third" - # assert_equal 5, all_monitors.size - # end - # # for each key in monitor.keys, - # # get the state from health_monitor_state - # # generate the record to send - # serializer = HealthStateSerializer.new(File.join(__dir__, '../../../../health_records\health_model_state.json')) - # serializer.serialize(state) - - # deserializer = HealthStateDeserializer.new(File.join(__dir__, '../../../../health_records\health_model_state.json')) - # deserialized_state = deserializer.deserialize - - # after_state = HealthMonitorState.new - # after_state.initialize_state(deserialized_state) - # end - # end - - # def test_event_stream_aks_engine - - # #setup - 
# health_definition_path = File.join(__dir__, '../../../../installer\conf\health_model_definition.json') - # health_model_definition = ParentMonitorProvider.new(HealthModelDefinitionParser.new(health_definition_path).parse_file) - # monitor_factory = MonitorFactory.new - # hierarchy_builder = HealthHierarchyBuilder.new(health_model_definition, monitor_factory) - # state_finalizers = [AggregateMonitorStateFinalizer.new] - # monitor_set = MonitorSet.new - # model_builder = HealthModelBuilder.new(hierarchy_builder, state_finalizers, monitor_set) - - # nodes_file_map = { - # #"extra" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/extra_nodes.json", - # #"first" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/nodes.json", - # #"first-nosecondnode" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/nodes.json", - # #"second" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/nodes.json", - # #"third" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/nodes.json", - # #"fourth" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/nodes.json", - # #"missing" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/nodes.json", - # #"kube_api_down" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/nodes.json", - # "aks-engine-1" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/aks-engine/nodes.json", - # "aks-engine-2" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/aks-engine/nodes.json", - # "aks-engine-3" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/aks-engine/nodes.json", - # } - - # pods_file_map = { - # #"extra" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/extra_pods.json", - # #"first" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/pods.json", - # #"first-nosecondnode" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/pods.json", - # #"second" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/pods.json", - # #"third" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/pods.json", - # #"fourth" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/pods.json", - # #"missing" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/pods.json", - # #"kube_api_down" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/pods.json", - # "aks-engine-1" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/aks-engine/pods.json", - # "aks-engine-2" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/aks-engine/pods.json", - # "aks-engine-3" => "C:/AzureMonitor/ContainerInsights/Docker-Provider/inventory/aks-engine/pods.json", - # } - - # cluster_labels = { - # 'container.azm.ms/cluster-region' => 'eastus', - # 'container.azm.ms/cluster-subscription-id' => '72c8e8ca-dc16-47dc-b65c-6b5875eb600a', - # 'container.azm.ms/cluster-resource-group' => 'aks-engine-health', - # 'container.azm.ms/cluster-name' => 'aks-engine-health' - # } - - # cluster_id = 'fake_cluster_id' - - # #test - # state = HealthMonitorState.new() - # generator = HealthMissingSignalGenerator.new - - # for scenario in 1..3 - # mock_data_path = File.join(__dir__, "../../../../health_records/aks-engine/aks-engine-#{scenario}.json") - # file = File.read(mock_data_path) - # records = JSON.parse(file) - - # node_inventory = JSON.parse(File.read(nodes_file_map["aks-engine-#{scenario}"])) - # pod_inventory = JSON.parse(File.read(pods_file_map["aks-engine-#{scenario}"])) - 
# deployment_inventory = JSON.parse(File.read(File.join(__dir__, "../../../../inventory/aks-engine/deployments.json"))) - # resources = HealthKubernetesResources.instance - # resources.node_inventory = node_inventory - # resources.pod_inventory = pod_inventory - # resources.deployment_inventory = deployment_inventory - - # workload_names = resources.get_workload_names - # provider = HealthMonitorProvider.new(cluster_id, cluster_labels, resources, File.join(__dir__, "../../../../installer/conf/healthmonitorconfig.json")) - - # health_monitor_records = [] - # records.each do |record| - # monitor_instance_id = record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] - # monitor_id = record[HealthMonitorRecordFields::MONITOR_ID] - # health_monitor_record = HealthMonitorRecord.new( - # record[HealthMonitorRecordFields::MONITOR_ID], - # record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID], - # record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED], - # record[HealthMonitorRecordFields::DETAILS]["state"], - # provider.get_labels(record), - # provider.get_config(monitor_id), - # record[HealthMonitorRecordFields::DETAILS] - # ) - - # state.update_state(health_monitor_record, - # provider.get_config(health_monitor_record.monitor_id) - # ) - - # # get the health state based on the monitor's operational state - # # update state calls updates the state of the monitor based on configuration and history of the the monitor records - # health_monitor_record.state = state.get_state(monitor_instance_id).new_state - # health_monitor_records.push(health_monitor_record) - # instance_state = state.get_state(monitor_instance_id) - # #puts "#{monitor_instance_id} #{instance_state.new_state} #{instance_state.old_state} #{instance_state.should_send}" - # end - - - # #handle kube api down - # kube_api_down_handler = HealthKubeApiDownHandler.new - # health_monitor_records = kube_api_down_handler.handle_kube_api_down(health_monitor_records) - - # # Dedupe daemonset signals - # # Remove unit monitor signals for “gone” objects - # reducer = HealthSignalReducer.new() - # reduced_records = reducer.reduce_signals(health_monitor_records, resources) - - # cluster_id = 'fake_cluster_id' - - # #get the list of 'none' and 'unknown' signals - # missing_signals = generator.get_missing_signals(cluster_id, reduced_records, resources, provider) - # #update state for missing signals - # missing_signals.each{|signal| - # state.update_state(signal, - # provider.get_config(signal.monitor_id) - # ) - # } - # generator.update_last_received_records(reduced_records) - # reduced_records.push(*missing_signals) - - # # build the health model - # all_records = reduced_records - # model_builder.process_records(all_records) - # all_monitors = model_builder.finalize_model - - # # update the state for aggregate monitors (unit monitors are updated above) - # all_monitors.each{|monitor_instance_id, monitor| - # if monitor.is_aggregate_monitor - # state.update_state(monitor, - # provider.get_config(monitor.monitor_id) - # ) - # end - - # instance_state = state.get_state(monitor_instance_id) - # #puts "#{monitor_instance_id} #{instance_state.new_state} #{instance_state.old_state} #{instance_state.should_send}" - # should_send = instance_state.should_send - - # # always send cluster monitor as a heartbeat - # if !should_send && monitor_instance_id != MonitorId::CLUSTER - # all_monitors.delete(monitor_instance_id) - # end - # } - - # records_to_send = [] - # all_monitors.keys.each{|key| - # record = provider.get_record(all_monitors[key], state) - # #puts 
"#{record["MonitorInstanceId"]} #{record["OldState"]} #{record["NewState"]}" - # } - - # if scenario == 1 - # assert_equal 58, all_monitors.size - # elsif scenario == 2 - # assert_equal 37, all_monitors.size - # elsif scenario == 3 - # assert_equal 6, all_monitors.size - # end - # # for each key in monitor.keys, - # # get the state from health_monitor_state - # # generate the record to send - # serializer = HealthStateSerializer.new(File.join(__dir__, '../../../../health_records\health_model_state_aks-engine.json')) - # serializer.serialize(state) - - # deserializer = HealthStateDeserializer.new(File.join(__dir__, '../../../../health_records\health_model_state_aks-engine.json')) - # deserialized_state = deserializer.deserialize - - # after_state = HealthMonitorState.new - # after_state.initialize_state(deserialized_state) - # end - # end - - # def test_container_memory_cpu_with_model - # health_definition_path = File.join(__dir__, '../../../../installer/conf/health_model_definition.json') - # health_model_definition = ParentMonitorProvider.new(HealthModelDefinitionParser.new(health_definition_path).parse_file) - # monitor_factory = MonitorFactory.new - # hierarchy_builder = HealthHierarchyBuilder.new(health_model_definition, monitor_factory) - # # TODO: Figure out if we need to add NodeMonitorHierarchyReducer to the list of finalizers. For now, dont compress/optimize, since it becomes impossible to construct the model on the UX side - # state_finalizers = [AggregateMonitorStateFinalizer.new] - # monitor_set = MonitorSet.new - # model_builder = HealthModelBuilder.new(hierarchy_builder, state_finalizers, monitor_set) - - # nodes_file_map = { - # "first" => "C:/Users/dilipr/desktop/health/container_cpu_memory/nodes.json", - # "second" => "C:/Users/dilipr/desktop/health/container_cpu_memory/nodes.json", - # "third" => "C:/Users/dilipr/desktop/health/container_cpu_memory/nodes.json", - # } - - # pods_file_map = { - # "first" => "C:/Users/dilipr/desktop/health/container_cpu_memory/pods.json", - # "second" => "C:/Users/dilipr/desktop/health/container_cpu_memory/pods.json", - # "third" => "C:/Users/dilipr/desktop/health/container_cpu_memory/pods.json", - # } - - # cluster_labels = { - # 'container.azm.ms/cluster-region' => 'eastus', - # 'container.azm.ms/cluster-subscription-id' => '72c8e8ca-dc16-47dc-b65c-6b5875eb600a', - # 'container.azm.ms/cluster-resource-group' => 'dilipr-health-test', - # 'container.azm.ms/cluster-name' => 'dilipr-health-test' - # } - - # cluster_id = 'fake_cluster_id' - - # #test - # state = HealthMonitorState.new() - # generator = HealthMissingSignalGenerator.new - - # mock_data_path = "C:/Users/dilipr/desktop/health/container_cpu_memory/daemonset.json" - # file = File.read(mock_data_path) - # records = JSON.parse(file) - - # node_inventory = JSON.parse(File.read("C:/Users/dilipr/desktop/health/container_cpu_memory/nodes.json")) - # pod_inventory = JSON.parse(File.read("C:/Users/dilipr/desktop/health/container_cpu_memory/pods.json")) - # deployment_inventory = JSON.parse(File.read("C:/Users/dilipr/desktop/health/container_cpu_memory/deployments.json")) - # resources = HealthKubernetesResources.instance - # resources.node_inventory = node_inventory - # resources.pod_inventory = pod_inventory - # resources.set_replicaset_inventory(deployment_inventory) - - # workload_names = resources.get_workload_names - # provider = HealthMonitorProvider.new(cluster_id, cluster_labels, resources, File.join(__dir__, "../../../../installer/conf/healthmonitorconfig.json")) - - - # #container 
memory cpu records - # file = File.read('C:/Users/dilipr/desktop/health/container_cpu_memory/cadvisor_perf.json') - # cadvisor_records = JSON.parse(file) - # cadvisor_records = cadvisor_records.select{|record| record['DataItems'][0]['ObjectName'] == 'K8SContainer'} - # formatted_records = [] - # formatter = HealthContainerCpuMemoryRecordFormatter.new - # cadvisor_records.each{|record| - # formatted_record = formatter.get_record_from_cadvisor_record(record) - # formatted_records.push(formatted_record) - # } - - # resources.build_pod_uid_lookup #call this in in_kube_health every min - - # cluster_labels = { - # 'container.azm.ms/cluster-region' => 'eastus', - # 'container.azm.ms/cluster-subscription-id' => '72c8e8ca-dc16-47dc-b65c-6b5875eb600a', - # 'container.azm.ms/cluster-resource-group' => 'dilipr-health-test', - # 'container.azm.ms/cluster-name' => 'dilipr-health-test' - # } - - # cluster_id = 'fake_cluster_id' - - # aggregator = HealthContainerCpuMemoryAggregator.new(resources, provider) - # deduped_records = aggregator.dedupe_records(formatted_records) - # aggregator.aggregate(deduped_records) - # aggregator.compute_state - # container_cpu_memory_records = aggregator.get_records - - # records.concat(container_cpu_memory_records) - - # health_monitor_records = [] - # records.each do |record| - # monitor_instance_id = record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] - # monitor_id = record[HealthMonitorRecordFields::MONITOR_ID] - # health_monitor_record = HealthMonitorRecord.new( - # record[HealthMonitorRecordFields::MONITOR_ID], - # record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID], - # record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED], - # record[HealthMonitorRecordFields::DETAILS]["state"], - # provider.get_labels(record), - # provider.get_config(monitor_id), - # record[HealthMonitorRecordFields::DETAILS] - # ) - - # state.update_state(health_monitor_record, - # provider.get_config(health_monitor_record.monitor_id) - # ) - - # # get the health state based on the monitor's operational state - # # update state calls updates the state of the monitor based on configuration and history of the the monitor records - # health_monitor_record.state = state.get_state(monitor_instance_id).new_state - # health_monitor_records.push(health_monitor_record) - # #puts "#{monitor_instance_id} #{instance_state.new_state} #{instance_state.old_state} #{instance_state.should_send}" - # end - - # #handle kube api down - # kube_api_down_handler = HealthKubeApiDownHandler.new - # health_monitor_records = kube_api_down_handler.handle_kube_api_down(health_monitor_records) - - # # Dedupe daemonset signals - # # Remove unit monitor signals for “gone” objects - # reducer = HealthSignalReducer.new() - # reduced_records = reducer.reduce_signals(health_monitor_records, resources) - - # cluster_id = 'fake_cluster_id' - - # #get the list of 'none' and 'unknown' signals - # missing_signals = generator.get_missing_signals(cluster_id, reduced_records, resources, provider) - # #update state for missing signals - # missing_signals.each{|signal| - # state.update_state(signal, - # provider.get_config(signal.monitor_id) - # ) - # } - # generator.update_last_received_records(reduced_records) - # reduced_records.push(*missing_signals) - - # # build the health model - # all_records = reduced_records - # model_builder.process_records(all_records) - # all_monitors = model_builder.finalize_model - - # # update the state for aggregate monitors (unit monitors are updated above) - # 
all_monitors.each{|monitor_instance_id, monitor| - # if monitor.is_aggregate_monitor - # state.update_state(monitor, - # provider.get_config(monitor.monitor_id) - # ) - # end - - # instance_state = state.get_state(monitor_instance_id) - # #puts "#{monitor_instance_id} #{instance_state.new_state} #{instance_state.old_state} #{instance_state.should_send}" - # should_send = instance_state.should_send - - # # always send cluster monitor as a heartbeat - # if !should_send && monitor_instance_id != MonitorId::CLUSTER - # all_monitors.delete(monitor_instance_id) - # end - # } - - # records_to_send = [] - # all_monitors.keys.each{|key| - # record = provider.get_record(all_monitors[key], state) - # #puts "#{record["MonitorInstanceId"]} #{record["OldState"]} #{record["NewState"]}" - # } - # end - - def test_get_workload_name - # node_inventory = JSON.parse(File.read("C:/AzureMonitor/ContainerInsights/Docker-Provider/test/code/plugin/health/dilipr-health-test-nodes.json")) - # pod_inventory = JSON.parse(File.read('C:/AzureMonitor/ContainerInsights/Docker-Provider/test/code/plugin/health/dilipr-health-test-pods.json')) - # replicaset_inventory = JSON.parse(File.read('C:/AzureMonitor/ContainerInsights/Docker-Provider/test/code/plugin/health/dilipr-health-test-rs.json')) - node_inventory = JSON.parse(File.read("C:/AzureMonitor/ContainerInsights/Docker-Provider/test/code/plugin/health/jobyaks2-nodes.json")) - pod_inventory = JSON.parse(File.read('C:/AzureMonitor/ContainerInsights/Docker-Provider/test/code/plugin/health/jobyaks2-pods.json')) - replicaset_inventory = JSON.parse(File.read('C:/AzureMonitor/ContainerInsights/Docker-Provider/test/code/plugin/health/jobyaks2-rs.json')) - resources = HealthKubernetesResources.instance - resources.node_inventory = node_inventory - resources.pod_inventory = pod_inventory - resources.set_replicaset_inventory(replicaset_inventory) - pod_inventory['items'].each{|pod| - workload_name = resources.get_workload_name(pod) - puts "POD #{pod['metadata']['name']} Workload Name #{workload_name}" - } - - pods_ready_hash = HealthMonitorUtils.get_pods_ready_hash(resources) - - puts JSON.pretty_generate(pods_ready_hash) - end -end \ No newline at end of file diff --git a/test/unit-tests/plugins/health/health_model_definition_parser_spec.rb b/test/unit-tests/plugins/health/health_model_definition_parser_spec.rb deleted file mode 100644 index 52a98a113..000000000 --- a/test/unit-tests/plugins/health/health_model_definition_parser_spec.rb +++ /dev/null @@ -1,24 +0,0 @@ -require_relative '../test_helpers' -# consider doing this in test_helpers.rb so that this code is common -Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/plugins/ruby/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each { |file| require file } -include HealthModel - -describe "HealthModelDefinitionParser spec " do - it "parses the definition file correctly with the right conditions" do - #arrange - - parser = HealthModelDefinitionParser.new(File.join(File.expand_path(File.dirname(__FILE__)), 'test_health_model_definition.json')) - #act - model_definition = parser.parse_file - - #assert - assert_equal model_definition['conditional_monitor_id'].key?("conditions"), true - assert_equal model_definition['conditional_monitor_id']["conditions"].size, 2 - assert_equal model_definition['conditional_monitor_id'].key?("parent_monitor_id"), false - - #assert - assert_equal model_definition['monitor_id'].key?("conditions"), false - assert_equal 
model_definition['monitor_id'].key?("parent_monitor_id"), true - end - -end \ No newline at end of file diff --git a/test/unit-tests/plugins/health/health_monitor_state_spec.rb b/test/unit-tests/plugins/health/health_monitor_state_spec.rb deleted file mode 100644 index 34c61a2df..000000000 --- a/test/unit-tests/plugins/health/health_monitor_state_spec.rb +++ /dev/null @@ -1,176 +0,0 @@ -require_relative '../test_helpers' -# consider doing this in test_helpers.rb so that this code is common -Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/plugins/ruby/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each { |file| require file } -include HealthModel -include Minitest - -describe "HealthMonitorState spec" do - it 'updates should_send to true for monitors which hasnt been sent before' do - #arrange - state = HealthMonitorState.new - mock_monitor = Mock.new - def mock_monitor.state; "pass"; end - def mock_monitor.monitor_id; "monitor_id"; end - def mock_monitor.monitor_instance_id; "monitor_instance_id"; end - def mock_monitor.transition_date_time; Time.now.utc.iso8601; end - def mock_monitor.details; {"state" => "pass", "timestamp" => Time.now.utc.iso8601, "details" => {}}; end - - #act - state.update_state(mock_monitor, {}) - monitor_state = state.get_state("monitor_instance_id") - #assert - monitor_state.should_send.must_equal true - monitor_state.old_state.must_equal "none" - monitor_state.new_state.must_equal "pass" - end - - it 'updates should_send to true for monitors which need no consistent state change' do - #arrange - state = HealthMonitorState.new - mock_monitor = Mock.new - def mock_monitor.state; "pass"; end - def mock_monitor.monitor_id; "monitor_id"; end - def mock_monitor.monitor_instance_id; "monitor_instance_id"; end - def mock_monitor.transition_date_time; Time.now.utc.iso8601; end - def mock_monitor.details; {"state" => "pass", "timestamp" => Time.now.utc.iso8601, "details" => {}}; end - - #act - state.update_state(mock_monitor, {}) - monitor_state = state.get_state("monitor_instance_id") - #assert - monitor_state.should_send.must_equal true - monitor_state.old_state.must_equal "none" - monitor_state.new_state.must_equal "pass" - - #arrange - def mock_monitor.state; "fail"; end - def mock_monitor.details; {"state" => "fail", "timestamp" => Time.now.utc.iso8601, "details" => {}}; end - #act - state.update_state(mock_monitor, {}) - monitor_state = state.get_state("monitor_instance_id") - #assert - monitor_state.should_send.must_equal true - monitor_state.old_state.must_equal "pass" - monitor_state.new_state.must_equal "fail" - end - - it 'updates should_send to false for monitors which need consistent state change and has no consistent state change' do - #arrange - state = HealthMonitorState.new - mock_monitor = Mock.new - def mock_monitor.state; "pass"; end - def mock_monitor.monitor_id; "monitor_id"; end - def mock_monitor.monitor_instance_id; "monitor_instance_id"; end - def mock_monitor.transition_date_time; Time.now.utc.iso8601; end - def mock_monitor.details; {"state" => "pass", "timestamp" => Time.now.utc.iso8601, "details" => {}}; end - - config = JSON.parse('{ - "WarnIfGreaterThanPercentage": 80.0, - "FailIfGreaterThanPercentage": 90.0, - "ConsecutiveSamplesForStateTransition": 3 - }') - #act - state.update_state(mock_monitor, config) - monitor_state = state.get_state("monitor_instance_id") - #assert - monitor_state.should_send.must_equal true - - #arrange - def mock_monitor.state; "fail"; end - def mock_monitor.details; 
{"state" => "fail", "timestamp" => Time.now.utc.iso8601, "details" => {}}; end - #act - state.update_state(mock_monitor, config) - monitor_state = state.get_state("monitor_instance_id") - #assert - monitor_state.should_send.must_equal false - end - - it 'updates should_send to true for monitors which need consistent state change and has a consistent state change' do - #arrange - state = HealthMonitorState.new - mock_monitor = Mock.new - def mock_monitor.state; "pass"; end - def mock_monitor.monitor_id; "monitor_id"; end - def mock_monitor.monitor_instance_id; "monitor_instance_id"; end - def mock_monitor.transition_date_time; Time.now.utc.iso8601; end - def mock_monitor.details; {"state" => "pass", "timestamp" => Time.now.utc.iso8601, "details" => {}}; end - - config = JSON.parse('{ - "WarnIfGreaterThanPercentage": 80.0, - "FailIfGreaterThanPercentage": 90.0, - "ConsecutiveSamplesForStateTransition": 3 - }') - #act - state.update_state(mock_monitor, config) - monitor_state = state.get_state("monitor_instance_id") - #assert - monitor_state.should_send.must_equal true - - #arrange - def mock_monitor.state; "fail"; end - def mock_monitor.details; {"state" => "fail", "timestamp" => Time.now.utc.iso8601, "details" => {}}; end - #act - state.update_state(mock_monitor, config) - monitor_state = state.get_state("monitor_instance_id") - #assert - monitor_state.should_send.must_equal false - - #act - state.update_state(mock_monitor, config) - state.update_state(mock_monitor, config) - monitor_state = state.get_state("monitor_instance_id") - #assert - monitor_state.should_send.must_equal true - monitor_state.old_state.must_equal "none" - monitor_state.new_state.must_equal "fail" - end - - it 'updates should_send to false for monitors which need consistent state change and has NO state change' do - #arrange - state = HealthMonitorState.new - mock_monitor = Mock.new - def mock_monitor.state; "pass"; end - def mock_monitor.monitor_id; "monitor_id"; end - def mock_monitor.monitor_instance_id; "monitor_instance_id"; end - def mock_monitor.transition_date_time; Time.now.utc.iso8601; end - def mock_monitor.details; {"state" => "pass", "timestamp" => Time.now.utc.iso8601, "details" => {}}; end - - config = JSON.parse('{ - "WarnIfGreaterThanPercentage": 80.0, - "FailIfGreaterThanPercentage": 90.0, - "ConsecutiveSamplesForStateTransition": 3 - }') - #act - state.update_state(mock_monitor, config) - monitor_state = state.get_state("monitor_instance_id") - #assert - monitor_state.should_send.must_equal true - monitor_state.old_state.must_equal "none" - monitor_state.new_state.must_equal "none" - - - #arrange - def mock_monitor.state; "pass"; end - def mock_monitor.details; {"state" => "pass", "timestamp" => Time.now.utc.iso8601, "details" => {}}; end - #act - state.update_state(mock_monitor, config) - monitor_state = state.get_state("monitor_instance_id") - #assert - monitor_state.should_send.must_equal false - - #act - state.update_state(mock_monitor, config) - monitor_state.should_send.must_equal true - monitor_state.old_state.must_equal "none" - monitor_state.new_state.must_equal "pass" - - #act - state.update_state(mock_monitor, config) - monitor_state = state.get_state("monitor_instance_id") - #assert - monitor_state.should_send.must_equal false - monitor_state.old_state.must_equal "none" - monitor_state.new_state.must_equal "pass" - end - -end \ No newline at end of file diff --git a/test/unit-tests/plugins/health/health_signal_reducer_spec.rb b/test/unit-tests/plugins/health/health_signal_reducer_spec.rb 
deleted file mode 100644 index 90f4ab352..000000000 --- a/test/unit-tests/plugins/health/health_signal_reducer_spec.rb +++ /dev/null @@ -1,96 +0,0 @@ -require_relative '../test_helpers' -# consider doing this in test_helpers.rb so that this code is common -Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/plugins/ruby/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each { |file| require file } -include HealthModel -include Minitest - -describe "HealthSignalReducer spec" do - it "returns the right set of records -- no reduction" do - #arrange - record1 = Mock.new - def record1.monitor_id; "node_cpu_utilization"; end - def record1.monitor_instance_id; "node_cpu_utilization-node1"; end - def record1.labels; {HealthMonitorLabels::HOSTNAME => "node1"}; end - inventory = Mock.new - def inventory.get_nodes; ["node1"]; end - def inventory.get_workload_names; []; end - reducer = HealthSignalReducer.new - #act - reduced = reducer.reduce_signals([record1], inventory) - #Assert - assert_equal reduced.size, 1 - end - - it "returns only the latest record if multiple records are present for the same monitor" do - #arrange - record1 = Mock.new - def record1.monitor_id; "node_cpu_utilization"; end - def record1.monitor_instance_id; "node_cpu_utilization-node1"; end - def record1.labels; {HealthMonitorLabels::HOSTNAME => "node1"}; end - def record1.transition_date_time; Time.now.utc.iso8601 ; end - - - record2 = Mock.new - def record2.monitor_id; "node_cpu_utilization"; end - def record2.monitor_instance_id; "node_cpu_utilization-node1"; end - def record2.labels; {HealthMonitorLabels::HOSTNAME => "node1"}; end - def record2.transition_date_time; "#{Time.now.utc.iso8601}" ; end - - inventory = Mock.new - def inventory.get_nodes; ["node1"]; end - def inventory.get_workload_names; []; end - reducer = HealthSignalReducer.new - #act - reduced = reducer.reduce_signals([record1, record2], inventory) - #Assert - assert_equal reduced.size, 1 - end - - it "returns only those records if the node is present in the inventory" do - #arrange - record1 = Mock.new - def record1.monitor_id; "node_cpu_utilization"; end - def record1.monitor_instance_id; "node_cpu_utilization-node1"; end - def record1.labels; {HealthMonitorLabels::HOSTNAME => "node1"}; end - inventory = Mock.new - def inventory.get_nodes; ["node2"]; end - def inventory.get_workload_names; []; end - - #act - reducer = HealthSignalReducer.new - #assert - assert_equal reducer.reduce_signals([record1], inventory).size, 0 - end - - it "returns only those records if the workdload name is present in the inventory" do - #arrange - record1 = Mock.new - def record1.monitor_id; "user_workload_pods_ready"; end - def record1.monitor_instance_id; "user_workload_pods_ready-workload1"; end - def record1.labels; {HealthMonitorLabels::NAMESPACE => "default", HealthMonitorLabels::WORKLOAD_NAME => "workload1"}; end - def record1.transition_date_time; Time.now.utc.iso8601 ; end - - inventory = Mock.new - def inventory.get_nodes; ["node2"]; end - def inventory.get_workload_names; ["default~~workload1"]; end - reducer = HealthSignalReducer.new - - #act - reduced = reducer.reduce_signals([record1], inventory) - - #assert - assert_equal reduced.size, 1 - - #arrange - record2 = Mock.new - def record2.monitor_id; "user_workload_pods_ready"; end - def record2.monitor_instance_id; "user_workload_pods_ready-workload2"; end - def record2.labels; {HealthMonitorLabels::NAMESPACE => "default1", HealthMonitorLabels::WORKLOAD_NAME => "workload2"}; end - 
def record1.transition_date_time; Time.now.utc.iso8601 ; end - #act - reduced = reducer.reduce_signals([record1, record2], inventory) - #assert - assert_equal reduced.size, 1 - end - -end diff --git a/test/unit-tests/plugins/health/kube_api_down_handler_spec.rb b/test/unit-tests/plugins/health/kube_api_down_handler_spec.rb deleted file mode 100644 index 5ace7c724..000000000 --- a/test/unit-tests/plugins/health/kube_api_down_handler_spec.rb +++ /dev/null @@ -1,26 +0,0 @@ -require_relative '../test_helpers' -Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/plugins/ruby/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each { |file| require file } -include HealthModel - -describe "KubeApiDownHandler spec" do - it "updates states for monitors in monitors_to_change" do - #arrange - record1 = HealthMonitorRecord.new("node_condition", "node_condition-node1", Time.now.utc.iso8601, "pass", {}, {}, {}) - record2 = HealthMonitorRecord.new("kube_api_status", "kube_api_status", Time.now.utc.iso8601, "fail", {}, {}, {}) - record3 = HealthMonitorRecord.new("user_workload_pods_ready", "user_workload_pods_ready-workload1", Time.now.utc.iso8601, "pass", {}, {}, {}) - record4 = HealthMonitorRecord.new("system_workload_pods_ready", "system_workload_pods_ready-workload2", Time.now.utc.iso8601, "pass", {}, {}, {}) - record5 = HealthMonitorRecord.new("subscribed_capacity_cpu", "subscribed_capacity_cpu", Time.now.utc.iso8601, "pass", {}, {}, {}) - record6 = HealthMonitorRecord.new("subscribed_capacity_memory", "subscribed_capacity_memory", Time.now.utc.iso8601, "pass", {}, {}, {}) - handler = HealthKubeApiDownHandler.new - - #act - handler.handle_kube_api_down([record1, record2, record3, record4, record5, record6]) - #assert - assert_equal record1.state, HealthMonitorStates::UNKNOWN - assert_equal record3.state, HealthMonitorStates::UNKNOWN - assert_equal record4.state, HealthMonitorStates::UNKNOWN - assert_equal record5.state, HealthMonitorStates::UNKNOWN - assert_equal record6.state, HealthMonitorStates::UNKNOWN - - end -end diff --git a/test/unit-tests/plugins/health/monitor_factory_spec.rb b/test/unit-tests/plugins/health/monitor_factory_spec.rb deleted file mode 100644 index 4d4ac5b31..000000000 --- a/test/unit-tests/plugins/health/monitor_factory_spec.rb +++ /dev/null @@ -1,28 +0,0 @@ -require_relative '../test_helpers' -# consider doing this in test_helpers.rb so that this code is common -Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/plugins/ruby/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each { |file| require file } -include HealthModel - -describe "MonitorFactory Spec" do - it "returns UnitMonitor for create_unit_monitor" do - #Arrange - factory = MonitorFactory.new() - monitor_record = HealthMonitorRecord.new(:monitor_id, :monitor_instance_id, :time, :pass, {}, {}, {}) - #act - monitor = factory.create_unit_monitor(monitor_record) - # assert - monitor.must_be_kind_of(UnitMonitor) - end - - it "returns AggregateMonitor for create_aggregate_monitor" do - #arrange - factory = MonitorFactory.new() - mock = Minitest::Mock.new - def mock.state; :pass; end - def mock.transition_date_time; :time; end - #act - monitor = factory.create_aggregate_monitor(:monitor_id, :monitor_instance_id, :pass, {}, {}, mock) - #assert - monitor.must_be_kind_of(AggregateMonitor) - end -end \ No newline at end of file diff --git a/test/unit-tests/plugins/health/monitor_set_spec.rb b/test/unit-tests/plugins/health/monitor_set_spec.rb deleted file 
mode 100644 index b5cd01f50..000000000 --- a/test/unit-tests/plugins/health/monitor_set_spec.rb +++ /dev/null @@ -1,58 +0,0 @@ -require_relative '../test_helpers' -# consider doing this in test_helpers.rb so that this code is common -Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/plugins/ruby/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each { |file| require file } -include HealthModel - -describe "MonitorSet Spec" do - it "add_or_update -- adds a monitor" do - #arrange - set = MonitorSet.new - mock_monitor = MiniTest::Mock.new - def mock_monitor.monitor_instance_id; "monitor_instance_id_1"; end - def mock_monitor.state; :pass;end - #act - set.add_or_update(mock_monitor) - #assert - assert_equal set.get_map.size, 1 - assert_equal set.get_map.key?("monitor_instance_id_1"), true - end - - it "add_or_update -- updates a monitor" do - #arrange - set = MonitorSet.new - mock_monitor = MiniTest::Mock.new - def mock_monitor.monitor_instance_id; "monitor_instance_id_1"; end - def mock_monitor.state; :pass;end - #act - set.add_or_update(mock_monitor) - #assert - assert_equal set.get_map["monitor_instance_id_1"].state, :pass - - #act - def mock_monitor.state; :fail;end - set.add_or_update(mock_monitor) - #assert - assert_equal set.get_map["monitor_instance_id_1"].state, :fail - end - - it "delete -- delete a monitor" do - #arrange - set = MonitorSet.new - mock_monitor = MiniTest::Mock.new - def mock_monitor.monitor_instance_id; "monitor_instance_id_1"; end - def mock_monitor.state; :pass;end - set.add_or_update(mock_monitor) - - #act - set.delete("monitor_instance_id_1") - #assert - assert_equal set.get_map.size, 0 - end - - it "get_map -- returns a hash" do - #arrange - set = MonitorSet.new - #act and assert - set.get_map.must_be_kind_of(Hash) - end -end diff --git a/test/unit-tests/plugins/health/nodes.json b/test/unit-tests/plugins/health/nodes.json deleted file mode 100644 index f1721e076..000000000 --- a/test/unit-tests/plugins/health/nodes.json +++ /dev/null @@ -1,1966 +0,0 @@ -{ - "apiVersion": "v1", - "items": [ - { - "apiVersion": "v1", - "kind": "Node", - "metadata": { - "annotations": { - "node.alpha.kubernetes.io/ttl": "0", - "volumes.kubernetes.io/controller-managed-attach-detach": "true" - }, - "creationTimestamp": "2019-03-12T16:40:36Z", - "labels": { - "agentpool": "nodepool1", - "beta.kubernetes.io/arch": "amd64", - "beta.kubernetes.io/instance-type": "Standard_DS1_v2", - "beta.kubernetes.io/os": "linux", - "failure-domain.beta.kubernetes.io/region": "eastus", - "failure-domain.beta.kubernetes.io/zone": "0", - "kubernetes.azure.com/cluster": "MC_dilipr-health-test_dilipr-health-test_eastus", - "kubernetes.io/hostname": "aks-nodepool1-19574989-0", - "kubernetes.io/role": "agent", - "node-role.kubernetes.io/agent": "", - "storageprofile": "managed", - "storagetier": "Premium_LRS" - }, - "name": "aks-nodepool1-19574989-0", - "resourceVersion": "19068106", - "selfLink": "/api/v1/nodes/aks-nodepool1-19574989-0", - "uid": "9012b16c-44e5-11e9-9920-423525a6b683" - }, - "spec": { - "podCIDR": "10.244.1.0/24", - "providerID": "azure:///subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourceGroups/MC_dilipr-health-test_dilipr-health-test_eastus/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-19574989-0" - }, - "status": { - "addresses": [ - { - "address": "aks-nodepool1-19574989-0", - "type": "Hostname" - }, - { - "address": "10.240.0.4", - "type": "InternalIP" - } - ], - "allocatable": { - "cpu": "940m", - "ephemeral-storage": 
"28043041951", - "hugepages-1Gi": "0", - "hugepages-2Mi": "0", - "memory": "2504708Ki", - "pods": "110" - }, - "capacity": { - "cpu": "1", - "ephemeral-storage": "30428648Ki", - "hugepages-1Gi": "0", - "hugepages-2Mi": "0", - "memory": "3524612Ki", - "pods": "110" - }, - "conditions": [ - { - "lastHeartbeatTime": "2019-03-12T16:42:18Z", - "lastTransitionTime": "2019-03-12T16:42:18Z", - "message": "RouteController created a route", - "reason": "RouteCreated", - "status": "False", - "type": "NetworkUnavailable" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:22Z", - "lastTransitionTime": "2019-07-29T08:16:01Z", - "message": "kubelet has sufficient disk space available", - "reason": "KubeletHasSufficientDisk", - "status": "False", - "type": "OutOfDisk" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:22Z", - "lastTransitionTime": "2019-07-29T08:16:01Z", - "message": "kubelet has sufficient memory available", - "reason": "KubeletHasSufficientMemory", - "status": "False", - "type": "MemoryPressure" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:22Z", - "lastTransitionTime": "2019-07-29T08:16:01Z", - "message": "kubelet has no disk pressure", - "reason": "KubeletHasNoDiskPressure", - "status": "False", - "type": "DiskPressure" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:22Z", - "lastTransitionTime": "2019-03-12T16:40:36Z", - "message": "kubelet has sufficient PID available", - "reason": "KubeletHasSufficientPID", - "status": "False", - "type": "PIDPressure" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:22Z", - "lastTransitionTime": "2019-07-29T08:16:01Z", - "message": "kubelet is posting ready status. AppArmor enabled", - "reason": "KubeletReady", - "status": "True", - "type": "Ready" - } - ], - "daemonEndpoints": { - "kubeletEndpoint": { - "Port": 10250 - } - }, - "images": [ - { - "names": [ - "nickchase/rss-php-nginx@sha256:48da56a77fe4ecff4917121365d8e0ce615ebbdfe31f48a996255f5592894e2b", - "nickchase/rss-php-nginx:v1" - ], - "sizeBytes": 677038498 - }, - { - "names": [ - "rdilip83/logeverysecond@sha256:6fe5624808609c507178a77f94384fb9794a4d6b7d102ed8016a4baf608164a1", - "rdilip83/logeverysecond:v2" - ], - "sizeBytes": 674931590 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913", - "k8s.gcr.io/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913", - "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8", - "k8s.gcr.io/hyperkube-amd64:v1.11.8" - ], - "sizeBytes": 615263658 - }, - { - "names": [ - "microsoft/oms@sha256:de83d1df24cb86a3a3110bd03abbd5704d7a7345565b1996f49ff001a3665385", - "microsoft/oms:healthpreview04262019" - ], - "sizeBytes": 514907213 - }, - { - "names": [ - "rdilip83/fixrubyerror@sha256:6b7f36cf6258b311015493ab025f06577d758c45bc5010d022ac160b9f40ea5d", - "rdilip83/fixrubyerror:latest" - ], - "sizeBytes": 494068028 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019" - ], - "sizeBytes": 494067935 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:fb2b90ce9bf7186fd9dfae97f5f72f9b9c80c8a0493af3cff74179cd4ff847c0", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08212019" - ], - "sizeBytes": 494067572 - }, - { - "names": [ - 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:c646e180483d295ffac114fb9df513db02553af7879681814d5910764653dd2d", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08202019" - ], - "sizeBytes": 494067210 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:c21b596a22a1338ed293d01681f327acc871ee502ed779ec1109d6a93375bb3b", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08192019" - ], - "sizeBytes": 494055088 - }, - { - "names": [ - "rdilip83/cifeatureprod08192019@sha256:7815bba9a805e4e8df33356fd532671de45525ce9c6e936e14f9b126e2097ecd", - "rdilip83/cifeatureprod08192019:v1" - ], - "sizeBytes": 494055088 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:59e34aab9f6e16a87e880b1ee1c9dd5434ee40dd29502e74aceefabf51443717", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:internaltesthealth08192019" - ], - "sizeBytes": 494053562 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:6387d0dedf4de0bab430f681ef61361f63a20e1c4c287a9b60ea5460283ac6cf", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ci_feature_prod_health08192019" - ], - "sizeBytes": 494053562 - }, - { - "names": [ - "rdilip83/hc08192019@sha256:014d936771508d499ac4c15043e23b16bce8de0019fb2048b99540cbe9084895", - "rdilip83/hc08192019:1" - ], - "sizeBytes": 494053562 - }, - { - "names": [ - "rdilip83/health-rc@sha256:8ad12bce5ffd27b301bc6fe4355c8affa6fce080ae7e2291dec3a0ed11bb9483", - "rdilip83/health-rc:3" - ], - "sizeBytes": 494052863 - }, - { - "names": [ - "rdilip83/health_ci_feature_image@sha256:1a574d25884483083e8cbaacbf0cb7c4e442dc736d480615c65f5c71f8969b13", - "rdilip83/health_ci_feature_image:v1" - ], - "sizeBytes": 494052147 - }, - { - "names": [ - "rdilip83/healthrc@sha256:816c8cef09822daf050a0fca6f92e7ac19147ff4bf1a823d43fe70f73470cc0c", - "rdilip83/healthrc:v3" - ], - "sizeBytes": 494052138 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:d35aac044d1adc3d02269fde78f8dfd923db94b81288447cf6fdd482970a333b", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthmerge08142019" - ], - "sizeBytes": 494052135 - }, - { - "names": [ - "rdilip83/healthrc@sha256:a130780e56ac0edb3ca29477e12edd5e9b5d08b5732dbd59ede9beb58e21eca7", - "rdilip83/healthrc:v2" - ], - "sizeBytes": 494051682 - }, - { - "names": [ - "rdilip83/healthmerge@sha256:24d270b0f59fb484c283922474736c3cba50f8aad0270bc0a3acd14284694eea", - "rdilip83/healthmerge:v8" - ], - "sizeBytes": 494010139 - }, - { - "names": [ - "rdilip83/health-rc@sha256:b1d24728eb808d301da426b76b7f7b79606204c4c2b695a24ac670be8276d55d", - "rdilip83/health-rc:1" - ], - "sizeBytes": 494000891 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:a0666957cccbfdf5784accd1133408bf017c28a6e694d9a2ae74da94eef2d285", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview08012019" - ], - "sizeBytes": 493994261 - }, - { - "names": [ - "rdilip83/mergehealth@sha256:32c9b35a6809c54d5296e2ca2b122b35a4ad8c852622174cc5a9f92cc27e56e4", - "rdilip83/mergehealth:v3" - ], - "sizeBytes": 493988815 - }, - { - "names": [ - "rdilip83/mergehealth@sha256:a3521e8f36e007b3cb949e0356a75394ac61fd2024ca1ec4827b8d54fb068534", - "rdilip83/mergehealth:v1" - ], - "sizeBytes": 493981585 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:0438e4690e042b195917e160b8949aeb339520ee19c898a8bb9452f36d1f84f1", - 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview07182019" - ], - "sizeBytes": 493977357 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:9ebc410a36856176921dba81b5bd43132469209b315f52be346690435419b9bb" - ], - "sizeBytes": 493946790 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:4e51195a9c77bd166fc90ee5f6143a4604b502ab7ef0f06431dec10c341b10f3", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview06272019" - ], - "sizeBytes": 493893635 - }, - { - "names": [ - "rdilip83/healthpreview06272019@sha256:d888ba5ff5e5810113a32f9c9812a5e28088cc81b902e95a185fe465a514029c", - "rdilip83/healthpreview06272019:latest" - ], - "sizeBytes": 493893633 - }, - { - "names": [ - "rdilip83/healthpreview06252019-1@sha256:1561876cffe94433a569f29f5231548e039193ebaa7ec640d22439675179e43f", - "rdilip83/healthpreview06252019-1:latest" - ], - "sizeBytes": 493887387 - }, - { - "names": [ - "rdilip83/healthpreview06252019@sha256:6597ff599a78ac452a4138dedb9e08c0ccd3e8b01594b033fd78ba9dbb41fe9e", - "rdilip83/healthpreview06252019:latest" - ], - "sizeBytes": 493887384 - }, - { - "names": [ - "rdilip83/healthpreview06242019@sha256:c4f565d92086d1ee56e6016178fed5c668352dc0ca0047f02910bdcb87a482c4", - "rdilip83/healthpreview06242019:latest" - ], - "sizeBytes": 493850850 - }, - { - "names": [ - "rdilip83/healthpreview06212019-1@sha256:937ce5801a0097a1cbc4eff5399c1973b4c6223ece9279b35207368b99f82b96", - "rdilip83/healthpreview06212019-1:latest" - ], - "sizeBytes": 493850674 - }, - { - "names": [ - "rdilip83/healthpreview06192019@sha256:f92cb5283814d446f0acde6a489648ea197496d5f85b27ca959ec97bce742d8a", - "rdilip83/healthpreview06192019:latest" - ], - "sizeBytes": 493799437 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:0f798cb7d56931b231f71e38e7fa5bf898b69e611247a566701f70a5f29a9799", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod07092019" - ], - "sizeBytes": 467692116 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:3734a084fa9681c7e930eb90cad45a8f282c24af63065a720a2327b1683f3ba4", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06142019" - ], - "sizeBytes": 466882569 - }, - { - "names": [ - "rdilip83/mergehealth@sha256:16402c34e2d7de72c2ebc18ec8e9f7933fa25f6a7f83bceb84483ba95e3902f7", - "rdilip83/mergehealth:v2" - ], - "sizeBytes": 448931997 - }, - { - "names": [ - "rdilip83/healthpreview06212019@sha256:5860c9caaf544f2e7c46edad5cdfb69e22398e20dc87cb8a4cd630b5b7000074", - "rdilip83/healthpreview06212019:latest" - ], - "sizeBytes": 448366491 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/hcp-tunnel-front@sha256:68878ee3ea1781b322ea3952c3370e31dd89be8bb0864e2bf27bdba6dc904c41", - "aksrepos.azurecr.io/prod/hcp-tunnel-front@sha256:68878ee3ea1781b322ea3952c3370e31dd89be8bb0864e2bf27bdba6dc904c41", - "aksrepos.azurecr.io/mirror/hcp-tunnel-front:v1.9.2-v4.0.7", - "aksrepos.azurecr.io/prod/hcp-tunnel-front:v1.9.2-v4.0.7" - ], - "sizeBytes": 383483267 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/kubernetes-dashboard-amd64@sha256:0ae6b69432e78069c5ce2bcde0fe409c5c4d6f0f4d9cd50a17974fea38898747", - "k8s.gcr.io/kubernetes-dashboard-amd64@sha256:0ae6b69432e78069c5ce2bcde0fe409c5c4d6f0f4d9cd50a17974fea38898747", - "aksrepos.azurecr.io/mirror/kubernetes-dashboard-amd64:v1.10.1", - "k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1" - ], - "sizeBytes": 121711221 - }, - { - "names": [ - 
"nginx@sha256:23b4dcdf0d34d4a129755fc6f52e1c6e23bb34ea011b315d87e193033bcd1b68" - ], - "sizeBytes": 109331233 - }, - { - "names": [ - "nginx@sha256:bdbf36b7f1f77ffe7bd2a32e59235dff6ecf131e3b6b5b96061c652f30685f3a", - "nginx:latest" - ], - "sizeBytes": 109258867 - }, - { - "names": [ - "nginx@sha256:b73f527d86e3461fd652f62cf47e7b375196063bbbd503e853af5be16597cb2e", - "nginx:1.15.5" - ], - "sizeBytes": 109083698 - }, - { - "names": [ - "debian@sha256:118cf8f3557e1ea766c02f36f05f6ac3e63628427ea8965fb861be904ec35a6f", - "debian:latest" - ], - "sizeBytes": 100594230 - }, - { - "names": [ - "nginx@sha256:e3456c851a152494c3e4ff5fcc26f240206abac0c9d794affb40e0714846c451", - "nginx:1.7.9" - ], - "sizeBytes": 91664166 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef", - "aksrepos.azurecr.io/prod/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef", - "deis/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef", - "aksrepos.azurecr.io/mirror/kube-svc-redirect:v1.0.2", - "aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2" - ], - "sizeBytes": 82897218 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/heapster-amd64@sha256:fc33c690a3a446de5abc24b048b88050810a58b9e4477fa763a43d7df029301a", - "k8s.gcr.io/heapster-amd64@sha256:fc33c690a3a446de5abc24b048b88050810a58b9e4477fa763a43d7df029301a", - "aksrepos.azurecr.io/mirror/heapster-amd64:v1.5.3", - "k8s.gcr.io/heapster-amd64:v1.5.3" - ], - "sizeBytes": 75318342 - }, - { - "names": [ - "vishiy/hello@sha256:99d60766e39df52d28fe8db9c659633d96ba1d84fd672298dce047d8a86c478a", - "vishiy/hello:err100eps" - ], - "sizeBytes": 54649865 - }, - { - "names": [ - "k8s.gcr.io/k8s-dns-kube-dns-amd64@sha256:618a82fa66cf0c75e4753369a6999032372be7308866fc9afb381789b1e5ad52", - "k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.13" - ], - "sizeBytes": 51157394 - }, - { - "names": [ - "k8s.gcr.io/metrics-server-amd64@sha256:49a9f12f7067d11f42c803dbe61ed2c1299959ad85cb315b25ff7eef8e6b8892", - "k8s.gcr.io/metrics-server-amd64:v0.2.1" - ], - "sizeBytes": 42541759 - }, - { - "names": [ - "k8s.gcr.io/k8s-dns-sidecar-amd64@sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4", - "k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10" - ], - "sizeBytes": 41635309 - }, - { - "names": [ - "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64@sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8", - "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10" - ], - "sizeBytes": 40372149 - } - ], - "nodeInfo": { - "architecture": "amd64", - "bootID": "d8f6c00f-a085-450e-bf5c-12e651a0fcfc", - "containerRuntimeVersion": "docker://3.0.4", - "kernelVersion": "4.15.0-1037-azure", - "kubeProxyVersion": "v1.11.8", - "kubeletVersion": "v1.11.8", - "machineID": "cc9ed99e383540a4b0379995bb779221", - "operatingSystem": "linux", - "osImage": "Ubuntu 16.04.5 LTS", - "systemUUID": "301B3B88-C7BD-3D45-A3CB-3CD66A42EB6F" - } - } - }, - { - "apiVersion": "v1", - "kind": "Node", - "metadata": { - "annotations": { - "node.alpha.kubernetes.io/ttl": "0", - "volumes.kubernetes.io/controller-managed-attach-detach": "true" - }, - "creationTimestamp": "2019-03-12T16:40:33Z", - "labels": { - "agentpool": "nodepool1", - "beta.kubernetes.io/arch": "amd64", - "beta.kubernetes.io/instance-type": "Standard_DS1_v2", - "beta.kubernetes.io/os": "linux", - "failure-domain.beta.kubernetes.io/region": "eastus", - "failure-domain.beta.kubernetes.io/zone": "1", - 
"kubernetes.azure.com/cluster": "MC_dilipr-health-test_dilipr-health-test_eastus", - "kubernetes.io/hostname": "aks-nodepool1-19574989-1", - "kubernetes.io/role": "agent", - "node-role.kubernetes.io/agent": "", - "storageprofile": "managed", - "storagetier": "Premium_LRS" - }, - "name": "aks-nodepool1-19574989-1", - "resourceVersion": "19068104", - "selfLink": "/api/v1/nodes/aks-nodepool1-19574989-1", - "uid": "8e1b5c77-44e5-11e9-9920-423525a6b683" - }, - "spec": { - "podCIDR": "10.244.0.0/24", - "providerID": "azure:///subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourceGroups/MC_dilipr-health-test_dilipr-health-test_eastus/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-19574989-1" - }, - "status": { - "addresses": [ - { - "address": "aks-nodepool1-19574989-1", - "type": "Hostname" - }, - { - "address": "10.240.0.5", - "type": "InternalIP" - } - ], - "allocatable": { - "cpu": "940m", - "ephemeral-storage": "28043041951", - "hugepages-1Gi": "0", - "hugepages-2Mi": "0", - "memory": "2504708Ki", - "pods": "110" - }, - "capacity": { - "cpu": "1", - "ephemeral-storage": "30428648Ki", - "hugepages-1Gi": "0", - "hugepages-2Mi": "0", - "memory": "3524612Ki", - "pods": "110" - }, - "conditions": [ - { - "lastHeartbeatTime": "2019-03-12T16:42:30Z", - "lastTransitionTime": "2019-03-12T16:42:30Z", - "message": "RouteController created a route", - "reason": "RouteCreated", - "status": "False", - "type": "NetworkUnavailable" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:21Z", - "lastTransitionTime": "2019-07-23T14:46:10Z", - "message": "kubelet has sufficient disk space available", - "reason": "KubeletHasSufficientDisk", - "status": "False", - "type": "OutOfDisk" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:21Z", - "lastTransitionTime": "2019-07-23T14:46:10Z", - "message": "kubelet has sufficient memory available", - "reason": "KubeletHasSufficientMemory", - "status": "False", - "type": "MemoryPressure" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:21Z", - "lastTransitionTime": "2019-07-23T14:46:10Z", - "message": "kubelet has no disk pressure", - "reason": "KubeletHasNoDiskPressure", - "status": "False", - "type": "DiskPressure" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:21Z", - "lastTransitionTime": "2019-03-12T16:40:33Z", - "message": "kubelet has sufficient PID available", - "reason": "KubeletHasSufficientPID", - "status": "False", - "type": "PIDPressure" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:21Z", - "lastTransitionTime": "2019-07-23T14:46:10Z", - "message": "kubelet is posting ready status. 
AppArmor enabled", - "reason": "KubeletReady", - "status": "True", - "type": "Ready" - } - ], - "daemonEndpoints": { - "kubeletEndpoint": { - "Port": 10250 - } - }, - "images": [ - { - "names": [ - "perl@sha256:268e7af9853bcc6d2100e2ad76e928c2ca861518217c269b8a762849a8617c12", - "perl:latest" - ], - "sizeBytes": 890592834 - }, - { - "names": [ - "nickchase/rss-php-nginx@sha256:48da56a77fe4ecff4917121365d8e0ce615ebbdfe31f48a996255f5592894e2b", - "nickchase/rss-php-nginx:v1" - ], - "sizeBytes": 677038498 - }, - { - "names": [ - "rdilip83/jsonlogger@sha256:82b67ca5e0650cd5e47f5b51659d61cee035e5d8dcd8a79c50358cd2beb3b5a8", - "rdilip83/jsonlogger:v12" - ], - "sizeBytes": 676594134 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913", - "k8s.gcr.io/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913", - "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8", - "k8s.gcr.io/hyperkube-amd64:v1.11.8" - ], - "sizeBytes": 615263658 - }, - { - "names": [ - "rdilip83/fixrubyerror@sha256:6b7f36cf6258b311015493ab025f06577d758c45bc5010d022ac160b9f40ea5d", - "rdilip83/fixrubyerror:latest" - ], - "sizeBytes": 494068028 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019" - ], - "sizeBytes": 494067935 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:fb2b90ce9bf7186fd9dfae97f5f72f9b9c80c8a0493af3cff74179cd4ff847c0", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08212019" - ], - "sizeBytes": 494067572 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:c646e180483d295ffac114fb9df513db02553af7879681814d5910764653dd2d", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08202019" - ], - "sizeBytes": 494067210 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:c21b596a22a1338ed293d01681f327acc871ee502ed779ec1109d6a93375bb3b", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08192019" - ], - "sizeBytes": 494055088 - }, - { - "names": [ - "rdilip83/cifeatureprod08192019@sha256:7815bba9a805e4e8df33356fd532671de45525ce9c6e936e14f9b126e2097ecd", - "rdilip83/cifeatureprod08192019:v1" - ], - "sizeBytes": 494055088 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:59e34aab9f6e16a87e880b1ee1c9dd5434ee40dd29502e74aceefabf51443717", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:internaltesthealth08192019" - ], - "sizeBytes": 494053562 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:6387d0dedf4de0bab430f681ef61361f63a20e1c4c287a9b60ea5460283ac6cf", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ci_feature_prod_health08192019" - ], - "sizeBytes": 494053562 - }, - { - "names": [ - "rdilip83/hc08192019@sha256:014d936771508d499ac4c15043e23b16bce8de0019fb2048b99540cbe9084895", - "rdilip83/hc08192019:1" - ], - "sizeBytes": 494053562 - }, - { - "names": [ - "rdilip83/health-rc@sha256:8ad12bce5ffd27b301bc6fe4355c8affa6fce080ae7e2291dec3a0ed11bb9483", - "rdilip83/health-rc:3" - ], - "sizeBytes": 494052863 - }, - { - "names": [ - "rdilip83/health_ci_feature_image@sha256:1a574d25884483083e8cbaacbf0cb7c4e442dc736d480615c65f5c71f8969b13", - "rdilip83/health_ci_feature_image:v1" - ], - 
"sizeBytes": 494052147 - }, - { - "names": [ - "rdilip83/healthrc@sha256:816c8cef09822daf050a0fca6f92e7ac19147ff4bf1a823d43fe70f73470cc0c", - "rdilip83/healthrc:v3" - ], - "sizeBytes": 494052138 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:d35aac044d1adc3d02269fde78f8dfd923db94b81288447cf6fdd482970a333b", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthmerge08142019" - ], - "sizeBytes": 494052135 - }, - { - "names": [ - "rdilip83/healthrc@sha256:a130780e56ac0edb3ca29477e12edd5e9b5d08b5732dbd59ede9beb58e21eca7", - "rdilip83/healthrc:v2" - ], - "sizeBytes": 494051682 - }, - { - "names": [ - "rdilip83/healthmerge@sha256:24d270b0f59fb484c283922474736c3cba50f8aad0270bc0a3acd14284694eea", - "rdilip83/healthmerge:v8" - ], - "sizeBytes": 494010139 - }, - { - "names": [ - "rdilip83/health-rc@sha256:b1d24728eb808d301da426b76b7f7b79606204c4c2b695a24ac670be8276d55d", - "rdilip83/health-rc:1" - ], - "sizeBytes": 494000891 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:a0666957cccbfdf5784accd1133408bf017c28a6e694d9a2ae74da94eef2d285", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview08012019" - ], - "sizeBytes": 493994261 - }, - { - "names": [ - "rdilip83/mergehealth@sha256:32c9b35a6809c54d5296e2ca2b122b35a4ad8c852622174cc5a9f92cc27e56e4", - "rdilip83/mergehealth:v3" - ], - "sizeBytes": 493988815 - }, - { - "names": [ - "rdilip83/mergehealth@sha256:a3521e8f36e007b3cb949e0356a75394ac61fd2024ca1ec4827b8d54fb068534", - "rdilip83/mergehealth:v1" - ], - "sizeBytes": 493981585 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:0438e4690e042b195917e160b8949aeb339520ee19c898a8bb9452f36d1f84f1", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview07182019" - ], - "sizeBytes": 493977357 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:9ebc410a36856176921dba81b5bd43132469209b315f52be346690435419b9bb" - ], - "sizeBytes": 493946790 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:4e51195a9c77bd166fc90ee5f6143a4604b502ab7ef0f06431dec10c341b10f3", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview06272019" - ], - "sizeBytes": 493893635 - }, - { - "names": [ - "rdilip83/healthpreview06272019@sha256:d888ba5ff5e5810113a32f9c9812a5e28088cc81b902e95a185fe465a514029c", - "rdilip83/healthpreview06272019:latest" - ], - "sizeBytes": 493893633 - }, - { - "names": [ - "rdilip83/healthpreview06252019-1@sha256:1561876cffe94433a569f29f5231548e039193ebaa7ec640d22439675179e43f", - "rdilip83/healthpreview06252019-1:latest" - ], - "sizeBytes": 493887387 - }, - { - "names": [ - "rdilip83/healthpreview06252019@sha256:6597ff599a78ac452a4138dedb9e08c0ccd3e8b01594b033fd78ba9dbb41fe9e", - "rdilip83/healthpreview06252019:latest" - ], - "sizeBytes": 493887384 - }, - { - "names": [ - "rdilip83/healthpreview06242019@sha256:c4f565d92086d1ee56e6016178fed5c668352dc0ca0047f02910bdcb87a482c4", - "rdilip83/healthpreview06242019:latest" - ], - "sizeBytes": 493850850 - }, - { - "names": [ - "rdilip83/healthpreview06212019-1@sha256:937ce5801a0097a1cbc4eff5399c1973b4c6223ece9279b35207368b99f82b96", - "rdilip83/healthpreview06212019-1:latest" - ], - "sizeBytes": 493850674 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:0f798cb7d56931b231f71e38e7fa5bf898b69e611247a566701f70a5f29a9799", - 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod07092019" - ], - "sizeBytes": 467692116 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:3734a084fa9681c7e930eb90cad45a8f282c24af63065a720a2327b1683f3ba4", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06142019" - ], - "sizeBytes": 466882569 - }, - { - "names": [ - "rdilip83/mergehealth@sha256:16402c34e2d7de72c2ebc18ec8e9f7933fa25f6a7f83bceb84483ba95e3902f7", - "rdilip83/mergehealth:v2" - ], - "sizeBytes": 448931997 - }, - { - "names": [ - "deis/hcp-tunnel-front@sha256:68878ee3ea1781b322ea3952c3370e31dd89be8bb0864e2bf27bdba6dc904c41", - "deis/hcp-tunnel-front:v1.9.2-v4.0.7" - ], - "sizeBytes": 383483267 - }, - { - "names": [ - "nginx@sha256:23b4dcdf0d34d4a129755fc6f52e1c6e23bb34ea011b315d87e193033bcd1b68" - ], - "sizeBytes": 109331233 - }, - { - "names": [ - "nginx@sha256:bdbf36b7f1f77ffe7bd2a32e59235dff6ecf131e3b6b5b96061c652f30685f3a", - "nginx:latest" - ], - "sizeBytes": 109258867 - }, - { - "names": [ - "debian@sha256:118cf8f3557e1ea766c02f36f05f6ac3e63628427ea8965fb861be904ec35a6f", - "debian:latest" - ], - "sizeBytes": 100594230 - }, - { - "names": [ - "nginx@sha256:e3456c851a152494c3e4ff5fcc26f240206abac0c9d794affb40e0714846c451", - "nginx:1.7.9" - ], - "sizeBytes": 91664166 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef", - "aksrepos.azurecr.io/prod/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef", - "deis/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef", - "aksrepos.azurecr.io/mirror/kube-svc-redirect:v1.0.2", - "aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2" - ], - "sizeBytes": 82897218 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/heapster-amd64@sha256:fc33c690a3a446de5abc24b048b88050810a58b9e4477fa763a43d7df029301a", - "k8s.gcr.io/heapster-amd64@sha256:fc33c690a3a446de5abc24b048b88050810a58b9e4477fa763a43d7df029301a", - "aksrepos.azurecr.io/mirror/heapster-amd64:v1.5.3", - "k8s.gcr.io/heapster-amd64:v1.5.3" - ], - "sizeBytes": 75318342 - }, - { - "names": [ - "vishiy/hello@sha256:99d60766e39df52d28fe8db9c659633d96ba1d84fd672298dce047d8a86c478a", - "vishiy/hello:err100eps" - ], - "sizeBytes": 54649865 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64@sha256:618a82fa66cf0c75e4753369a6999032372be7308866fc9afb381789b1e5ad52", - "k8s.gcr.io/k8s-dns-kube-dns-amd64@sha256:618a82fa66cf0c75e4753369a6999032372be7308866fc9afb381789b1e5ad52", - "aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64:1.14.13", - "k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.13" - ], - "sizeBytes": 51157394 - }, - { - "names": [ - "k8s.gcr.io/cluster-proportional-autoscaler-amd64@sha256:003f98d9f411ddfa6ff6d539196355e03ddd69fa4ed38c7ffb8fec6f729afe2d", - "k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2" - ], - "sizeBytes": 49648481 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/metrics-server-amd64@sha256:220c0ed3451cb95e4b2f72dd5dc8d9d39d9f529722e5b29d8286373ce27b117e", - "k8s.gcr.io/metrics-server-amd64@sha256:49a9f12f7067d11f42c803dbe61ed2c1299959ad85cb315b25ff7eef8e6b8892", - "aksrepos.azurecr.io/mirror/metrics-server-amd64:v0.2.1", - "k8s.gcr.io/metrics-server-amd64:v0.2.1" - ], - "sizeBytes": 42541759 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64@sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4", - 
"k8s.gcr.io/k8s-dns-sidecar-amd64@sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4", - "aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64:1.14.10", - "k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10" - ], - "sizeBytes": 41635309 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64@sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8", - "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64@sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8", - "aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64:1.14.10", - "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10" - ], - "sizeBytes": 40372149 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/addon-resizer@sha256:8ac3ffa4232046feb297cefc40734641fa2954c16308f9e0d70ec152f22231ca", - "k8s.gcr.io/addon-resizer@sha256:507aa9845ecce1fdde4d61f530c802f4dc2974c700ce0db7730866e442db958d", - "aksrepos.azurecr.io/mirror/addon-resizer:1.8.1", - "k8s.gcr.io/addon-resizer:1.8.1" - ], - "sizeBytes": 32968591 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/nginx@sha256:91d22184f3f9b1be658c2cc2c12d324de7ff12c8b9c9a597905457b4d93b069d", - "nginx@sha256:9d46fd628d54ebe1633ee3cf0fe2acfcc419cfae541c63056530e39cd5620366", - "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine", - "nginx:1.13.12-alpine" - ], - "sizeBytes": 18002931 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/exechealthz-amd64@sha256:34722333f0cd0b891b61c9e0efa31913f22157e341a3aabb79967305d4e78260", - "k8s.gcr.io/exechealthz-amd64@sha256:503e158c3f65ed7399f54010571c7c977ade7fe59010695f48d9650d83488c0a", - "aksrepos.azurecr.io/mirror/exechealthz-amd64:1.2", - "k8s.gcr.io/exechealthz-amd64:1.2" - ], - "sizeBytes": 8374840 - } - ], - "nodeInfo": { - "architecture": "amd64", - "bootID": "4c822e6d-c2e5-4697-9a01-467e04804fc1", - "containerRuntimeVersion": "docker://3.0.4", - "kernelVersion": "4.15.0-1037-azure", - "kubeProxyVersion": "v1.11.8", - "kubeletVersion": "v1.11.8", - "machineID": "1954026de5e6436788f214eb0dfd6a13", - "operatingSystem": "linux", - "osImage": "Ubuntu 16.04.5 LTS", - "systemUUID": "17A6A78E-D3E2-2A4F-852B-C91D933C8D5B" - } - } - }, - { - "apiVersion": "v1", - "kind": "Node", - "metadata": { - "annotations": { - "node.alpha.kubernetes.io/ttl": "0", - "volumes.kubernetes.io/controller-managed-attach-detach": "true" - }, - "creationTimestamp": "2019-06-21T02:01:53Z", - "labels": { - "agentpool": "nodepool1", - "beta.kubernetes.io/arch": "amd64", - "beta.kubernetes.io/instance-type": "Standard_DS1_v2", - "beta.kubernetes.io/os": "linux", - "failure-domain.beta.kubernetes.io/region": "eastus", - "failure-domain.beta.kubernetes.io/zone": "0", - "kubernetes.azure.com/cluster": "MC_dilipr-health-test_dilipr-health-test_eastus", - "kubernetes.io/hostname": "aks-nodepool1-19574989-2", - "kubernetes.io/role": "agent", - "node-role.kubernetes.io/agent": "", - "storageprofile": "managed", - "storagetier": "Premium_LRS" - }, - "name": "aks-nodepool1-19574989-2", - "resourceVersion": "19068101", - "selfLink": "/api/v1/nodes/aks-nodepool1-19574989-2", - "uid": "8a62e1bc-93c8-11e9-854d-ee76584a3c00" - }, - "spec": { - "podCIDR": "10.244.12.0/24", - "providerID": "azure:///subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourceGroups/MC_dilipr-health-test_dilipr-health-test_eastus/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-19574989-2" - }, - "status": { - "addresses": [ - { - "address": "aks-nodepool1-19574989-2", - "type": "Hostname" - }, - { - "address": "10.240.0.7", - "type": "InternalIP" - } - ], - 
"allocatable": { - "cpu": "940m", - "ephemeral-storage": "28043041951", - "hugepages-1Gi": "0", - "hugepages-2Mi": "0", - "memory": "2480548Ki", - "pods": "110" - }, - "capacity": { - "cpu": "1", - "ephemeral-storage": "30428648Ki", - "hugepages-1Gi": "0", - "hugepages-2Mi": "0", - "memory": "3500452Ki", - "pods": "110" - }, - "conditions": [ - { - "lastHeartbeatTime": "2019-06-21T02:02:24Z", - "lastTransitionTime": "2019-06-21T02:02:24Z", - "message": "RouteController created a route", - "reason": "RouteCreated", - "status": "False", - "type": "NetworkUnavailable" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:20Z", - "lastTransitionTime": "2019-07-23T14:46:10Z", - "message": "kubelet has sufficient disk space available", - "reason": "KubeletHasSufficientDisk", - "status": "False", - "type": "OutOfDisk" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:20Z", - "lastTransitionTime": "2019-07-23T14:46:10Z", - "message": "kubelet has sufficient memory available", - "reason": "KubeletHasSufficientMemory", - "status": "False", - "type": "MemoryPressure" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:20Z", - "lastTransitionTime": "2019-07-23T14:46:10Z", - "message": "kubelet has no disk pressure", - "reason": "KubeletHasNoDiskPressure", - "status": "False", - "type": "DiskPressure" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:20Z", - "lastTransitionTime": "2019-06-21T02:01:53Z", - "message": "kubelet has sufficient PID available", - "reason": "KubeletHasSufficientPID", - "status": "False", - "type": "PIDPressure" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:20Z", - "lastTransitionTime": "2019-07-23T14:46:10Z", - "message": "kubelet is posting ready status. AppArmor enabled", - "reason": "KubeletReady", - "status": "True", - "type": "Ready" - } - ], - "daemonEndpoints": { - "kubeletEndpoint": { - "Port": 10250 - } - }, - "images": [ - { - "names": [ - "nickchase/rss-php-nginx@sha256:48da56a77fe4ecff4917121365d8e0ce615ebbdfe31f48a996255f5592894e2b", - "nickchase/rss-php-nginx:v1" - ], - "sizeBytes": 677038498 - }, - { - "names": [ - "rdilip83/jsonlogger@sha256:82b67ca5e0650cd5e47f5b51659d61cee035e5d8dcd8a79c50358cd2beb3b5a8", - "rdilip83/jsonlogger:v12" - ], - "sizeBytes": 676594134 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913", - "k8s.gcr.io/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913", - "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8", - "k8s.gcr.io/hyperkube-amd64:v1.11.8" - ], - "sizeBytes": 615263658 - }, - { - "names": [ - "rdilip83/fixrubyerror@sha256:6b7f36cf6258b311015493ab025f06577d758c45bc5010d022ac160b9f40ea5d", - "rdilip83/fixrubyerror:latest" - ], - "sizeBytes": 494068028 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019" - ], - "sizeBytes": 494067935 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:fb2b90ce9bf7186fd9dfae97f5f72f9b9c80c8a0493af3cff74179cd4ff847c0", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08212019" - ], - "sizeBytes": 494067572 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:c646e180483d295ffac114fb9df513db02553af7879681814d5910764653dd2d", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08202019" - ], - 
"sizeBytes": 494067210 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:c21b596a22a1338ed293d01681f327acc871ee502ed779ec1109d6a93375bb3b", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08192019" - ], - "sizeBytes": 494055088 - }, - { - "names": [ - "rdilip83/cifeatureprod08192019@sha256:7815bba9a805e4e8df33356fd532671de45525ce9c6e936e14f9b126e2097ecd", - "rdilip83/cifeatureprod08192019:v1" - ], - "sizeBytes": 494055088 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:6387d0dedf4de0bab430f681ef61361f63a20e1c4c287a9b60ea5460283ac6cf", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ci_feature_prod_health08192019" - ], - "sizeBytes": 494053562 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:59e34aab9f6e16a87e880b1ee1c9dd5434ee40dd29502e74aceefabf51443717", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:internaltesthealth08192019" - ], - "sizeBytes": 494053562 - }, - { - "names": [ - "rdilip83/hc08192019@sha256:014d936771508d499ac4c15043e23b16bce8de0019fb2048b99540cbe9084895", - "rdilip83/hc08192019:1" - ], - "sizeBytes": 494053562 - }, - { - "names": [ - "rdilip83/health-rc@sha256:8ad12bce5ffd27b301bc6fe4355c8affa6fce080ae7e2291dec3a0ed11bb9483", - "rdilip83/health-rc:3" - ], - "sizeBytes": 494052863 - }, - { - "names": [ - "rdilip83/health_ci_feature_image@sha256:1a574d25884483083e8cbaacbf0cb7c4e442dc736d480615c65f5c71f8969b13", - "rdilip83/health_ci_feature_image:v1" - ], - "sizeBytes": 494052147 - }, - { - "names": [ - "rdilip83/healthrc@sha256:816c8cef09822daf050a0fca6f92e7ac19147ff4bf1a823d43fe70f73470cc0c", - "rdilip83/healthrc:v3" - ], - "sizeBytes": 494052138 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:d35aac044d1adc3d02269fde78f8dfd923db94b81288447cf6fdd482970a333b", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthmerge08142019" - ], - "sizeBytes": 494052135 - }, - { - "names": [ - "rdilip83/healthrc@sha256:a130780e56ac0edb3ca29477e12edd5e9b5d08b5732dbd59ede9beb58e21eca7", - "rdilip83/healthrc:v2" - ], - "sizeBytes": 494051682 - }, - { - "names": [ - "rdilip83/healthmerge@sha256:24d270b0f59fb484c283922474736c3cba50f8aad0270bc0a3acd14284694eea", - "rdilip83/healthmerge:v8" - ], - "sizeBytes": 494010139 - }, - { - "names": [ - "rdilip83/health-rc@sha256:b1d24728eb808d301da426b76b7f7b79606204c4c2b695a24ac670be8276d55d", - "rdilip83/health-rc:1" - ], - "sizeBytes": 494000891 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:a0666957cccbfdf5784accd1133408bf017c28a6e694d9a2ae74da94eef2d285", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview08012019" - ], - "sizeBytes": 493994261 - }, - { - "names": [ - "rdilip83/mergehealth@sha256:32c9b35a6809c54d5296e2ca2b122b35a4ad8c852622174cc5a9f92cc27e56e4", - "rdilip83/mergehealth:v3" - ], - "sizeBytes": 493988815 - }, - { - "names": [ - "rdilip83/mergehealth@sha256:a3521e8f36e007b3cb949e0356a75394ac61fd2024ca1ec4827b8d54fb068534", - "rdilip83/mergehealth:v1" - ], - "sizeBytes": 493981585 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:0438e4690e042b195917e160b8949aeb339520ee19c898a8bb9452f36d1f84f1", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview07182019" - ], - "sizeBytes": 493977357 - }, - { - "names": [ - 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:9ebc410a36856176921dba81b5bd43132469209b315f52be346690435419b9bb" - ], - "sizeBytes": 493946790 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:4e51195a9c77bd166fc90ee5f6143a4604b502ab7ef0f06431dec10c341b10f3", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview06272019" - ], - "sizeBytes": 493893635 - }, - { - "names": [ - "rdilip83/healthpreview06272019@sha256:d888ba5ff5e5810113a32f9c9812a5e28088cc81b902e95a185fe465a514029c", - "rdilip83/healthpreview06272019:latest" - ], - "sizeBytes": 493893633 - }, - { - "names": [ - "rdilip83/healthpreview06252019-1@sha256:1561876cffe94433a569f29f5231548e039193ebaa7ec640d22439675179e43f", - "rdilip83/healthpreview06252019-1:latest" - ], - "sizeBytes": 493887387 - }, - { - "names": [ - "rdilip83/healthpreview06252019@sha256:6597ff599a78ac452a4138dedb9e08c0ccd3e8b01594b033fd78ba9dbb41fe9e", - "rdilip83/healthpreview06252019:latest" - ], - "sizeBytes": 493887384 - }, - { - "names": [ - "rdilip83/healthpreview06242019@sha256:c4f565d92086d1ee56e6016178fed5c668352dc0ca0047f02910bdcb87a482c4", - "rdilip83/healthpreview06242019:latest" - ], - "sizeBytes": 493850850 - }, - { - "names": [ - "rdilip83/healthpreview06212019-1@sha256:937ce5801a0097a1cbc4eff5399c1973b4c6223ece9279b35207368b99f82b96", - "rdilip83/healthpreview06212019-1:latest" - ], - "sizeBytes": 493850674 - }, - { - "names": [ - "rdilip83/healthpreview06192019@sha256:f92cb5283814d446f0acde6a489648ea197496d5f85b27ca959ec97bce742d8a", - "rdilip83/healthpreview06192019:latest" - ], - "sizeBytes": 493799437 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:0f798cb7d56931b231f71e38e7fa5bf898b69e611247a566701f70a5f29a9799", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod07092019" - ], - "sizeBytes": 467692116 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:3734a084fa9681c7e930eb90cad45a8f282c24af63065a720a2327b1683f3ba4", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06142019" - ], - "sizeBytes": 466882569 - }, - { - "names": [ - "rdilip83/mergehealth@sha256:16402c34e2d7de72c2ebc18ec8e9f7933fa25f6a7f83bceb84483ba95e3902f7", - "rdilip83/mergehealth:v2" - ], - "sizeBytes": 448931997 - }, - { - "names": [ - "rdilip83/healthpreview06212019@sha256:5860c9caaf544f2e7c46edad5cdfb69e22398e20dc87cb8a4cd630b5b7000074", - "rdilip83/healthpreview06212019:latest" - ], - "sizeBytes": 448366491 - }, - { - "names": [ - "deis/hcp-tunnel-front@sha256:68878ee3ea1781b322ea3952c3370e31dd89be8bb0864e2bf27bdba6dc904c41", - "deis/hcp-tunnel-front:v1.9.2-v4.0.7" - ], - "sizeBytes": 383483267 - }, - { - "names": [ - "progrium/stress@sha256:e34d56d60f5caae79333cee395aae93b74791d50e3841986420d23c2ee4697bf", - "progrium/stress:latest" - ], - "sizeBytes": 281783943 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:b6834bb69e8fad88110b1dc57097a45bc79e6f2c5f2c2773c871d07389794771", - "k8s.gcr.io/cluster-autoscaler:v1.12.3" - ], - "sizeBytes": 232229241 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:dc5744fd8c22aebfe40d6b62ab97d18d7bfbfc7ab1782509d69a5a9ec514df2c", - "k8s.gcr.io/cluster-autoscaler:v1.12.2" - ], - "sizeBytes": 232167833 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:e71851267764a068fbb091a4ef3bb874b5ce34db48cb757fcf77779f30ef0207", - "k8s.gcr.io/cluster-autoscaler:v1.3.7" - ], - "sizeBytes": 217353965 - }, - { - "names": [ - 
"k8s.gcr.io/cluster-autoscaler@sha256:36a369ca4643542d501bce0addf8b903f2141ae9e2608662b77a3d24f01d7780", - "k8s.gcr.io/cluster-autoscaler:v1.2.2" - ], - "sizeBytes": 208688449 - }, - { - "names": [ - "containernetworking/azure-npm@sha256:4735da6dc0d5393d68be72498f5ce563cb930fa21b26faec8fdc844001057a56", - "containernetworking/azure-npm:v1.0.18" - ], - "sizeBytes": 170727162 - }, - { - "names": [ - "containernetworking/networkmonitor@sha256:d875511410502c3e37804e1f313cc2b0a03d7a03d3d5e6adaf8994b753a76f8e", - "containernetworking/networkmonitor:v0.0.6" - ], - "sizeBytes": 123663837 - }, - { - "names": [ - "containernetworking/networkmonitor@sha256:944408a497c451b0e79d2596dc2e9fe5036cdbba7fa831bff024e1c9ed44190d", - "containernetworking/networkmonitor:v0.0.5" - ], - "sizeBytes": 122043325 - }, - { - "names": [ - "nginx@sha256:bdbf36b7f1f77ffe7bd2a32e59235dff6ecf131e3b6b5b96061c652f30685f3a", - "nginx:latest" - ], - "sizeBytes": 109258867 - }, - { - "names": [ - "debian@sha256:118cf8f3557e1ea766c02f36f05f6ac3e63628427ea8965fb861be904ec35a6f", - "debian:latest" - ], - "sizeBytes": 100594230 - }, - { - "names": [ - "k8s.gcr.io/kube-addon-manager-amd64@sha256:3da3f17cd4f02fe5696f29a5e6cd4aef7111f20dab9bec54ea35942346cfeb60", - "k8s.gcr.io/kube-addon-manager-amd64:v8.8" - ], - "sizeBytes": 99631084 - }, - { - "names": [ - "k8s.gcr.io/kube-addon-manager-amd64@sha256:672794ee3582521eb8bc4f257d0f70c92893f1989f39a200f9c84bcfe1aea7c9", - "k8s.gcr.io/kube-addon-manager-amd64:v9.0" - ], - "sizeBytes": 83077558 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef", - "aksrepos.azurecr.io/prod/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef", - "deis/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef", - "aksrepos.azurecr.io/mirror/kube-svc-redirect:v1.0.2", - "aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2" - ], - "sizeBytes": 82897218 - }, - { - "names": [ - "k8s.gcr.io/heapster-amd64@sha256:dccaabb0c20cf05c29baefa1e9bf0358b083ccc0fab492b9b3b47fb7e4db5472", - "k8s.gcr.io/heapster-amd64:v1.5.4" - ], - "sizeBytes": 75318342 - } - ], - "nodeInfo": { - "architecture": "amd64", - "bootID": "ee529550-afa8-43bb-90a6-f157e7e22e18", - "containerRuntimeVersion": "docker://3.0.4", - "kernelVersion": "4.15.0-1045-azure", - "kubeProxyVersion": "v1.11.8", - "kubeletVersion": "v1.11.8", - "machineID": "0e5d932888da4e17a3c58210f6c8c9db", - "operatingSystem": "linux", - "osImage": "Ubuntu 16.04.6 LTS", - "systemUUID": "5DBFC273-947F-0140-AD1F-BF6758D30B37" - } - } - }, - { - "apiVersion": "v1", - "kind": "Node", - "metadata": { - "annotations": { - "node.alpha.kubernetes.io/ttl": "0", - "volumes.kubernetes.io/controller-managed-attach-detach": "true" - }, - "creationTimestamp": "2019-08-07T18:57:56Z", - "labels": { - "agentpool": "nodepool1", - "beta.kubernetes.io/arch": "amd64", - "beta.kubernetes.io/instance-type": "Standard_DS1_v2", - "beta.kubernetes.io/os": "linux", - "failure-domain.beta.kubernetes.io/region": "eastus", - "failure-domain.beta.kubernetes.io/zone": "1", - "kubernetes.azure.com/cluster": "MC_dilipr-health-test_dilipr-health-test_eastus", - "kubernetes.io/hostname": "aks-nodepool1-19574989-3", - "kubernetes.io/role": "agent", - "node-role.kubernetes.io/agent": "", - "storageprofile": "managed", - "storagetier": "Premium_LRS" - }, - "name": "aks-nodepool1-19574989-3", - "resourceVersion": "19068105", - "selfLink": 
"/api/v1/nodes/aks-nodepool1-19574989-3", - "uid": "448ea0a7-b945-11e9-a1b6-127094e7fd94" - }, - "spec": { - "podCIDR": "10.244.2.0/24", - "providerID": "azure:///subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourceGroups/MC_dilipr-health-test_dilipr-health-test_eastus/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-19574989-3" - }, - "status": { - "addresses": [ - { - "address": "aks-nodepool1-19574989-3", - "type": "Hostname" - }, - { - "address": "10.240.0.6", - "type": "InternalIP" - } - ], - "allocatable": { - "cpu": "940m", - "ephemeral-storage": "28043041951", - "hugepages-1Gi": "0", - "hugepages-2Mi": "0", - "memory": "2480544Ki", - "pods": "110" - }, - "capacity": { - "cpu": "1", - "ephemeral-storage": "30428648Ki", - "hugepages-1Gi": "0", - "hugepages-2Mi": "0", - "memory": "3500448Ki", - "pods": "110" - }, - "conditions": [ - { - "lastHeartbeatTime": "2019-08-07T18:59:32Z", - "lastTransitionTime": "2019-08-07T18:59:32Z", - "message": "RouteController created a route", - "reason": "RouteCreated", - "status": "False", - "type": "NetworkUnavailable" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:22Z", - "lastTransitionTime": "2019-08-07T18:57:56Z", - "message": "kubelet has sufficient disk space available", - "reason": "KubeletHasSufficientDisk", - "status": "False", - "type": "OutOfDisk" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:22Z", - "lastTransitionTime": "2019-08-07T18:57:56Z", - "message": "kubelet has sufficient memory available", - "reason": "KubeletHasSufficientMemory", - "status": "False", - "type": "MemoryPressure" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:22Z", - "lastTransitionTime": "2019-08-07T18:57:56Z", - "message": "kubelet has no disk pressure", - "reason": "KubeletHasNoDiskPressure", - "status": "False", - "type": "DiskPressure" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:22Z", - "lastTransitionTime": "2019-08-07T18:57:56Z", - "message": "kubelet has sufficient PID available", - "reason": "KubeletHasSufficientPID", - "status": "False", - "type": "PIDPressure" - }, - { - "lastHeartbeatTime": "2019-08-23T20:43:22Z", - "lastTransitionTime": "2019-08-07T18:58:06Z", - "message": "kubelet is posting ready status. 
AppArmor enabled", - "reason": "KubeletReady", - "status": "True", - "type": "Ready" - } - ], - "daemonEndpoints": { - "kubeletEndpoint": { - "Port": 10250 - } - }, - "images": [ - { - "names": [ - "deis/hcp-tunnel-front@sha256:a067679f0ab376197a344cd410821cf07d69fc322dcd9af4a9229250da725ce2", - "deis/hcp-tunnel-front:v1.9.2-v4.0.4" - ], - "sizeBytes": 640504769 - }, - { - "names": [ - "aksrepos.azurecr.io/mirror/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913", - "k8s.gcr.io/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913", - "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8", - "k8s.gcr.io/hyperkube-amd64:v1.11.8" - ], - "sizeBytes": 615263658 - }, - { - "names": [ - "rdilip83/fixrubyerror@sha256:6b7f36cf6258b311015493ab025f06577d758c45bc5010d022ac160b9f40ea5d", - "rdilip83/fixrubyerror:latest" - ], - "sizeBytes": 494068028 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019" - ], - "sizeBytes": 494067935 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:fb2b90ce9bf7186fd9dfae97f5f72f9b9c80c8a0493af3cff74179cd4ff847c0", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08212019" - ], - "sizeBytes": 494067572 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:c646e180483d295ffac114fb9df513db02553af7879681814d5910764653dd2d", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08202019" - ], - "sizeBytes": 494067210 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:c21b596a22a1338ed293d01681f327acc871ee502ed779ec1109d6a93375bb3b", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08192019" - ], - "sizeBytes": 494055088 - }, - { - "names": [ - "rdilip83/cifeatureprod08192019@sha256:7815bba9a805e4e8df33356fd532671de45525ce9c6e936e14f9b126e2097ecd", - "rdilip83/cifeatureprod08192019:v1" - ], - "sizeBytes": 494055088 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:6387d0dedf4de0bab430f681ef61361f63a20e1c4c287a9b60ea5460283ac6cf", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ci_feature_prod_health08192019" - ], - "sizeBytes": 494053562 - }, - { - "names": [ - "rdilip83/hc08192019@sha256:014d936771508d499ac4c15043e23b16bce8de0019fb2048b99540cbe9084895", - "rdilip83/hc08192019:1" - ], - "sizeBytes": 494053562 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:59e34aab9f6e16a87e880b1ee1c9dd5434ee40dd29502e74aceefabf51443717", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:internaltesthealth08192019" - ], - "sizeBytes": 494053562 - }, - { - "names": [ - "rdilip83/health-rc@sha256:8ad12bce5ffd27b301bc6fe4355c8affa6fce080ae7e2291dec3a0ed11bb9483", - "rdilip83/health-rc:3" - ], - "sizeBytes": 494052863 - }, - { - "names": [ - "rdilip83/health_ci_feature_image@sha256:1a574d25884483083e8cbaacbf0cb7c4e442dc736d480615c65f5c71f8969b13", - "rdilip83/health_ci_feature_image:v1" - ], - "sizeBytes": 494052147 - }, - { - "names": [ - "rdilip83/healthrc@sha256:816c8cef09822daf050a0fca6f92e7ac19147ff4bf1a823d43fe70f73470cc0c", - "rdilip83/healthrc:v3" - ], - "sizeBytes": 494052138 - }, - { - "names": [ - 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:d35aac044d1adc3d02269fde78f8dfd923db94b81288447cf6fdd482970a333b", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthmerge08142019" - ], - "sizeBytes": 494052135 - }, - { - "names": [ - "rdilip83/healthrc@sha256:a130780e56ac0edb3ca29477e12edd5e9b5d08b5732dbd59ede9beb58e21eca7", - "rdilip83/healthrc:v2" - ], - "sizeBytes": 494051682 - }, - { - "names": [ - "rdilip83/healthmerge@sha256:24d270b0f59fb484c283922474736c3cba50f8aad0270bc0a3acd14284694eea", - "rdilip83/healthmerge:v8" - ], - "sizeBytes": 494010139 - }, - { - "names": [ - "rdilip83/health-rc@sha256:b1d24728eb808d301da426b76b7f7b79606204c4c2b695a24ac670be8276d55d", - "rdilip83/health-rc:1" - ], - "sizeBytes": 494000891 - }, - { - "names": [ - "rdilip83/mergehealth@sha256:32c9b35a6809c54d5296e2ca2b122b35a4ad8c852622174cc5a9f92cc27e56e4", - "rdilip83/mergehealth:v3" - ], - "sizeBytes": 493988815 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:0438e4690e042b195917e160b8949aeb339520ee19c898a8bb9452f36d1f84f1", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview07182019" - ], - "sizeBytes": 493977357 - }, - { - "names": [ - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:4e51195a9c77bd166fc90ee5f6143a4604b502ab7ef0f06431dec10c341b10f3", - "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview06272019" - ], - "sizeBytes": 493893635 - }, - { - "names": [ - "rdilip83/healthpreview06272019@sha256:d888ba5ff5e5810113a32f9c9812a5e28088cc81b902e95a185fe465a514029c", - "rdilip83/healthpreview06272019:latest" - ], - "sizeBytes": 493893633 - }, - { - "names": [ - "aksrepos.azurecr.io/prod/hcp-tunnel-front@sha256:68878ee3ea1781b322ea3952c3370e31dd89be8bb0864e2bf27bdba6dc904c41", - "aksrepos.azurecr.io/prod/hcp-tunnel-front:v1.9.2-v4.0.7" - ], - "sizeBytes": 383483267 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:b6834bb69e8fad88110b1dc57097a45bc79e6f2c5f2c2773c871d07389794771", - "k8s.gcr.io/cluster-autoscaler:v1.12.3" - ], - "sizeBytes": 232229241 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:dc5744fd8c22aebfe40d6b62ab97d18d7bfbfc7ab1782509d69a5a9ec514df2c", - "k8s.gcr.io/cluster-autoscaler:v1.12.2" - ], - "sizeBytes": 232167833 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:193eaf37788dd5f971dd400b7e3d28e650bfd81c90fa46b234f03eb3d43880e3", - "k8s.gcr.io/cluster-autoscaler:v1.12.5" - ], - "sizeBytes": 231543459 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:e71851267764a068fbb091a4ef3bb874b5ce34db48cb757fcf77779f30ef0207", - "k8s.gcr.io/cluster-autoscaler:v1.3.7" - ], - "sizeBytes": 217353965 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:156b7b9bcba24ed474f67d0feaf27f2506013f15b030341bbd41c630283161b8", - "k8s.gcr.io/cluster-autoscaler:v1.3.4" - ], - "sizeBytes": 217264129 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:97896235bf66bde573d6f2ee150e212ea7010d314eb5d2cfb2ff1af93335db30", - "k8s.gcr.io/cluster-autoscaler:v1.3.3" - ], - "sizeBytes": 217259793 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:b416bf3b6687788b4da4c7ede2bcf067b34ad781862ee3d3dac1d720c5fa38b3", - "k8s.gcr.io/cluster-autoscaler:v1.3.9" - ], - "sizeBytes": 216696035 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:f37a2c84614bdd02475ccb020182caec562cde97fdfd9dae58de66ff89614bc5", - "k8s.gcr.io/cluster-autoscaler:v1.3.8" - ], - "sizeBytes": 216693526 - }, - { - "names": [ - 
"k8s.gcr.io/cluster-autoscaler@sha256:b0777becbfc7a56e66b079d2767fdc173121a29165523bbbe309bcb2c0a226aa", - "k8s.gcr.io/cluster-autoscaler:v1.2.5" - ], - "sizeBytes": 212991966 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:36a369ca4643542d501bce0addf8b903f2141ae9e2608662b77a3d24f01d7780", - "k8s.gcr.io/cluster-autoscaler:v1.2.2" - ], - "sizeBytes": 208688449 - }, - { - "names": [ - "mcr.microsoft.com/containernetworking/azure-npm@sha256:7b9e7dec6b06a21595f9aa06b319c99b579950619fa869dd85dc637b2235d79f", - "mcr.microsoft.com/containernetworking/azure-npm:v1.0.18" - ], - "sizeBytes": 170727162 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:760232bed2097b5ca742f05b15c94d56ff96ed6b5c93251edc613be045c8d78b", - "k8s.gcr.io/cluster-autoscaler:v1.15.0" - ], - "sizeBytes": 152214996 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:a4e5a8e6d4dc011e6e7a104d6abdfda56274b90357ee9f6e42cc22b70482420b", - "k8s.gcr.io/cluster-autoscaler:v1.14.0" - ], - "sizeBytes": 142102721 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:cbc61e0f6c3ef1c591a0f22ec483826110e2c10acddd5415c0cc2305fd085e69", - "k8s.gcr.io/cluster-autoscaler:v1.14.2" - ], - "sizeBytes": 142099784 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:9dcbd91e79f33c44529de58a0024deb3da23a3a0bc7fd4d028c1255c68f62fb7", - "k8s.gcr.io/cluster-autoscaler:v1.13.2" - ], - "sizeBytes": 136684274 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:e4140dc3ab54e115ab4464331b25022fc5ffb947b568aaf81089efb72506c895", - "k8s.gcr.io/cluster-autoscaler:v1.13.4" - ], - "sizeBytes": 136681463 - }, - { - "names": [ - "k8s.gcr.io/cluster-autoscaler@sha256:7ff5a60304b344f2f29c804c7253632bbc818794f6932236a56db107a6a8f5af", - "k8s.gcr.io/cluster-autoscaler:v1.13.1" - ], - "sizeBytes": 136618018 - }, - { - "names": [ - "mcr.microsoft.com/containernetworking/networkmonitor@sha256:d875511410502c3e37804e1f313cc2b0a03d7a03d3d5e6adaf8994b753a76f8e", - "mcr.microsoft.com/containernetworking/networkmonitor:v0.0.6" - ], - "sizeBytes": 123663837 - }, - { - "names": [ - "mcr.microsoft.com/containernetworking/networkmonitor@sha256:944408a497c451b0e79d2596dc2e9fe5036cdbba7fa831bff024e1c9ed44190d", - "mcr.microsoft.com/containernetworking/networkmonitor:v0.0.5" - ], - "sizeBytes": 122043325 - }, - { - "names": [ - "k8s.gcr.io/kubernetes-dashboard-amd64@sha256:0ae6b69432e78069c5ce2bcde0fe409c5c4d6f0f4d9cd50a17974fea38898747", - "k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1" - ], - "sizeBytes": 121711221 - }, - { - "names": [ - "k8s.gcr.io/kube-addon-manager-amd64@sha256:3da3f17cd4f02fe5696f29a5e6cd4aef7111f20dab9bec54ea35942346cfeb60", - "k8s.gcr.io/kube-addon-manager-amd64:v8.8" - ], - "sizeBytes": 99631084 - }, - { - "names": [ - "k8s.gcr.io/kube-addon-manager-amd64@sha256:2fd1daf3d3cf0e94a753f2263b60dbb0d42b107b5cde0c75ee3fc5c830e016e4", - "k8s.gcr.io/kube-addon-manager-amd64:v8.9" - ], - "sizeBytes": 99240637 - }, - { - "names": [ - "microsoft/virtual-kubelet@sha256:efc397d741d7e590c892c0ea5dccc9a800656c3adb95da4dae25c1cdd5eb6d9f", - "microsoft/virtual-kubelet:latest" - ], - "sizeBytes": 87436458 - }, - { - "names": [ - "k8s.gcr.io/kube-addon-manager-amd64@sha256:672794ee3582521eb8bc4f257d0f70c92893f1989f39a200f9c84bcfe1aea7c9", - "k8s.gcr.io/kube-addon-manager-amd64:v9.0" - ], - "sizeBytes": 83077558 - }, - { - "names": [ - "k8s.gcr.io/kube-addon-manager-amd64@sha256:382c220b3531d9f95bf316a16b7282cc2ef929cd8a89a9dd3f5933edafc41a8e", - "k8s.gcr.io/kube-addon-manager-amd64:v9.0.1" - ], - "sizeBytes": 
83076194 - }, - { - "names": [ - "aksrepos.azurecr.io/prod/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef", - "deis/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef", - "aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2", - "deis/kube-svc-redirect:v1.0.2" - ], - "sizeBytes": 82897218 - }, - { - "names": [ - "k8s.gcr.io/kube-addon-manager-amd64@sha256:3519273916ba45cfc9b318448d4629819cb5fbccbb0822cce054dd8c1f68cb60", - "k8s.gcr.io/kube-addon-manager-amd64:v8.6" - ], - "sizeBytes": 78384272 - } - ], - "nodeInfo": { - "architecture": "amd64", - "bootID": "47e7c02b-3741-42be-a2a1-76c76aa8ccde", - "containerRuntimeVersion": "docker://3.0.6", - "kernelVersion": "4.15.0-1050-azure", - "kubeProxyVersion": "v1.11.8", - "kubeletVersion": "v1.11.8", - "machineID": "a4a4bc2f5a944cd38aba89365df05227", - "operatingSystem": "linux", - "osImage": "Ubuntu 16.04.6 LTS", - "systemUUID": "BB102B43-9922-264C-8C23-22A7DE0F950F" - } - } - } - ], - "kind": "List", - "metadata": { - "resourceVersion": "", - "selfLink": "" - } -} diff --git a/test/unit-tests/plugins/health/parent_monitor_provider_spec.rb b/test/unit-tests/plugins/health/parent_monitor_provider_spec.rb deleted file mode 100644 index 851a18002..000000000 --- a/test/unit-tests/plugins/health/parent_monitor_provider_spec.rb +++ /dev/null @@ -1,146 +0,0 @@ -require_relative '../test_helpers' -Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/plugins/ruby/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each { |file| require file } -include HealthModel -include Minitest - -describe "ParentMonitorProvider spec" do - it 'returns correct parent_monitor_id for a non-condition case' do - #arrange - definition = JSON.parse('{ - "monitor_id" : { - "parent_monitor_id": "parent_monitor_id", - "labels": [ - "label_1", - "label_2" - ] - } - }' - ) - health_model_definition = ParentMonitorProvider.new(definition) - - monitor = Mock.new - def monitor.monitor_id; "monitor_id"; end - def monitor.monitor_instance_id; "monitor_instance_id"; end - - #act - parent_id = health_model_definition.get_parent_monitor_id(monitor) - #assert - assert_equal parent_id, "parent_monitor_id" - end - - it 'returns raises for an incorrect monitor id' do - #arrange - definition = JSON.parse('{ - "monitor_id" : { - "parent_monitor_id": "parent_monitor_id", - "labels": [ - "label_1", - "label_2" - ] - } - }' - ) - health_model_definition = ParentMonitorProvider.new(definition) - - monitor = Mock.new - def monitor.monitor_id; "monitor_id_!"; end - def monitor.monitor_instance_id; "monitor_instance_id"; end - - #act and assert - assert_raises do - parent_id = health_model_definition.get_parent_monitor_id(monitor) - end - end - - it 'returns correct parent_monitor_id for a conditional case' do - #arrange - definition = JSON.parse('{"conditional_monitor_id": { - "conditions": [ - { - "key": "kubernetes.io/role", - "operator": "==", - "value": "master", - "parent_id": "master_node_pool" - }, - { - "key": "kubernetes.io/role", - "operator": "==", - "value": "agent", - "parent_id": "agent_node_pool" - } - ], - "labels": [ - "kubernetes.io/hostname", - "agentpool", - "kubernetes.io/role", - "container.azm.ms/cluster-region", - "container.azm.ms/cluster-subscription-id", - "container.azm.ms/cluster-resource-group", - "container.azm.ms/cluster-name" - ], - "aggregation_algorithm": "worstOf", - "aggregation_algorithm_params": null - } - - }' - ) - health_model_definition = 
ParentMonitorProvider.new(definition) - - monitor = Mock.new - def monitor.monitor_id; "conditional_monitor_id"; end - def monitor.monitor_instance_id; "conditional_monitor_instance_id"; end - def monitor.labels; {HealthMonitorLabels::ROLE => "master"}; end - - #act - parent_id = health_model_definition.get_parent_monitor_id(monitor) - #assert - assert_equal parent_id, "master_node_pool" - end - - it 'returns defaultParentMonitorTypeId if conditions are not met' do - #arrange - definition = JSON.parse('{"conditional_monitor_id": { - "conditions": [ - { - "key": "kubernetes.io/role", - "operator": "==", - "value": "master", - "parent_id": "master_node_pool" - }, - { - "key": "kubernetes.io/role", - "operator": "==", - "value": "agent", - "parent_id": "agent_node_pool" - } - ], - "labels": [ - "kubernetes.io/hostname", - "agentpool", - "kubernetes.io/role", - "container.azm.ms/cluster-region", - "container.azm.ms/cluster-subscription-id", - "container.azm.ms/cluster-resource-group", - "container.azm.ms/cluster-name" - ], - "default_parent_monitor_id": "default_parent_monitor_id", - "aggregation_algorithm": "worstOf", - "aggregation_algorithm_params": null - } - - }' - ) - health_model_definition = ParentMonitorProvider.new(definition) - - monitor = Mock.new - def monitor.monitor_id; "conditional_monitor_id"; end - def monitor.monitor_instance_id; "conditional_monitor_instance_id"; end - def monitor.labels; {HealthMonitorLabels::ROLE => "master1"}; end - - #act and assert - - parent_id = health_model_definition.get_parent_monitor_id(monitor) - parent_id.must_equal('default_parent_monitor_id') - - end -end diff --git a/test/unit-tests/plugins/health/pods.json b/test/unit-tests/plugins/health/pods.json deleted file mode 100644 index b7c202a19..000000000 --- a/test/unit-tests/plugins/health/pods.json +++ /dev/null @@ -1,5987 +0,0 @@ -{ - "apiVersion": "v1", - "items": [ - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "creationTimestamp": "2019-08-23T17:12:10Z", - "generateName": "heapster-9bcbfdcf5-", - "labels": { - "k8s-app": "heapster", - "pod-template-hash": "567698791" - }, - "name": "heapster-9bcbfdcf5-zp9tl", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "ReplicaSet", - "name": "heapster-9bcbfdcf5", - "uid": "24a0036e-c5c9-11e9-8736-86290fd7dd1f" - } - ], - "resourceVersion": "19048925", - "selfLink": "/api/v1/namespaces/kube-system/pods/heapster-9bcbfdcf5-zp9tl", - "uid": "24ab7e32-c5c9-11e9-8736-86290fd7dd1f" - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - } - }, - "containers": [ - { - "command": [ - "/heapster", - "--source=kubernetes.summary_api:\"\"" - ], - "env": [ - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/heapster-amd64:v1.5.3", - "imagePullPolicy": "IfNotPresent", - 
"livenessProbe": { - "failureThreshold": 3, - "httpGet": { - "path": "/healthz", - "port": 8082, - "scheme": "HTTP" - }, - "initialDelaySeconds": 180, - "periodSeconds": 10, - "successThreshold": 1, - "timeoutSeconds": 5 - }, - "name": "heapster", - "resources": { - "limits": { - "cpu": "88m", - "memory": "204Mi" - }, - "requests": { - "cpu": "88m", - "memory": "204Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "heapster-token-7z7c5", - "readOnly": true - } - ] - }, - { - "command": [ - "/pod_nanny", - "--config-dir=/etc/config", - "--cpu=80m", - "--extra-cpu=0.5m", - "--memory=140Mi", - "--extra-memory=4Mi", - "--threshold=5", - "--deployment=heapster", - "--container=heapster", - "--poll-period=300000", - "--estimator=exponential" - ], - "env": [ - { - "name": "MY_POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "MY_POD_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/addon-resizer:1.8.1", - "imagePullPolicy": "IfNotPresent", - "name": "heapster-nanny", - "resources": { - "limits": { - "cpu": "50m", - "memory": "90Mi" - }, - "requests": { - "cpu": "50m", - "memory": "90Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/config", - "name": "heapster-config-volume" - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "heapster-token-7z7c5", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeName": "aks-nodepool1-19574989-0", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 2000001000, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "heapster", - "serviceAccountName": "heapster", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "configMap": { - "defaultMode": 420, - "name": "heapster-config" - }, - "name": "heapster-config-volume" - }, - { - "name": "heapster-token-7z7c5", - "secret": { - "defaultMode": 420, - "secretName": "heapster-token-7z7c5" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T17:12:10Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - 
"lastTransitionTime": "2019-08-23T17:12:26Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T17:12:10Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://8ab1ee82d29d0351cb21dbce4db9eb2a270407d2ebe10377be02edd46cb34027", - "image": "aksrepos.azurecr.io/mirror/heapster-amd64:v1.5.3", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/heapster-amd64@sha256:fc33c690a3a446de5abc24b048b88050810a58b9e4477fa763a43d7df029301a", - "lastState": {}, - "name": "heapster", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-08-23T17:12:21Z" - } - } - }, - { - "containerID": "docker://42154ff41fed196c3f4b8a485436537330d16bcef23c743a34cf63202d023453", - "image": "aksrepos.azurecr.io/mirror/addon-resizer:1.8.1", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/addon-resizer@sha256:8ac3ffa4232046feb297cefc40734641fa2954c16308f9e0d70ec152f22231ca", - "lastState": {}, - "name": "heapster-nanny", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-08-23T17:12:25Z" - } - } - } - ], - "hostIP": "10.240.0.4", - "phase": "Running", - "podIP": "10.244.1.33", - "qosClass": "Guaranteed", - "startTime": "2019-08-23T17:12:10Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "scheduler.alpha.kubernetes.io/critical-pod": "", - "seccomp.security.alpha.kubernetes.io/pod": "docker/default" - }, - "creationTimestamp": "2019-07-09T02:38:06Z", - "generateName": "kube-dns-autoscaler-7d64798d95-", - "labels": { - "k8s-app": "kube-dns-autoscaler", - "pod-template-hash": "3820354851" - }, - "name": "kube-dns-autoscaler-7d64798d95-f9wcv", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "ReplicaSet", - "name": "kube-dns-autoscaler-7d64798d95", - "uid": "71655f71-a1f2-11e9-9bc6-127bb0ec03b8" - } - ], - "resourceVersion": "15144041", - "selfLink": "/api/v1/namespaces/kube-system/pods/kube-dns-autoscaler-7d64798d95-f9wcv", - "uid": "94e52ab1-a1f2-11e9-8b08-d602e29755d5" - }, - "spec": { - "containers": [ - { - "command": [ - "/cluster-proportional-autoscaler", - "--namespace=kube-system", - "--configmap=kube-dns-autoscaler", - "--target=deployment/kube-dns-v20", - "--default-params={\"ladder\":{\"coresToReplicas\":[[1,2],[512,3],[1024,4],[2048,5]],\"nodesToReplicas\":[[1,2],[8,3],[16,4],[32,5]]}}", - "--logtostderr=true", - "--v=2" - ], - "env": [ - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/cluster-proportional-autoscaler-amd64:1.1.2-r2", - "imagePullPolicy": "IfNotPresent", - "name": "autoscaler", - "resources": { - "requests": { - "cpu": "20m", - "memory": "10Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": 
[ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-dns-autoscaler-token-zkxt8", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "Default", - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeName": "aks-nodepool1-19574989-2", - "priority": 2000001000, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "kube-dns-autoscaler", - "serviceAccountName": "kube-dns-autoscaler", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "kube-dns-autoscaler-token-zkxt8", - "secret": { - "defaultMode": 420, - "secretName": "kube-dns-autoscaler-token-zkxt8" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-09T02:38:07Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-09T02:38:44Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-09T02:38:06Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://43f5fde3ce0f375a40c08de56087fc3b53f6269b239a3e6383d2082779504b96", - "image": "aksrepos.azurecr.io/mirror/cluster-proportional-autoscaler-amd64:1.1.2-r2", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/cluster-proportional-autoscaler-amd64@sha256:ccd2b031b116750091443930a8e6d0f785cfde38f137969e472b2ac850aeddfb", - "lastState": {}, - "name": "autoscaler", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-07-09T02:38:40Z" - } - } - } - ], - "hostIP": "10.240.0.7", - "phase": "Running", - "podIP": "10.244.12.118", - "qosClass": "Burstable", - "startTime": "2019-07-09T02:38:07Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "prometheus.io/port": "10055", - "prometheus.io/scrape": "true" - }, - "creationTimestamp": "2019-07-09T02:38:06Z", - "generateName": "kube-dns-v20-55cb5d96f7-", - "labels": { - "k8s-app": "kube-dns", - "kubernetes.io/cluster-service": "true", - "pod-template-hash": "1176185293", - "version": "v20" - }, - "name": "kube-dns-v20-55cb5d96f7-lmrpl", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "ReplicaSet", - "name": "kube-dns-v20-55cb5d96f7", - "uid": "71892fd6-a1f2-11e9-9bc6-127bb0ec03b8" - } - ], - "resourceVersion": "15144030", - "selfLink": "/api/v1/namespaces/kube-system/pods/kube-dns-v20-55cb5d96f7-lmrpl", - "uid": "952488f3-a1f2-11e9-8b08-d602e29755d5" - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - }, - "podAntiAffinity": { - "preferredDuringSchedulingIgnoredDuringExecution": [ - { - "podAffinityTerm": { - "labelSelector": { - "matchExpressions": [ - 
{ - "key": "k8s-app", - "operator": "In", - "values": [ - "kube-dns" - ] - } - ] - }, - "topologyKey": "kubernetes.io/hostname" - }, - "weight": 100 - } - ] - } - }, - "containers": [ - { - "args": [ - "--kubecfg-file=/config/kubeconfig", - "--config-dir=/kube-dns-config", - "--domain=cluster.local.", - "--dns-port=10053", - "--v=2" - ], - "env": [ - { - "name": "PROMETHEUS_PORT", - "value": "10055" - }, - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64:1.14.13", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "failureThreshold": 5, - "httpGet": { - "path": "/healthcheck/kubedns", - "port": 10054, - "scheme": "HTTP" - }, - "initialDelaySeconds": 60, - "periodSeconds": 10, - "successThreshold": 1, - "timeoutSeconds": 5 - }, - "name": "kubedns", - "ports": [ - { - "containerPort": 10053, - "name": "dns-local", - "protocol": "UDP" - }, - { - "containerPort": 10053, - "name": "dns-tcp-local", - "protocol": "TCP" - }, - { - "containerPort": 10055, - "name": "metrics", - "protocol": "TCP" - } - ], - "readinessProbe": { - "failureThreshold": 3, - "httpGet": { - "path": "/readiness", - "port": 8081, - "scheme": "HTTP" - }, - "initialDelaySeconds": 30, - "periodSeconds": 10, - "successThreshold": 1, - "timeoutSeconds": 5 - }, - "resources": { - "limits": { - "memory": "170Mi" - }, - "requests": { - "cpu": "100m", - "memory": "70Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/kube-dns-config", - "name": "kube-dns-config" - }, - { - "mountPath": "/config", - "name": "kubedns-kubecfg", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-dns-token-ghgtl", - "readOnly": true - } - ] - }, - { - "args": [ - "-v=2", - "-logtostderr", - "-configDir=/kube-dns-config", - "-restartDnsmasq=true", - "--", - "-k", - "--cache-size=1000", - "--no-negcache", - "--no-resolv", - "--server=127.0.0.1#10053", - "--server=/cluster.local/127.0.0.1#10053", - "--server=/in-addr.arpa/127.0.0.1#10053", - "--server=/ip6.arpa/127.0.0.1#10053", - "--log-facility=-" - ], - "env": [ - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64:1.14.10", - "imagePullPolicy": "IfNotPresent", - "name": "dnsmasq", - "ports": [ - { - "containerPort": 53, - "name": "dns", - "protocol": "UDP" - }, - { - "containerPort": 53, - "name": "dns-tcp", - "protocol": "TCP" - } - ], - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - 
"terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/kube-dns-config", - "name": "kube-dns-config" - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-dns-token-ghgtl", - "readOnly": true - } - ] - }, - { - "args": [ - "--cmd=for d in $PROBE_DOMAINS; do nslookup $d 127.0.0.1 \u003e/dev/null || exit 1; done", - "--url=/healthz-dnsmasq", - "--cmd=for d in $PROBE_DOMAINS; do nslookup $d 127.0.0.1:10053 \u003e/dev/null || exit 1; done", - "--url=/healthz-kubedns", - "--port=8080", - "--quiet" - ], - "env": [ - { - "name": "PROBE_DOMAINS", - "value": "bing.com kubernetes.default.svc.cluster.local" - }, - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/exechealthz-amd64:1.2", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "failureThreshold": 5, - "httpGet": { - "path": "/healthz-dnsmasq", - "port": 8080, - "scheme": "HTTP" - }, - "initialDelaySeconds": 60, - "periodSeconds": 10, - "successThreshold": 1, - "timeoutSeconds": 5 - }, - "name": "healthz", - "ports": [ - { - "containerPort": 8080, - "protocol": "TCP" - } - ], - "resources": { - "limits": { - "memory": "50Mi" - }, - "requests": { - "cpu": "10m", - "memory": "50Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-dns-token-ghgtl", - "readOnly": true - } - ] - }, - { - "args": [ - "--v=2", - "--logtostderr", - "--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV", - "--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV" - ], - "env": [ - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64:1.14.10", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "failureThreshold": 3, - "httpGet": { - "path": "/metrics", - "port": 10054, - "scheme": "HTTP" - }, - "initialDelaySeconds": 60, - "periodSeconds": 10, - "successThreshold": 1, - "timeoutSeconds": 5 - }, - "name": "sidecar", - "ports": [ - { - "containerPort": 10054, - "name": "metrics", - "protocol": "TCP" - } - ], - "resources": { - "requests": { - "cpu": "10m", - "memory": "20Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-dns-token-ghgtl", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "Default", - "imagePullSecrets": [ - { - "name": 
"emptyacrsecret" - } - ], - "nodeName": "aks-nodepool1-19574989-1", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 2000001000, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "kube-dns", - "serviceAccountName": "kube-dns", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "configMap": { - "defaultMode": 420, - "name": "kube-dns", - "optional": true - }, - "name": "kube-dns-config" - }, - { - "configMap": { - "defaultMode": 420, - "name": "kubedns-kubecfg" - }, - "name": "kubedns-kubecfg" - }, - { - "name": "kube-dns-token-ghgtl", - "secret": { - "defaultMode": 420, - "secretName": "kube-dns-token-ghgtl" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-09T02:38:09Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-09T02:38:50Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-09T02:38:06Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://8aa7d794d423f29469d8a35cc295bfaf2434a26756d7063fb19e06ce838aa5d9", - "image": "aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64:1.14.10", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64@sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8", - "lastState": {}, - "name": "dnsmasq", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-07-09T02:38:23Z" - } - } - }, - { - "containerID": "docker://7ee72258ca97555017c3096c3c125935b22e1735dafd494bec7f5480a408314a", - "image": "aksrepos.azurecr.io/mirror/exechealthz-amd64:1.2", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/exechealthz-amd64@sha256:34722333f0cd0b891b61c9e0efa31913f22157e341a3aabb79967305d4e78260", - "lastState": {}, - "name": "healthz", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-07-09T02:38:25Z" - } - } - }, - { - "containerID": "docker://bf6c7e823d08306e6ba13353ae89319080990a5d302b1d7370e76acd34c34a52", - "image": "aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64:1.14.13", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64@sha256:618a82fa66cf0c75e4753369a6999032372be7308866fc9afb381789b1e5ad52", - "lastState": {}, - "name": "kubedns", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-07-09T02:38:19Z" - } - } - }, - { - "containerID": "docker://2e4faf4da65a23316dc7065e3de27bf1ebd9ac2a8f07b9053de5ab63ab4c2d7e", - "image": "aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64:1.14.10", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64@sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4", - "lastState": {}, - "name": "sidecar", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": 
"2019-07-09T02:38:28Z" - } - } - } - ], - "hostIP": "10.240.0.5", - "phase": "Running", - "podIP": "10.244.0.192", - "qosClass": "Burstable", - "startTime": "2019-07-09T02:38:09Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "prometheus.io/port": "10055", - "prometheus.io/scrape": "true" - }, - "creationTimestamp": "2019-07-09T02:38:06Z", - "generateName": "kube-dns-v20-55cb5d96f7-", - "labels": { - "k8s-app": "kube-dns", - "kubernetes.io/cluster-service": "true", - "pod-template-hash": "1176185293", - "version": "v20" - }, - "name": "kube-dns-v20-55cb5d96f7-pl7sh", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "ReplicaSet", - "name": "kube-dns-v20-55cb5d96f7", - "uid": "71892fd6-a1f2-11e9-9bc6-127bb0ec03b8" - } - ], - "resourceVersion": "15144050", - "selfLink": "/api/v1/namespaces/kube-system/pods/kube-dns-v20-55cb5d96f7-pl7sh", - "uid": "95046bc6-a1f2-11e9-8b08-d602e29755d5" - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - }, - "podAntiAffinity": { - "preferredDuringSchedulingIgnoredDuringExecution": [ - { - "podAffinityTerm": { - "labelSelector": { - "matchExpressions": [ - { - "key": "k8s-app", - "operator": "In", - "values": [ - "kube-dns" - ] - } - ] - }, - "topologyKey": "kubernetes.io/hostname" - }, - "weight": 100 - } - ] - } - }, - "containers": [ - { - "args": [ - "--kubecfg-file=/config/kubeconfig", - "--config-dir=/kube-dns-config", - "--domain=cluster.local.", - "--dns-port=10053", - "--v=2" - ], - "env": [ - { - "name": "PROMETHEUS_PORT", - "value": "10055" - }, - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64:1.14.13", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "failureThreshold": 5, - "httpGet": { - "path": "/healthcheck/kubedns", - "port": 10054, - "scheme": "HTTP" - }, - "initialDelaySeconds": 60, - "periodSeconds": 10, - "successThreshold": 1, - "timeoutSeconds": 5 - }, - "name": "kubedns", - "ports": [ - { - "containerPort": 10053, - "name": "dns-local", - "protocol": "UDP" - }, - { - "containerPort": 10053, - "name": "dns-tcp-local", - "protocol": "TCP" - }, - { - "containerPort": 10055, - "name": "metrics", - "protocol": "TCP" - } - ], - "readinessProbe": { - "failureThreshold": 3, - "httpGet": { - "path": "/readiness", - "port": 8081, - "scheme": "HTTP" - }, - "initialDelaySeconds": 30, - "periodSeconds": 10, - "successThreshold": 1, - "timeoutSeconds": 5 - }, - "resources": { - "limits": { - "memory": "170Mi" - }, - "requests": { - "cpu": "100m", - "memory": "70Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/kube-dns-config", - "name": "kube-dns-config" - }, - { - "mountPath": "/config", - 
"name": "kubedns-kubecfg", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-dns-token-ghgtl", - "readOnly": true - } - ] - }, - { - "args": [ - "-v=2", - "-logtostderr", - "-configDir=/kube-dns-config", - "-restartDnsmasq=true", - "--", - "-k", - "--cache-size=1000", - "--no-negcache", - "--no-resolv", - "--server=127.0.0.1#10053", - "--server=/cluster.local/127.0.0.1#10053", - "--server=/in-addr.arpa/127.0.0.1#10053", - "--server=/ip6.arpa/127.0.0.1#10053", - "--log-facility=-" - ], - "env": [ - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64:1.14.10", - "imagePullPolicy": "IfNotPresent", - "name": "dnsmasq", - "ports": [ - { - "containerPort": 53, - "name": "dns", - "protocol": "UDP" - }, - { - "containerPort": 53, - "name": "dns-tcp", - "protocol": "TCP" - } - ], - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/kube-dns-config", - "name": "kube-dns-config" - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-dns-token-ghgtl", - "readOnly": true - } - ] - }, - { - "args": [ - "--cmd=for d in $PROBE_DOMAINS; do nslookup $d 127.0.0.1 \u003e/dev/null || exit 1; done", - "--url=/healthz-dnsmasq", - "--cmd=for d in $PROBE_DOMAINS; do nslookup $d 127.0.0.1:10053 \u003e/dev/null || exit 1; done", - "--url=/healthz-kubedns", - "--port=8080", - "--quiet" - ], - "env": [ - { - "name": "PROBE_DOMAINS", - "value": "bing.com kubernetes.default.svc.cluster.local" - }, - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/exechealthz-amd64:1.2", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "failureThreshold": 5, - "httpGet": { - "path": "/healthz-dnsmasq", - "port": 8080, - "scheme": "HTTP" - }, - "initialDelaySeconds": 60, - "periodSeconds": 10, - "successThreshold": 1, - "timeoutSeconds": 5 - }, - "name": "healthz", - "ports": [ - { - "containerPort": 8080, - "protocol": "TCP" - } - ], - "resources": { - "limits": { - "memory": "50Mi" - }, - "requests": { - "cpu": "10m", - "memory": "50Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-dns-token-ghgtl", - "readOnly": true - } - ] - }, - { - "args": [ - "--v=2", - "--logtostderr", - "--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV", - 
"--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV" - ], - "env": [ - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64:1.14.10", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "failureThreshold": 3, - "httpGet": { - "path": "/metrics", - "port": 10054, - "scheme": "HTTP" - }, - "initialDelaySeconds": 60, - "periodSeconds": 10, - "successThreshold": 1, - "timeoutSeconds": 5 - }, - "name": "sidecar", - "ports": [ - { - "containerPort": 10054, - "name": "metrics", - "protocol": "TCP" - } - ], - "resources": { - "requests": { - "cpu": "10m", - "memory": "20Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-dns-token-ghgtl", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "Default", - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeName": "aks-nodepool1-19574989-2", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 2000001000, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "kube-dns", - "serviceAccountName": "kube-dns", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "configMap": { - "defaultMode": 420, - "name": "kube-dns", - "optional": true - }, - "name": "kube-dns-config" - }, - { - "configMap": { - "defaultMode": 420, - "name": "kubedns-kubecfg" - }, - "name": "kubedns-kubecfg" - }, - { - "name": "kube-dns-token-ghgtl", - "secret": { - "defaultMode": 420, - "secretName": "kube-dns-token-ghgtl" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-09T02:38:10Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-09T02:39:14Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-09T02:38:06Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://c16dce3b5c1f06c6fbfdf52edb98f9916740c0f652dc72b2fe0f9f0cc5c4c4de", - "image": "aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64:1.14.10", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64@sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8", - "lastState": {}, - "name": "dnsmasq", - "ready": true, - "restartCount": 0, - "state": { - "running": { - 
"startedAt": "2019-07-09T02:38:51Z" - } - } - }, - { - "containerID": "docker://410ceb88fcbc2c3cdf19ffc5ce88adb0ba933bbc3cf446a90e669a978a7d933c", - "image": "aksrepos.azurecr.io/mirror/exechealthz-amd64:1.2", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/exechealthz-amd64@sha256:34722333f0cd0b891b61c9e0efa31913f22157e341a3aabb79967305d4e78260", - "lastState": {}, - "name": "healthz", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-07-09T02:38:58Z" - } - } - }, - { - "containerID": "docker://694f575606b51234a98b3e22d2afd04f3fa11c30b6090a901e64922eeb9fba95", - "image": "aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64:1.14.13", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64@sha256:618a82fa66cf0c75e4753369a6999032372be7308866fc9afb381789b1e5ad52", - "lastState": {}, - "name": "kubedns", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-07-09T02:38:39Z" - } - } - }, - { - "containerID": "docker://d7865fb7465b2f9cd218cdf6694018aee55260966f2bf51e6b628a86c6b9041f", - "image": "aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64:1.14.10", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64@sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4", - "lastState": {}, - "name": "sidecar", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-07-09T02:39:04Z" - } - } - } - ], - "hostIP": "10.240.0.7", - "phase": "Running", - "podIP": "10.244.12.117", - "qosClass": "Burstable", - "startTime": "2019-07-09T02:38:10Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "aks.microsoft.com/release-time": "seconds:1566580134 nanos:758740921 ", - "remediator.aks.microsoft.com/kube-proxy-restart": "24" - }, - "creationTimestamp": "2019-08-23T17:13:13Z", - "generateName": "kube-proxy-", - "labels": { - "component": "kube-proxy", - "controller-revision-hash": "3559350992", - "pod-template-generation": "141", - "tier": "node" - }, - "name": "kube-proxy-ct2tl", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "DaemonSet", - "name": "kube-proxy", - "uid": "45640bf6-44e5-11e9-9920-423525a6b683" - } - ], - "resourceVersion": "19049034", - "selfLink": "/api/v1/namespaces/kube-system/pods/kube-proxy-ct2tl", - "uid": "49e373c8-c5c9-11e9-8736-86290fd7dd1f" - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - } - }, - "containers": [ - { - "command": [ - "/hyperkube", - "proxy", - "--kubeconfig=/var/lib/kubelet/kubeconfig", - "--cluster-cidr=10.244.0.0/16", - "--feature-gates=ExperimentalCriticalPodAnnotation=true", - "--v=3" - ], - "env": [ - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": 
"aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8", - "imagePullPolicy": "IfNotPresent", - "name": "kube-proxy", - "resources": { - "requests": { - "cpu": "100m" - } - }, - "securityContext": { - "privileged": true - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/lib/kubelet", - "name": "kubeconfig", - "readOnly": true - }, - { - "mountPath": "/etc/kubernetes/certs", - "name": "certificates", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-proxy-token-f5vbg", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "hostNetwork": true, - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeName": "aks-nodepool1-19574989-0", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 2000001000, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "kube-proxy", - "serviceAccountName": "kube-proxy", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/disk-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/memory-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/unschedulable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/network-unavailable", - "operator": "Exists" - } - ], - "volumes": [ - { - "hostPath": { - "path": "/var/lib/kubelet", - "type": "" - }, - "name": "kubeconfig" - }, - { - "hostPath": { - "path": "/etc/kubernetes/certs", - "type": "" - }, - "name": "certificates" - }, - { - "name": "kube-proxy-token-f5vbg", - "secret": { - "defaultMode": 420, - "secretName": "kube-proxy-token-f5vbg" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T17:13:13Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T17:13:23Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T17:13:13Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://ef115b31792ece39d1526075f9f3763f8cbf526814624795a05786d83367427e", - "image": "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913", - "lastState": {}, - "name": "kube-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-08-23T17:13:22Z" - } - } - } - ], - "hostIP": "10.240.0.4", - "phase": "Running", - "podIP": "10.240.0.4", - "qosClass": "Burstable", - "startTime": "2019-08-23T17:13:13Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - 
"annotations": { - "aks.microsoft.com/release-time": "seconds:1566580134 nanos:758740921 ", - "remediator.aks.microsoft.com/kube-proxy-restart": "24" - }, - "creationTimestamp": "2019-08-23T17:10:52Z", - "generateName": "kube-proxy-", - "labels": { - "component": "kube-proxy", - "controller-revision-hash": "3559350992", - "pod-template-generation": "141", - "tier": "node" - }, - "name": "kube-proxy-d59xd", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "DaemonSet", - "name": "kube-proxy", - "uid": "45640bf6-44e5-11e9-9920-423525a6b683" - } - ], - "resourceVersion": "19048698", - "selfLink": "/api/v1/namespaces/kube-system/pods/kube-proxy-d59xd", - "uid": "f65e6a62-c5c8-11e9-8736-86290fd7dd1f" - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - } - }, - "containers": [ - { - "command": [ - "/hyperkube", - "proxy", - "--kubeconfig=/var/lib/kubelet/kubeconfig", - "--cluster-cidr=10.244.0.0/16", - "--feature-gates=ExperimentalCriticalPodAnnotation=true", - "--v=3" - ], - "env": [ - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8", - "imagePullPolicy": "IfNotPresent", - "name": "kube-proxy", - "resources": { - "requests": { - "cpu": "100m" - } - }, - "securityContext": { - "privileged": true - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/lib/kubelet", - "name": "kubeconfig", - "readOnly": true - }, - { - "mountPath": "/etc/kubernetes/certs", - "name": "certificates", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-proxy-token-f5vbg", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "hostNetwork": true, - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeName": "aks-nodepool1-19574989-1", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 2000001000, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "kube-proxy", - "serviceAccountName": "kube-proxy", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/disk-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/memory-pressure", - 
"operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/unschedulable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/network-unavailable", - "operator": "Exists" - } - ], - "volumes": [ - { - "hostPath": { - "path": "/var/lib/kubelet", - "type": "" - }, - "name": "kubeconfig" - }, - { - "hostPath": { - "path": "/etc/kubernetes/certs", - "type": "" - }, - "name": "certificates" - }, - { - "name": "kube-proxy-token-f5vbg", - "secret": { - "defaultMode": 420, - "secretName": "kube-proxy-token-f5vbg" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T17:10:52Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T17:11:05Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T17:10:52Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://c4e9d0e372116b9cab048f7bb381e93b423dac2285da75f66664a473fcc043b3", - "image": "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913", - "lastState": {}, - "name": "kube-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-08-23T17:11:04Z" - } - } - } - ], - "hostIP": "10.240.0.5", - "phase": "Running", - "podIP": "10.240.0.5", - "qosClass": "Burstable", - "startTime": "2019-08-23T17:10:52Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "aks.microsoft.com/release-time": "seconds:1566580134 nanos:758740921 ", - "remediator.aks.microsoft.com/kube-proxy-restart": "24" - }, - "creationTimestamp": "2019-08-23T17:12:23Z", - "generateName": "kube-proxy-", - "labels": { - "component": "kube-proxy", - "controller-revision-hash": "3559350992", - "pod-template-generation": "141", - "tier": "node" - }, - "name": "kube-proxy-kpm8j", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "DaemonSet", - "name": "kube-proxy", - "uid": "45640bf6-44e5-11e9-9920-423525a6b683" - } - ], - "resourceVersion": "19048942", - "selfLink": "/api/v1/namespaces/kube-system/pods/kube-proxy-kpm8j", - "uid": "2c3de48d-c5c9-11e9-8736-86290fd7dd1f" - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - } - }, - "containers": [ - { - "command": [ - "/hyperkube", - "proxy", - "--kubeconfig=/var/lib/kubelet/kubeconfig", - "--cluster-cidr=10.244.0.0/16", - "--feature-gates=ExperimentalCriticalPodAnnotation=true", - "--v=3" - ], - "env": [ - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": 
"dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8", - "imagePullPolicy": "IfNotPresent", - "name": "kube-proxy", - "resources": { - "requests": { - "cpu": "100m" - } - }, - "securityContext": { - "privileged": true - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/lib/kubelet", - "name": "kubeconfig", - "readOnly": true - }, - { - "mountPath": "/etc/kubernetes/certs", - "name": "certificates", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-proxy-token-f5vbg", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "hostNetwork": true, - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeName": "aks-nodepool1-19574989-2", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 2000001000, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "kube-proxy", - "serviceAccountName": "kube-proxy", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/disk-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/memory-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/unschedulable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/network-unavailable", - "operator": "Exists" - } - ], - "volumes": [ - { - "hostPath": { - "path": "/var/lib/kubelet", - "type": "" - }, - "name": "kubeconfig" - }, - { - "hostPath": { - "path": "/etc/kubernetes/certs", - "type": "" - }, - "name": "certificates" - }, - { - "name": "kube-proxy-token-f5vbg", - "secret": { - "defaultMode": 420, - "secretName": "kube-proxy-token-f5vbg" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T17:12:24Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T17:12:34Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T17:12:24Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://51067a965113e6d285a676e0d1e212ffbb60046aab6c4702f5554617415b2031", - "image": "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913", - "lastState": {}, - "name": "kube-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-08-23T17:12:33Z" - } - } - } - ], - "hostIP": "10.240.0.7", - "phase": "Running", - "podIP": "10.240.0.7", - "qosClass": "Burstable", - "startTime": 
"2019-08-23T17:12:24Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "aks.microsoft.com/release-time": "seconds:1566580134 nanos:758740921 ", - "remediator.aks.microsoft.com/kube-proxy-restart": "24" - }, - "creationTimestamp": "2019-08-23T17:11:38Z", - "generateName": "kube-proxy-", - "labels": { - "component": "kube-proxy", - "controller-revision-hash": "3559350992", - "pod-template-generation": "141", - "tier": "node" - }, - "name": "kube-proxy-skzg4", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "DaemonSet", - "name": "kube-proxy", - "uid": "45640bf6-44e5-11e9-9920-423525a6b683" - } - ], - "resourceVersion": "19048774", - "selfLink": "/api/v1/namespaces/kube-system/pods/kube-proxy-skzg4", - "uid": "114f7246-c5c9-11e9-8736-86290fd7dd1f" - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - } - }, - "containers": [ - { - "command": [ - "/hyperkube", - "proxy", - "--kubeconfig=/var/lib/kubelet/kubeconfig", - "--cluster-cidr=10.244.0.0/16", - "--feature-gates=ExperimentalCriticalPodAnnotation=true", - "--v=3" - ], - "env": [ - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8", - "imagePullPolicy": "IfNotPresent", - "name": "kube-proxy", - "resources": { - "requests": { - "cpu": "100m" - } - }, - "securityContext": { - "privileged": true - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/lib/kubelet", - "name": "kubeconfig", - "readOnly": true - }, - { - "mountPath": "/etc/kubernetes/certs", - "name": "certificates", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-proxy-token-f5vbg", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "hostNetwork": true, - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeName": "aks-nodepool1-19574989-3", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 2000001000, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "kube-proxy", - "serviceAccountName": "kube-proxy", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/disk-pressure", - "operator": "Exists" - 
}, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/memory-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/unschedulable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/network-unavailable", - "operator": "Exists" - } - ], - "volumes": [ - { - "hostPath": { - "path": "/var/lib/kubelet", - "type": "" - }, - "name": "kubeconfig" - }, - { - "hostPath": { - "path": "/etc/kubernetes/certs", - "type": "" - }, - "name": "certificates" - }, - { - "name": "kube-proxy-token-f5vbg", - "secret": { - "defaultMode": 420, - "secretName": "kube-proxy-token-f5vbg" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T17:11:38Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T17:11:42Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T17:11:38Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://a3172e9191547b0ea3eb7db629cd4bba2240f5c9d0186ea37be49d9877034541", - "image": "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913", - "lastState": {}, - "name": "kube-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-08-23T17:11:41Z" - } - } - } - ], - "hostIP": "10.240.0.6", - "phase": "Running", - "podIP": "10.240.0.6", - "qosClass": "Burstable", - "startTime": "2019-08-23T17:11:38Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "creationTimestamp": "2019-07-15T20:54:26Z", - "generateName": "kube-svc-redirect-", - "labels": { - "component": "kube-svc-redirect", - "controller-revision-hash": "1216437240", - "pod-template-generation": "9", - "tier": "node" - }, - "name": "kube-svc-redirect-czm8d", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "DaemonSet", - "name": "kube-svc-redirect", - "uid": "45a5fc62-44e5-11e9-9920-423525a6b683" - } - ], - "resourceVersion": "15831523", - "selfLink": "/api/v1/namespaces/kube-system/pods/kube-svc-redirect-czm8d", - "uid": "bb3d3ef2-a742-11e9-a38a-22d1c75c4357" - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - } - }, - "containers": [ - { - "env": [ - { - "name": "KUBERNETES_SVC_IP", - "value": "10.0.0.1" - }, - { - "name": "KUBE_SVC_REDIRECTOR_PROXY_IP", - "value": "127.0.0.1:14612" - }, - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": 
"aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2", - "imagePullPolicy": "IfNotPresent", - "name": "redirector", - "resources": { - "requests": { - "cpu": "5m", - "memory": "2Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-svc-redirector-token-ngjg2", - "readOnly": true - } - ] - }, - { - "env": [ - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine", - "imagePullPolicy": "IfNotPresent", - "name": "azureproxy", - "ports": [ - { - "containerPort": 14612, - "hostPort": 14612, - "protocol": "TCP" - } - ], - "resources": { - "requests": { - "cpu": "5m", - "memory": "32Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/nginx/nginx.conf", - "name": "azureproxy-nginx", - "readOnly": true, - "subPath": "nginx.conf" - }, - { - "mountPath": "/etc/nginx/conf.d", - "name": "azureproxy-configs", - "readOnly": true - }, - { - "mountPath": "/etc/nginx/certs", - "name": "azureproxy-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-svc-redirector-token-ngjg2", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "hostNetwork": true, - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeName": "aks-nodepool1-19574989-0", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 2000001000, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "kube-svc-redirector", - "serviceAccountName": "kube-svc-redirector", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/disk-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/memory-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/unschedulable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/network-unavailable", - "operator": "Exists" - } - ], - "volumes": [ - { - "configMap": { - "defaultMode": 420, - "name": "azureproxy-nginx" - }, - "name": "azureproxy-nginx" - }, - { - "configMap": { - "defaultMode": 420, - "name": "azureproxy-config" - }, - "name": "azureproxy-configs" - }, - { - "name": "azureproxy-certs", - "secret": { - 
"defaultMode": 420, - "secretName": "azureproxy-certs" - } - }, - { - "name": "kube-svc-redirector-token-ngjg2", - "secret": { - "defaultMode": 420, - "secretName": "kube-svc-redirector-token-ngjg2" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-15T20:54:26Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-15T20:55:03Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-15T20:54:26Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://942d4ddc66e488245fa77cf331a38de7df760d5d5d96b344f5bfbc84adbab861", - "image": "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/nginx@sha256:91d22184f3f9b1be658c2cc2c12d324de7ff12c8b9c9a597905457b4d93b069d", - "lastState": {}, - "name": "azureproxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-07-15T20:55:02Z" - } - } - }, - { - "containerID": "docker://71d6f73215c0994fa2f7b340732d5e4453a86ece31dcf5278fb2abc32e3e4de2", - "image": "aksrepos.azurecr.io/mirror/kube-svc-redirect:v1.0.2", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef", - "lastState": {}, - "name": "redirector", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-07-15T20:54:36Z" - } - } - } - ], - "hostIP": "10.240.0.4", - "phase": "Running", - "podIP": "10.240.0.4", - "qosClass": "Burstable", - "startTime": "2019-07-15T20:54:26Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "creationTimestamp": "2019-08-07T18:57:56Z", - "generateName": "kube-svc-redirect-", - "labels": { - "component": "kube-svc-redirect", - "controller-revision-hash": "1216437240", - "pod-template-generation": "9", - "tier": "node" - }, - "name": "kube-svc-redirect-mqk98", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "DaemonSet", - "name": "kube-svc-redirect", - "uid": "45a5fc62-44e5-11e9-9920-423525a6b683" - } - ], - "resourceVersion": "16965477", - "selfLink": "/api/v1/namespaces/kube-system/pods/kube-svc-redirect-mqk98", - "uid": "44a61692-b945-11e9-a1b6-127094e7fd94" - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - } - }, - "containers": [ - { - "env": [ - { - "name": "KUBERNETES_SVC_IP", - "value": "10.0.0.1" - }, - { - "name": "KUBE_SVC_REDIRECTOR_PROXY_IP", - "value": "127.0.0.1:14612" - }, - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": 
"aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2", - "imagePullPolicy": "IfNotPresent", - "name": "redirector", - "resources": { - "requests": { - "cpu": "5m", - "memory": "2Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-svc-redirector-token-ngjg2", - "readOnly": true - } - ] - }, - { - "env": [ - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine", - "imagePullPolicy": "IfNotPresent", - "name": "azureproxy", - "ports": [ - { - "containerPort": 14612, - "hostPort": 14612, - "protocol": "TCP" - } - ], - "resources": { - "requests": { - "cpu": "5m", - "memory": "32Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/nginx/nginx.conf", - "name": "azureproxy-nginx", - "readOnly": true, - "subPath": "nginx.conf" - }, - { - "mountPath": "/etc/nginx/conf.d", - "name": "azureproxy-configs", - "readOnly": true - }, - { - "mountPath": "/etc/nginx/certs", - "name": "azureproxy-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-svc-redirector-token-ngjg2", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "hostNetwork": true, - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeName": "aks-nodepool1-19574989-3", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 2000001000, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "kube-svc-redirector", - "serviceAccountName": "kube-svc-redirector", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/disk-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/memory-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/unschedulable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/network-unavailable", - "operator": "Exists" - } - ], - "volumes": [ - { - "configMap": { - "defaultMode": 420, - "name": "azureproxy-nginx" - }, - "name": "azureproxy-nginx" - }, - { - "configMap": { - "defaultMode": 420, - "name": "azureproxy-config" - }, - "name": "azureproxy-configs" - }, - { - "name": "azureproxy-certs", - "secret": { - 
"defaultMode": 420, - "secretName": "azureproxy-certs" - } - }, - { - "name": "kube-svc-redirector-token-ngjg2", - "secret": { - "defaultMode": 420, - "secretName": "kube-svc-redirector-token-ngjg2" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-07T18:57:58Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-07T18:58:09Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-07T18:57:58Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://5f47547dc8e4fceb8e2a6e01cee5612b49e2dc2d5682b6a58f648d8223b3a6b0", - "image": "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/nginx@sha256:91d22184f3f9b1be658c2cc2c12d324de7ff12c8b9c9a597905457b4d93b069d", - "lastState": {}, - "name": "azureproxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-08-07T18:58:09Z" - } - } - }, - { - "containerID": "docker://5da4e17288399f8e2d4998e5c06159d0d2d39690e89195c5381ab7e3c91aaf99", - "image": "aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2", - "imageID": "docker-pullable://aksrepos.azurecr.io/prod/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef", - "lastState": {}, - "name": "redirector", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-08-07T18:58:08Z" - } - } - } - ], - "hostIP": "10.240.0.6", - "phase": "Running", - "podIP": "10.240.0.6", - "qosClass": "Burstable", - "startTime": "2019-08-07T18:57:58Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "creationTimestamp": "2019-07-15T20:55:38Z", - "generateName": "kube-svc-redirect-", - "labels": { - "component": "kube-svc-redirect", - "controller-revision-hash": "1216437240", - "pod-template-generation": "9", - "tier": "node" - }, - "name": "kube-svc-redirect-qf4tl", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "DaemonSet", - "name": "kube-svc-redirect", - "uid": "45a5fc62-44e5-11e9-9920-423525a6b683" - } - ], - "resourceVersion": "15144014", - "selfLink": "/api/v1/namespaces/kube-system/pods/kube-svc-redirect-qf4tl", - "uid": "e690309f-a742-11e9-a38a-22d1c75c4357" - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - } - }, - "containers": [ - { - "env": [ - { - "name": "KUBERNETES_SVC_IP", - "value": "10.0.0.1" - }, - { - "name": "KUBE_SVC_REDIRECTOR_PROXY_IP", - "value": "127.0.0.1:14612" - }, - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": 
"aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2", - "imagePullPolicy": "IfNotPresent", - "name": "redirector", - "resources": { - "requests": { - "cpu": "5m", - "memory": "2Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-svc-redirector-token-ngjg2", - "readOnly": true - } - ] - }, - { - "env": [ - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine", - "imagePullPolicy": "IfNotPresent", - "name": "azureproxy", - "ports": [ - { - "containerPort": 14612, - "hostPort": 14612, - "protocol": "TCP" - } - ], - "resources": { - "requests": { - "cpu": "5m", - "memory": "32Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/nginx/nginx.conf", - "name": "azureproxy-nginx", - "readOnly": true, - "subPath": "nginx.conf" - }, - { - "mountPath": "/etc/nginx/conf.d", - "name": "azureproxy-configs", - "readOnly": true - }, - { - "mountPath": "/etc/nginx/certs", - "name": "azureproxy-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-svc-redirector-token-ngjg2", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "hostNetwork": true, - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeName": "aks-nodepool1-19574989-1", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 2000001000, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "kube-svc-redirector", - "serviceAccountName": "kube-svc-redirector", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/disk-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/memory-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/unschedulable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/network-unavailable", - "operator": "Exists" - } - ], - "volumes": [ - { - "configMap": { - "defaultMode": 420, - "name": "azureproxy-nginx" - }, - "name": "azureproxy-nginx" - }, - { - "configMap": { - "defaultMode": 420, - "name": "azureproxy-config" - }, - "name": "azureproxy-configs" - }, - { - "name": "azureproxy-certs", - "secret": { - 
"defaultMode": 420, - "secretName": "azureproxy-certs" - } - }, - { - "name": "kube-svc-redirector-token-ngjg2", - "secret": { - "defaultMode": 420, - "secretName": "kube-svc-redirector-token-ngjg2" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-15T20:55:38Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-15T20:55:47Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-15T20:55:38Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://a0fa774ceba9ae78cf75ffb96a0d8f3ca4d48e5d9d17218957b07e8b1e7e2862", - "image": "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/nginx@sha256:91d22184f3f9b1be658c2cc2c12d324de7ff12c8b9c9a597905457b4d93b069d", - "lastState": {}, - "name": "azureproxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-07-15T20:55:46Z" - } - } - }, - { - "containerID": "docker://7f281954c57ff6529aaeea2e79dc45a8abeabd4b360c2bbea5c0830ddac4f093", - "image": "aksrepos.azurecr.io/mirror/kube-svc-redirect:v1.0.2", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef", - "lastState": {}, - "name": "redirector", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-07-15T20:55:44Z" - } - } - } - ], - "hostIP": "10.240.0.5", - "phase": "Running", - "podIP": "10.240.0.5", - "qosClass": "Burstable", - "startTime": "2019-07-15T20:55:38Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "creationTimestamp": "2019-07-15T20:56:33Z", - "generateName": "kube-svc-redirect-", - "labels": { - "component": "kube-svc-redirect", - "controller-revision-hash": "1216437240", - "pod-template-generation": "9", - "tier": "node" - }, - "name": "kube-svc-redirect-rtw2t", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "DaemonSet", - "name": "kube-svc-redirect", - "uid": "45a5fc62-44e5-11e9-9920-423525a6b683" - } - ], - "resourceVersion": "15144039", - "selfLink": "/api/v1/namespaces/kube-system/pods/kube-svc-redirect-rtw2t", - "uid": "06fef5f6-a743-11e9-a38a-22d1c75c4357" - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - } - }, - "containers": [ - { - "env": [ - { - "name": "KUBERNETES_SVC_IP", - "value": "10.0.0.1" - }, - { - "name": "KUBE_SVC_REDIRECTOR_PROXY_IP", - "value": "127.0.0.1:14612" - }, - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": 
"aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2", - "imagePullPolicy": "IfNotPresent", - "name": "redirector", - "resources": { - "requests": { - "cpu": "5m", - "memory": "2Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-svc-redirector-token-ngjg2", - "readOnly": true - } - ] - }, - { - "env": [ - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine", - "imagePullPolicy": "IfNotPresent", - "name": "azureproxy", - "ports": [ - { - "containerPort": 14612, - "hostPort": 14612, - "protocol": "TCP" - } - ], - "resources": { - "requests": { - "cpu": "5m", - "memory": "32Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/nginx/nginx.conf", - "name": "azureproxy-nginx", - "readOnly": true, - "subPath": "nginx.conf" - }, - { - "mountPath": "/etc/nginx/conf.d", - "name": "azureproxy-configs", - "readOnly": true - }, - { - "mountPath": "/etc/nginx/certs", - "name": "azureproxy-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kube-svc-redirector-token-ngjg2", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "hostNetwork": true, - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeName": "aks-nodepool1-19574989-2", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 2000001000, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "kube-svc-redirector", - "serviceAccountName": "kube-svc-redirector", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/disk-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/memory-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/unschedulable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/network-unavailable", - "operator": "Exists" - } - ], - "volumes": [ - { - "configMap": { - "defaultMode": 420, - "name": "azureproxy-nginx" - }, - "name": "azureproxy-nginx" - }, - { - "configMap": { - "defaultMode": 420, - "name": "azureproxy-config" - }, - "name": "azureproxy-configs" - }, - { - "name": "azureproxy-certs", - "secret": { - 
"defaultMode": 420, - "secretName": "azureproxy-certs" - } - }, - { - "name": "kube-svc-redirector-token-ngjg2", - "secret": { - "defaultMode": 420, - "secretName": "kube-svc-redirector-token-ngjg2" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-15T20:56:33Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-15T20:56:49Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-15T20:56:33Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://aaea93b1e6a0c55e9ac0c002ffa6fdfb99e98b2f1a38c474cc2b9b65e947b6d9", - "image": "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/nginx@sha256:91d22184f3f9b1be658c2cc2c12d324de7ff12c8b9c9a597905457b4d93b069d", - "lastState": {}, - "name": "azureproxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-07-15T20:56:48Z" - } - } - }, - { - "containerID": "docker://c03c8b9e99095205945e15bef5f60c0501c8a0a77186afc1fcc8eb0804274e78", - "image": "aksrepos.azurecr.io/mirror/kube-svc-redirect:v1.0.2", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef", - "lastState": {}, - "name": "redirector", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-07-15T20:56:43Z" - } - } - } - ], - "hostIP": "10.240.0.7", - "phase": "Running", - "podIP": "10.240.0.7", - "qosClass": "Burstable", - "startTime": "2019-07-15T20:56:33Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "creationTimestamp": "2019-07-09T02:38:07Z", - "generateName": "kubernetes-dashboard-6dcdfcd68b-", - "labels": { - "k8s-app": "kubernetes-dashboard", - "kubernetes.io/cluster-service": "true", - "pod-template-hash": "2878978246" - }, - "name": "kubernetes-dashboard-6dcdfcd68b-nfqbf", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "ReplicaSet", - "name": "kubernetes-dashboard-6dcdfcd68b", - "uid": "71ff2821-a1f2-11e9-9bc6-127bb0ec03b8" - } - ], - "resourceVersion": "15831517", - "selfLink": "/api/v1/namespaces/kube-system/pods/kubernetes-dashboard-6dcdfcd68b-nfqbf", - "uid": "9583b2ab-a1f2-11e9-8b08-d602e29755d5" - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - } - }, - "containers": [ - { - "env": [ - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/kubernetes-dashboard-amd64:v1.10.1", - "imagePullPolicy": "IfNotPresent", - 
"livenessProbe": { - "failureThreshold": 3, - "httpGet": { - "path": "/", - "port": 9090, - "scheme": "HTTP" - }, - "initialDelaySeconds": 30, - "periodSeconds": 10, - "successThreshold": 1, - "timeoutSeconds": 30 - }, - "name": "main", - "ports": [ - { - "containerPort": 9090, - "name": "http", - "protocol": "TCP" - } - ], - "resources": { - "limits": { - "cpu": "100m", - "memory": "500Mi" - }, - "requests": { - "cpu": "100m", - "memory": "50Mi" - } - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "kubernetes-dashboard-token-w4t8s", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeName": "aks-nodepool1-19574989-0", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 2000001000, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "kubernetes-dashboard", - "serviceAccountName": "kubernetes-dashboard", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "kubernetes-dashboard-token-w4t8s", - "secret": { - "defaultMode": 420, - "secretName": "kubernetes-dashboard-token-w4t8s" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-09T02:38:14Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-09T02:39:08Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-09T02:38:07Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://2b042ce7bdf3d03cb606317b19ee797cbf7b99c65076a67001064bccb313b3cb", - "image": "aksrepos.azurecr.io/mirror/kubernetes-dashboard-amd64:v1.10.1", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/kubernetes-dashboard-amd64@sha256:0ae6b69432e78069c5ce2bcde0fe409c5c4d6f0f4d9cd50a17974fea38898747", - "lastState": {}, - "name": "main", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-07-09T02:39:07Z" - } - } - } - ], - "hostIP": "10.240.0.4", - "phase": "Running", - "podIP": "10.244.1.197", - "qosClass": "Burstable", - "startTime": "2019-07-09T02:38:14Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "creationTimestamp": "2019-07-09T02:38:06Z", - "generateName": "metrics-server-76cd9fb66-", - "labels": { - "k8s-app": "metrics-server", - "pod-template-hash": "327859622" - }, - "name": "metrics-server-76cd9fb66-h2q55", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "ReplicaSet", - "name": "metrics-server-76cd9fb66", - "uid": "71c837df-a1f2-11e9-9bc6-127bb0ec03b8" - } - ], - "resourceVersion": "15144037", - "selfLink": 
"/api/v1/namespaces/kube-system/pods/metrics-server-76cd9fb66-h2q55", - "uid": "9543dbb7-a1f2-11e9-8b08-d602e29755d5" - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - } - }, - "containers": [ - { - "command": [ - "/metrics-server", - "--source=kubernetes.summary_api:''" - ], - "env": [ - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/mirror/metrics-server-amd64:v0.2.1", - "imagePullPolicy": "IfNotPresent", - "name": "metrics-server", - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "metrics-server-token-qtdgm", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeName": "aks-nodepool1-19574989-1", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 2000001000, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "metrics-server", - "serviceAccountName": "metrics-server", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "metrics-server-token-qtdgm", - "secret": { - "defaultMode": 420, - "secretName": "metrics-server-token-qtdgm" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-09T02:38:09Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-09T02:38:20Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-07-09T02:38:07Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://f60ef82657e5ccdfb611a4f3381848dff77a01bddf95c431e4b7a2bf6f4b8087", - "image": "aksrepos.azurecr.io/mirror/metrics-server-amd64:v0.2.1", - "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/metrics-server-amd64@sha256:220c0ed3451cb95e4b2f72dd5dc8d9d39d9f529722e5b29d8286373ce27b117e", - "lastState": {}, - "name": "metrics-server", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-07-09T02:38:18Z" - } - } - } - ], - "hostIP": "10.240.0.5", - "phase": "Running", - "podIP": "10.244.0.193", - "qosClass": "BestEffort", - "startTime": 
"2019-07-09T02:38:09Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "agentVersion": "1.10.0.1", - "dockerProviderVersion": "6.0.0-0", - "schema-versions": "v1" - }, - "creationTimestamp": "2019-08-23T19:53:57Z", - "generateName": "omsagent-", - "labels": { - "controller-revision-hash": "868116844", - "dsName": "omsagent-ds", - "pod-template-generation": "9" - }, - "name": "omsagent-25pks", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "DaemonSet", - "name": "omsagent", - "uid": "e2f8c552-c2d2-11e9-8736-86290fd7dd1f" - } - ], - "resourceVersion": "19063729", - "selfLink": "/api/v1/namespaces/kube-system/pods/omsagent-25pks", - "uid": "be78d7f6-c5df-11e9-8736-86290fd7dd1f" - }, - "spec": { - "containers": [ - { - "env": [ - { - "name": "AKS_RESOURCE_ID", - "value": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test" - }, - { - "name": "AKS_REGION", - "value": "eastus" - }, - { - "name": "CONTROLLER_TYPE", - "value": "DaemonSet" - }, - { - "name": "NODE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.hostIP" - } - } - }, - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "exec": { - "command": [ - "/bin/bash", - "-c", - "/opt/livenessprobe.sh" - ] - }, - "failureThreshold": 3, - "initialDelaySeconds": 60, - "periodSeconds": 60, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "name": "omsagent", - "ports": [ - { - "containerPort": 25225, - "protocol": "TCP" - }, - { - "containerPort": 25224, - "protocol": "UDP" - } - ], - "resources": { - "limits": { - "cpu": "150m", - "memory": "600Mi" - }, - "requests": { - "cpu": "75m", - "memory": "225Mi" - } - }, - "securityContext": { - "privileged": true - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/hostfs", - "name": "host-root", - "readOnly": true - }, - { - "mountPath": "/var/run/host", - "name": "docker-sock" - }, - { - "mountPath": "/var/log", - "name": "host-log" - }, - { - "mountPath": "/var/lib/docker/containers", - "name": "containerlog-path" - }, - { - "mountPath": "/etc/kubernetes/host", - "name": "azure-json-path" - }, - { - "mountPath": "/etc/omsagent-secret", - "name": "omsagent-secret" - }, - { - "mountPath": "/etc/config/settings", - "name": "settings-vol-config", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "omsagent-token-fjmqb", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "nodeName": "aks-nodepool1-19574989-2", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 0, - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, 
- "serviceAccount": "omsagent", - "serviceAccountName": "omsagent", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoSchedule", - "key": "node-role.kubernetes.io/master", - "operator": "Equal", - "value": "true" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/disk-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/memory-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/unschedulable", - "operator": "Exists" - } - ], - "volumes": [ - { - "hostPath": { - "path": "/", - "type": "" - }, - "name": "host-root" - }, - { - "hostPath": { - "path": "/var/run", - "type": "" - }, - "name": "docker-sock" - }, - { - "hostPath": { - "path": "/etc/hostname", - "type": "" - }, - "name": "container-hostname" - }, - { - "hostPath": { - "path": "/var/log", - "type": "" - }, - "name": "host-log" - }, - { - "hostPath": { - "path": "/var/lib/docker/containers", - "type": "" - }, - "name": "containerlog-path" - }, - { - "hostPath": { - "path": "/etc/kubernetes", - "type": "" - }, - "name": "azure-json-path" - }, - { - "name": "omsagent-secret", - "secret": { - "defaultMode": 420, - "secretName": "omsagent-secret" - } - }, - { - "configMap": { - "defaultMode": 420, - "name": "container-azm-ms-agentconfig", - "optional": true - }, - "name": "settings-vol-config" - }, - { - "name": "omsagent-token-fjmqb", - "secret": { - "defaultMode": 420, - "secretName": "omsagent-token-fjmqb" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T19:53:57Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T19:54:44Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T19:53:57Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://acd5cedc2c5874122047c47bb1398f35a7c0297292fc4a0e01345123c233d19a", - "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019", - "imageID": "docker-pullable://mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf", - "lastState": {}, - "name": "omsagent", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-08-23T19:54:43Z" - } - } - } - ], - "hostIP": "10.240.0.7", - "phase": "Running", - "podIP": "10.244.12.169", - "qosClass": "Burstable", - "startTime": "2019-08-23T19:53:57Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "agentVersion": "1.10.0.1", - "dockerProviderVersion": "6.0.0-0", - "schema-versions": "v1" - }, - "creationTimestamp": "2019-08-23T19:51:35Z", - "generateName": "omsagent-", - "labels": { - "controller-revision-hash": "868116844", - "dsName": "omsagent-ds", - "pod-template-generation": "9" - }, - "name": "omsagent-4tncr", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "DaemonSet", - "name": "omsagent", - "uid": 
"e2f8c552-c2d2-11e9-8736-86290fd7dd1f" - } - ], - "resourceVersion": "19063468", - "selfLink": "/api/v1/namespaces/kube-system/pods/omsagent-4tncr", - "uid": "69e68b21-c5df-11e9-8736-86290fd7dd1f" - }, - "spec": { - "containers": [ - { - "env": [ - { - "name": "AKS_RESOURCE_ID", - "value": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test" - }, - { - "name": "AKS_REGION", - "value": "eastus" - }, - { - "name": "CONTROLLER_TYPE", - "value": "DaemonSet" - }, - { - "name": "NODE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.hostIP" - } - } - }, - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "exec": { - "command": [ - "/bin/bash", - "-c", - "/opt/livenessprobe.sh" - ] - }, - "failureThreshold": 3, - "initialDelaySeconds": 60, - "periodSeconds": 60, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "name": "omsagent", - "ports": [ - { - "containerPort": 25225, - "protocol": "TCP" - }, - { - "containerPort": 25224, - "protocol": "UDP" - } - ], - "resources": { - "limits": { - "cpu": "150m", - "memory": "600Mi" - }, - "requests": { - "cpu": "75m", - "memory": "225Mi" - } - }, - "securityContext": { - "privileged": true - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/hostfs", - "name": "host-root", - "readOnly": true - }, - { - "mountPath": "/var/run/host", - "name": "docker-sock" - }, - { - "mountPath": "/var/log", - "name": "host-log" - }, - { - "mountPath": "/var/lib/docker/containers", - "name": "containerlog-path" - }, - { - "mountPath": "/etc/kubernetes/host", - "name": "azure-json-path" - }, - { - "mountPath": "/etc/omsagent-secret", - "name": "omsagent-secret" - }, - { - "mountPath": "/etc/config/settings", - "name": "settings-vol-config", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "omsagent-token-fjmqb", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "nodeName": "aks-nodepool1-19574989-1", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 0, - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "omsagent", - "serviceAccountName": "omsagent", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoSchedule", - "key": "node-role.kubernetes.io/master", - "operator": "Equal", - "value": "true" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/disk-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/memory-pressure", - 
"operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/unschedulable", - "operator": "Exists" - } - ], - "volumes": [ - { - "hostPath": { - "path": "/", - "type": "" - }, - "name": "host-root" - }, - { - "hostPath": { - "path": "/var/run", - "type": "" - }, - "name": "docker-sock" - }, - { - "hostPath": { - "path": "/etc/hostname", - "type": "" - }, - "name": "container-hostname" - }, - { - "hostPath": { - "path": "/var/log", - "type": "" - }, - "name": "host-log" - }, - { - "hostPath": { - "path": "/var/lib/docker/containers", - "type": "" - }, - "name": "containerlog-path" - }, - { - "hostPath": { - "path": "/etc/kubernetes", - "type": "" - }, - "name": "azure-json-path" - }, - { - "name": "omsagent-secret", - "secret": { - "defaultMode": 420, - "secretName": "omsagent-secret" - } - }, - { - "configMap": { - "defaultMode": 420, - "name": "container-azm-ms-agentconfig", - "optional": true - }, - "name": "settings-vol-config" - }, - { - "name": "omsagent-token-fjmqb", - "secret": { - "defaultMode": 420, - "secretName": "omsagent-token-fjmqb" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T19:51:35Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T19:52:28Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T19:51:35Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://7803b80452aa34460c848d9c1ca65d6bd925665cf78faaa8dbc122482f93c744", - "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019", - "imageID": "docker-pullable://mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf", - "lastState": {}, - "name": "omsagent", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-08-23T19:52:27Z" - } - } - } - ], - "hostIP": "10.240.0.5", - "phase": "Running", - "podIP": "10.244.0.251", - "qosClass": "Burstable", - "startTime": "2019-08-23T19:51:35Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "agentVersion": "1.10.0.1", - "dockerProviderVersion": "6.0.0-0", - "schema-versions": "v1" - }, - "creationTimestamp": "2019-08-23T19:53:36Z", - "generateName": "omsagent-", - "labels": { - "controller-revision-hash": "868116844", - "dsName": "omsagent-ds", - "pod-template-generation": "9" - }, - "name": "omsagent-h44fk", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "DaemonSet", - "name": "omsagent", - "uid": "e2f8c552-c2d2-11e9-8736-86290fd7dd1f" - } - ], - "resourceVersion": "19063631", - "selfLink": "/api/v1/namespaces/kube-system/pods/omsagent-h44fk", - "uid": "b1e04e1c-c5df-11e9-8736-86290fd7dd1f" - }, - "spec": { - "containers": [ - { - "env": [ - { - "name": "AKS_RESOURCE_ID", - "value": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test" - }, - { - "name": "AKS_REGION", - "value": "eastus" - }, - { - "name": "CONTROLLER_TYPE", - "value": "DaemonSet" - }, - { - "name": "NODE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - 
"fieldPath": "status.hostIP" - } - } - }, - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "exec": { - "command": [ - "/bin/bash", - "-c", - "/opt/livenessprobe.sh" - ] - }, - "failureThreshold": 3, - "initialDelaySeconds": 60, - "periodSeconds": 60, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "name": "omsagent", - "ports": [ - { - "containerPort": 25225, - "protocol": "TCP" - }, - { - "containerPort": 25224, - "protocol": "UDP" - } - ], - "resources": { - "limits": { - "cpu": "150m", - "memory": "600Mi" - }, - "requests": { - "cpu": "75m", - "memory": "225Mi" - } - }, - "securityContext": { - "privileged": true - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/hostfs", - "name": "host-root", - "readOnly": true - }, - { - "mountPath": "/var/run/host", - "name": "docker-sock" - }, - { - "mountPath": "/var/log", - "name": "host-log" - }, - { - "mountPath": "/var/lib/docker/containers", - "name": "containerlog-path" - }, - { - "mountPath": "/etc/kubernetes/host", - "name": "azure-json-path" - }, - { - "mountPath": "/etc/omsagent-secret", - "name": "omsagent-secret" - }, - { - "mountPath": "/etc/config/settings", - "name": "settings-vol-config", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "omsagent-token-fjmqb", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "nodeName": "aks-nodepool1-19574989-0", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 0, - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "omsagent", - "serviceAccountName": "omsagent", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoSchedule", - "key": "node-role.kubernetes.io/master", - "operator": "Equal", - "value": "true" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/disk-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/memory-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/unschedulable", - "operator": "Exists" - } - ], - "volumes": [ - { - "hostPath": { - "path": "/", - "type": "" - }, - "name": "host-root" - }, - { - "hostPath": { - "path": "/var/run", - "type": "" - }, - "name": "docker-sock" - }, - { - "hostPath": { - "path": "/etc/hostname", - "type": "" - }, - "name": "container-hostname" - }, - { - "hostPath": { - "path": "/var/log", - "type": "" - }, - "name": "host-log" - }, - { - "hostPath": { - "path": "/var/lib/docker/containers", - "type": "" - }, - "name": "containerlog-path" - }, - { - "hostPath": { - "path": 
"/etc/kubernetes", - "type": "" - }, - "name": "azure-json-path" - }, - { - "name": "omsagent-secret", - "secret": { - "defaultMode": 420, - "secretName": "omsagent-secret" - } - }, - { - "configMap": { - "defaultMode": 420, - "name": "container-azm-ms-agentconfig", - "optional": true - }, - "name": "settings-vol-config" - }, - { - "name": "omsagent-token-fjmqb", - "secret": { - "defaultMode": 420, - "secretName": "omsagent-token-fjmqb" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T19:53:36Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T19:53:51Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T19:53:36Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://4b71a82e472a8e5d0bc4ef9b9b5d2ccf25741b31269480a77e29424ebe87757c", - "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019", - "imageID": "docker-pullable://mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf", - "lastState": {}, - "name": "omsagent", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-08-23T19:53:49Z" - } - } - } - ], - "hostIP": "10.240.0.4", - "phase": "Running", - "podIP": "10.244.1.35", - "qosClass": "Burstable", - "startTime": "2019-08-23T19:53:36Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "agentVersion": "1.10.0.1", - "dockerProviderVersion": "6.0.0-0", - "schema-versions": "v1" - }, - "creationTimestamp": "2019-08-23T19:51:28Z", - "generateName": "omsagent-rs-5bb85d7468-", - "labels": { - "pod-template-hash": "1664183024", - "rsName": "omsagent-rs" - }, - "name": "omsagent-rs-5bb85d7468-dnxpw", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "ReplicaSet", - "name": "omsagent-rs-5bb85d7468", - "uid": "659ec974-c5df-11e9-8736-86290fd7dd1f" - } - ], - "resourceVersion": "19063495", - "selfLink": "/api/v1/namespaces/kube-system/pods/omsagent-rs-5bb85d7468-dnxpw", - "uid": "65a6f978-c5df-11e9-8736-86290fd7dd1f" - }, - "spec": { - "containers": [ - { - "env": [ - { - "name": "AKS_RESOURCE_ID", - "value": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test" - }, - { - "name": "AKS_REGION", - "value": "eastus" - }, - { - "name": "CONTROLLER_TYPE", - "value": "ReplicaSet" - }, - { - "name": "NODE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.hostIP" - } - } - }, - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "exec": { - "command": [ - "/bin/bash", - "-c", - "/opt/livenessprobe.sh" - ] - }, - "failureThreshold": 3, - "initialDelaySeconds": 60, - "periodSeconds": 60, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "name": "omsagent", - "ports": [ - { - "containerPort": 25225, - "protocol": "TCP" - }, - { - "containerPort": 25224, - "protocol": "UDP" - }, - { - "containerPort": 25227, - "name": "in-rs-tcp", - "protocol": "TCP" - } - ], - "resources": { - "limits": { - "cpu": "150m", - "memory": "500Mi" - }, - "requests": { - "cpu": "110m", - "memory": "250Mi" - } - }, - "securityContext": { - "privileged": true - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/host", - "name": "docker-sock" - }, - { - "mountPath": "/var/log", - "name": "host-log" - }, - { - "mountPath": "/var/lib/docker/containers", - "name": "containerlog-path" - }, - { - "mountPath": "/etc/kubernetes/host", - "name": "azure-json-path" - }, - { - "mountPath": "/etc/omsagent-secret", - "name": "omsagent-secret", - "readOnly": true - }, - { - "mountPath": "/etc/config", - "name": "omsagent-rs-config" - }, - { - "mountPath": "/etc/config/settings", - "name": "settings-vol-config", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "omsagent-token-fjmqb", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "nodeName": "aks-nodepool1-19574989-0", - "nodeSelector": { - "beta.kubernetes.io/os": "linux", - "kubernetes.io/role": "agent" - }, - "priority": 0, - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "omsagent", - "serviceAccountName": "omsagent", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "hostPath": { - "path": "/var/run", - "type": "" - }, - "name": "docker-sock" - }, - { - "hostPath": { - "path": "/etc/hostname", - "type": "" - }, - "name": "container-hostname" - }, - { - "hostPath": { - "path": "/var/log", - "type": "" - }, - "name": "host-log" - }, - { - "hostPath": { - "path": "/var/lib/docker/containers", - "type": "" - }, - "name": "containerlog-path" - }, - { - "hostPath": { - "path": "/etc/kubernetes", - "type": "" - }, - "name": "azure-json-path" - }, - { - "name": "omsagent-secret", - "secret": { - "defaultMode": 420, - "secretName": "omsagent-secret" - } - }, - { - "configMap": { - "defaultMode": 420, - "name": "omsagent-rs-config" - }, - "name": "omsagent-rs-config" - }, - { - "configMap": { - "defaultMode": 420, - "name": "container-azm-ms-agentconfig", - "optional": true - }, - "name": "settings-vol-config" - }, - { - "name": "omsagent-token-fjmqb", - "secret": { - "defaultMode": 420, - "secretName": "omsagent-token-fjmqb" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T19:51:28Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T19:52:37Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": 
null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T19:51:28Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://7e080036bc213a7dadd95b1d8439e06a1b62822219642a83cab059dc4292b0e5", - "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019", - "imageID": "docker-pullable://mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf", - "lastState": {}, - "name": "omsagent", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-08-23T19:52:37Z" - } - } - } - ], - "hostIP": "10.240.0.4", - "phase": "Running", - "podIP": "10.244.1.34", - "qosClass": "Burstable", - "startTime": "2019-08-23T19:51:28Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "agentVersion": "1.10.0.1", - "dockerProviderVersion": "6.0.0-0", - "schema-versions": "v1" - }, - "creationTimestamp": "2019-08-23T19:52:35Z", - "generateName": "omsagent-", - "labels": { - "controller-revision-hash": "868116844", - "dsName": "omsagent-ds", - "pod-template-generation": "9" - }, - "name": "omsagent-sb6xx", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "DaemonSet", - "name": "omsagent", - "uid": "e2f8c552-c2d2-11e9-8736-86290fd7dd1f" - } - ], - "resourceVersion": "19063577", - "selfLink": "/api/v1/namespaces/kube-system/pods/omsagent-sb6xx", - "uid": "8dbd5e8b-c5df-11e9-8736-86290fd7dd1f" - }, - "spec": { - "containers": [ - { - "env": [ - { - "name": "AKS_RESOURCE_ID", - "value": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test" - }, - { - "name": "AKS_REGION", - "value": "eastus" - }, - { - "name": "CONTROLLER_TYPE", - "value": "DaemonSet" - }, - { - "name": "NODE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.hostIP" - } - } - }, - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "exec": { - "command": [ - "/bin/bash", - "-c", - "/opt/livenessprobe.sh" - ] - }, - "failureThreshold": 3, - "initialDelaySeconds": 60, - "periodSeconds": 60, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "name": "omsagent", - "ports": [ - { - "containerPort": 25225, - "protocol": "TCP" - }, - { - "containerPort": 25224, - "protocol": "UDP" - } - ], - "resources": { - "limits": { - "cpu": "150m", - "memory": "600Mi" - }, - "requests": { - "cpu": "75m", - "memory": "225Mi" - } - }, - "securityContext": { - "privileged": true - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/hostfs", - "name": "host-root", - "readOnly": true - }, - 
{ - "mountPath": "/var/run/host", - "name": "docker-sock" - }, - { - "mountPath": "/var/log", - "name": "host-log" - }, - { - "mountPath": "/var/lib/docker/containers", - "name": "containerlog-path" - }, - { - "mountPath": "/etc/kubernetes/host", - "name": "azure-json-path" - }, - { - "mountPath": "/etc/omsagent-secret", - "name": "omsagent-secret" - }, - { - "mountPath": "/etc/config/settings", - "name": "settings-vol-config", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "omsagent-token-fjmqb", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "nodeName": "aks-nodepool1-19574989-3", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 0, - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "omsagent", - "serviceAccountName": "omsagent", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoSchedule", - "key": "node-role.kubernetes.io/master", - "operator": "Equal", - "value": "true" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/disk-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/memory-pressure", - "operator": "Exists" - }, - { - "effect": "NoSchedule", - "key": "node.kubernetes.io/unschedulable", - "operator": "Exists" - } - ], - "volumes": [ - { - "hostPath": { - "path": "/", - "type": "" - }, - "name": "host-root" - }, - { - "hostPath": { - "path": "/var/run", - "type": "" - }, - "name": "docker-sock" - }, - { - "hostPath": { - "path": "/etc/hostname", - "type": "" - }, - "name": "container-hostname" - }, - { - "hostPath": { - "path": "/var/log", - "type": "" - }, - "name": "host-log" - }, - { - "hostPath": { - "path": "/var/lib/docker/containers", - "type": "" - }, - "name": "containerlog-path" - }, - { - "hostPath": { - "path": "/etc/kubernetes", - "type": "" - }, - "name": "azure-json-path" - }, - { - "name": "omsagent-secret", - "secret": { - "defaultMode": 420, - "secretName": "omsagent-secret" - } - }, - { - "configMap": { - "defaultMode": 420, - "name": "container-azm-ms-agentconfig", - "optional": true - }, - "name": "settings-vol-config" - }, - { - "name": "omsagent-token-fjmqb", - "secret": { - "defaultMode": 420, - "secretName": "omsagent-token-fjmqb" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T19:52:35Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T19:53:25Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-23T19:52:35Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://f4f0cb19e5da394a4332847953c18d9321319f2ef422533b890ab844cb997879", - "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019", - "imageID": "docker-pullable://mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf", - "lastState": {}, - "name": "omsagent", - "ready": true, - "restartCount": 0, - "state": { - 
"running": { - "startedAt": "2019-08-23T19:53:24Z" - } - } - } - ], - "hostIP": "10.240.0.6", - "phase": "Running", - "podIP": "10.244.2.62", - "qosClass": "Burstable", - "startTime": "2019-08-23T19:52:35Z" - } - }, - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "creationTimestamp": "2019-08-12T20:28:08Z", - "generateName": "tunnelfront-65c8cfb7cc-", - "labels": { - "component": "tunnel", - "pod-template-hash": "2174796377" - }, - "name": "tunnelfront-65c8cfb7cc-z8srb", - "namespace": "kube-system", - "ownerReferences": [ - { - "apiVersion": "apps/v1", - "blockOwnerDeletion": true, - "controller": true, - "kind": "ReplicaSet", - "name": "tunnelfront-65c8cfb7cc", - "uid": "7013afa3-a742-11e9-a08d-96dd47774ee5" - } - ], - "resourceVersion": "17628809", - "selfLink": "/api/v1/namespaces/kube-system/pods/tunnelfront-65c8cfb7cc-z8srb", - "uid": "b2a0e1b3-bd3f-11e9-b2a7-d61658c73830" - }, - "spec": { - "affinity": { - "nodeAffinity": { - "requiredDuringSchedulingIgnoredDuringExecution": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "kubernetes.azure.com/cluster", - "operator": "Exists" - } - ] - } - ] - } - } - }, - "containers": [ - { - "env": [ - { - "name": "OVERRIDE_TUNNEL_SERVER_NAME", - "value": "t_dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "TUNNEL_CLUSTERUSER_NAME", - "value": "28957308" - }, - { - "name": "TUNNELGATEWAY_SERVER_NAME", - "value": "dilipr-hea-dilipr-health-te-72c8e8-0b16acad.tun.eastus.azmk8s.io" - }, - { - "name": "TUNNELGATEWAY_SSH_PORT", - "value": "22" - }, - { - "name": "TUNNELGATEWAY_TLS_PORT", - "value": "443" - }, - { - "name": "KUBE_CONFIG", - "value": "/etc/kubernetes/kubeconfig/kubeconfig" - }, - { - "name": "KUBERNETES_PORT_443_TCP_ADDR", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - }, - { - "name": "KUBERNETES_PORT", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_PORT_443_TCP", - "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443" - }, - { - "name": "KUBERNETES_SERVICE_HOST", - "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io" - } - ], - "image": "aksrepos.azurecr.io/prod/hcp-tunnel-front:v1.9.2-v4.0.7", - "imagePullPolicy": "IfNotPresent", - "livenessProbe": { - "exec": { - "command": [ - "/lib/tunnel-front/check-tunnel-connection.sh" - ] - }, - "failureThreshold": 12, - "initialDelaySeconds": 10, - "periodSeconds": 60, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "name": "tunnel-front", - "resources": { - "requests": { - "cpu": "10m", - "memory": "64Mi" - } - }, - "securityContext": { - "privileged": true - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/kubernetes/kubeconfig", - "name": "kubeconfig", - "readOnly": true - }, - { - "mountPath": "/etc/kubernetes/certs", - "name": "certificates", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "tunnelfront-token-njgvg", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "Default", - "imagePullSecrets": [ - { - "name": "emptyacrsecret" - } - ], - "nodeName": "aks-nodepool1-19574989-3", - "nodeSelector": { - "beta.kubernetes.io/os": "linux" - }, - "priority": 2000001000, - "priorityClassName": "system-node-critical", - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - 
"serviceAccount": "tunnelfront", - "serviceAccountName": "tunnelfront", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists" - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "configMap": { - "defaultMode": 420, - "name": "tunnelfront-kubecfg", - "optional": true - }, - "name": "kubeconfig" - }, - { - "hostPath": { - "path": "/etc/kubernetes/certs", - "type": "" - }, - "name": "certificates" - }, - { - "name": "tunnelfront-token-njgvg", - "secret": { - "defaultMode": 420, - "secretName": "tunnelfront-token-njgvg" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-12T20:28:08Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-12T20:28:13Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": null, - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-08-12T20:28:08Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://ac3b7482b15ba1f825e7a9ceef11defaccdc2682b9a20bb7c98bc307a8a34cf6", - "image": "aksrepos.azurecr.io/prod/hcp-tunnel-front:v1.9.2-v4.0.7", - "imageID": "docker-pullable://aksrepos.azurecr.io/prod/hcp-tunnel-front@sha256:68878ee3ea1781b322ea3952c3370e31dd89be8bb0864e2bf27bdba6dc904c41", - "lastState": {}, - "name": "tunnel-front", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-08-12T20:28:13Z" - } - } - } - ], - "hostIP": "10.240.0.6", - "phase": "Running", - "podIP": "10.244.2.10", - "qosClass": "Burstable", - "startTime": "2019-08-12T20:28:08Z" - } - } - ], - "kind": "List", - "metadata": { - "resourceVersion": "", - "selfLink": "" - } -} diff --git a/test/unit-tests/plugins/health/test_health_model_definition.json b/test/unit-tests/plugins/health/test_health_model_definition.json deleted file mode 100644 index 31d219705..000000000 --- a/test/unit-tests/plugins/health/test_health_model_definition.json +++ /dev/null @@ -1,42 +0,0 @@ -[ - { - "monitor_id": "monitor_id", - "parent_monitor_id": "parent_monitor_id", - "labels": [ - "container.azm.ms/namespace", - "container.azm.ms/workload-name", - "container.azm.ms/workload-kind", - "container.azm.ms/cluster-region", - "container.azm.ms/cluster-subscription-id", - "container.azm.ms/cluster-resource-group", - "container.azm.ms/cluster-name" - ] - }, - { - "monitor_id": "conditional_monitor_id", - "aggregation_algorithm": "worstOf", - "labels": [ - "kubernetes.io/hostname", - "agentpool", - "kubernetes.io/role", - "container.azm.ms/cluster-region", - "container.azm.ms/cluster-subscription-id", - "container.azm.ms/cluster-resource-group", - "container.azm.ms/cluster-name" - ], - "parent_monitor_id": [ - { - "label": "kubernetes.io/role", - "operator": "==", - "value": "master", - "id": "master_node_pool" - }, - { - "label": "kubernetes.io/role", - "operator": "==", - "value": "agent", - "id": "agent_node_pool" - } - ] - } -] \ No newline at end of file diff --git a/test/unit-tests/plugins/health/unit_monitor_spec.rb b/test/unit-tests/plugins/health/unit_monitor_spec.rb deleted file mode 100644 index 530c98290..000000000 --- 
a/test/unit-tests/plugins/health/unit_monitor_spec.rb +++ /dev/null @@ -1,20 +0,0 @@ -require_relative '../../../../source/plugins/ruby/health/unit_monitor' -require_relative '../test_helpers' - -include HealthModel - -describe "UnitMonitor Spec" do - it "is_aggregate_monitor is false for UnitMonitor" do - # Arrange/Act - monitor = UnitMonitor.new(:monitor_id, :monitor_instance_id, :pass, :time, {}, {}, {}) - # Assert - assert_equal monitor.is_aggregate_monitor, false - end - - it "get_member_monitors is nil for UnitMonitor" do - # Arrange/Act - monitor = UnitMonitor.new(:monitor_id, :monitor_instance_id, :pass, :time, {}, {}, {}) - #Assert - assert_nil monitor.get_member_monitors - end -end \ No newline at end of file diff --git a/test/unit-tests/plugins/health/unit_monitor_test.rb b/test/unit-tests/plugins/health/unit_monitor_test.rb deleted file mode 100644 index d46ae5665..000000000 --- a/test/unit-tests/plugins/health/unit_monitor_test.rb +++ /dev/null @@ -1,16 +0,0 @@ -require_relative '../../../../source/plugins/ruby/health/unit_monitor' -require_relative '../test_helpers' - -class UnitMonitorTest < Minitest::Test - include HealthModel - - def test_is_aggregate_monitor_false - monitor = UnitMonitor.new(:monitor_id, :monitor_instance_id, :pass, :time, {}, {}, {}) - assert_equal monitor.is_aggregate_monitor, false - end - - def test_get_member_monitors_nil - monitor = UnitMonitor.new(:monitor_id, :monitor_instance_id, :pass, :time, {}, {}, {}) - assert_nil monitor.get_member_monitors - end -end diff --git a/test/unit-tests/plugins/test_helpers.rb b/test/unit-tests/plugins/test_helpers.rb deleted file mode 100644 index 543f00ac9..000000000 --- a/test/unit-tests/plugins/test_helpers.rb +++ /dev/null @@ -1,3 +0,0 @@ -gem "minitest" -require "minitest/spec" -require 'minitest/autorun' \ No newline at end of file From 11d1c7a6e3ae9904bfda68a93d50bb65f35ea96e Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Mon, 4 Apr 2022 10:24:18 -0700 Subject: [PATCH 215/301] check platform specific tags (#738) Co-authored-by: Amol Agrawal --- .../ServiceGroupRoot/Scripts/pushAgentToAcr.sh | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh b/deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh index d39cedde0..2738e110e 100644 --- a/deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh +++ b/deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh @@ -19,7 +19,14 @@ if [ $? -ne 0 ]; then echo "-e error unable to get list of mcr tags for azuremonitor/containerinsights/ciprod repository" exit 1 fi -TAG_EXISTS=$(echo $MCR_TAG_RESULT | jq '.tags | contains(["'"$AGENT_RELEASE$AGENT_IMAGE_TAG_SUFFIX"'"])') + +if [[ "$AGENT_IMAGE_FULL_PATH" == *"win-"* ]]; then + echo "checking windows tags" + TAG_EXISTS=$(echo $MCR_TAG_RESULT | jq '.tags | contains(["'"win-$AGENT_RELEASE$AGENT_IMAGE_TAG_SUFFIX"'"])') +else + echo "checking linux tags" + TAG_EXISTS=$(echo $MCR_TAG_RESULT | jq '.tags | contains(["'"$AGENT_RELEASE$AGENT_IMAGE_TAG_SUFFIX"'"])') +fi if $TAG_EXISTS; then echo "-e error ${AGENT_IMAGE_TAG_SUFFIX} already exists in mcr. make sure the image tag is unique" @@ -69,4 +76,4 @@ if [ $? 
-eq 0 ]; then else echo "-e error failed to retag and push image to destination ACR" exit 1 -fi \ No newline at end of file +fi From d12827bdafa1d9047cd223dfed9c09707e328307 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 4 Apr 2022 11:12:33 -0700 Subject: [PATCH 216/301] Gangams/msi test instructions (#739) * instructions for msi test validation * readme updates * readme updates * readme updates * readme updates --- README.md | 31 +++++++---- kubernetes/omsagent.yaml | 112 ++++++++++++++++++++++++++++++++++++--- 2 files changed, 128 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index f0fa40e53..e7206c492 100644 --- a/README.md +++ b/README.md @@ -230,7 +230,7 @@ powershell -ExecutionPolicy bypass # switch to powershell if you are not on pow ``` ##### Developer Build optimizations -If you do not want to build the image from scratch every time you make changes during development,you can choose to build the docker images that are separated out by +If you do not want to build the image from scratch every time you make changes during development,you can choose to build the docker images that are separated out by * Base image and dependencies including agent bootstrap(setup.ps1) * Agent conf and plugin changes @@ -248,7 +248,7 @@ And then run the script to build the image consisting of code and conf changes. .\build-and-publish-dev-docker-image.ps1 -image /: # trigger build code and image and publish docker hub or acr ``` -For the subsequent builds, you can just run - +For the subsequent builds, you can just run - ``` .\build-and-publish-dev-docker-image.ps1 -image /: # trigger build code and image and publish docker hub or acr @@ -316,18 +316,31 @@ Navigate to Kubernetes directory and update the yamls with latest docker image o For DEV and PROD branches, automatically deployed latest yaml with latest agent image (which automatically built by the azure devops pipeline) onto CIDEV and CIPROD AKS clusters in build subscription. So, you can use CIDEV and CIPROD AKS cluster to validate E2E. Similarly, you can set up build and release pipelines for your feature branch. +# Testing MSI Auth Mode Using Yaml + + 1. Enable Monitoring addon with Managed Idenity Auth Mode either using Portal or CLI or Template + 2. Deploy [ARM template](./scripts/onboarding/aks/onboarding-using-msi-auth/) with enabled = false to create DCR, DCR-A and link the workspace to Portal + > Note - Make sure to update the parameter values in existingClusterParam.json file and have enabled = false in template file + `az deployment group create --resource-group --template-file ./existingClusterOnboarding.json --parameters @./existingClusterParam.json` + 3. Get the MSI token (which is valid for 24 hrs.) value via `kubectl get secrets -n kube-system omsagent-aad-msi-token -o=jsonpath='{.data.token}'` + 4. Disable Monitoring addon via `az aks disable-addons -a monitoring -g -n ` + 5. Uncomment MSI auth related yaml lines, replace all the placeholder values, MSI token value and image tag in the omsagent.yaml + 6. Deploy the omsagent.yaml via `kubectl apply -f omsagent.yaml` + > Note: use the image toggle for release E2E validation + 7. validate E2E for LA & Metrics data flows, and other scenarios + # E2E Tests ## For executing tests 1. Deploy the omsagent.yaml with your agent image. In the yaml, make sure `ISTEST` environment variable set to `true` if its not set already -2. 
Update the Service Principal CLIENT_ID, CLIENT_SECRET and TENANT_ID placeholder values and apply e2e-tests.yaml to execute the tests +2. Update the Service Principal CLIENT_ID, CLIENT_SECRET and TENANT_ID placeholder values and apply e2e-tests.yaml to execute the tests > Note: Service Principal requires reader role on log analytics workspace and cluster resource to query LA and metrics ``` - cd ~/Docker-Provider/test/e2e # based on your repo path - kubectl apply -f e2e-tests.yaml # this will trigger job to run the tests in sonobuoy namespace - kubectl get po -n sonobuoy # to check the pods and jobs associated to tests - ``` + cd ~/Docker-Provider/test/e2e # based on your repo path + kubectl apply -f e2e-tests.yaml # this will trigger job to run the tests in sonobuoy namespace + kubectl get po -n sonobuoy # to check the pods and jobs associated to tests + ``` 3. Download (sonobuoy)[https://github.com/vmware-tanzu/sonobuoy/releases] on your dev box to view the results of the tests ``` results=$(sonobuoy retrieve) # downloads tar file which has logs and test results @@ -338,9 +351,9 @@ For DEV and PROD branches, automatically deployed latest yaml with latest agent ## For adding new tests 1. Add the test python file with your test code under `tests` directory -2. Build the docker image, recommended to use ACR & MCR +2. Build the docker image, recommended to use ACR & MCR ``` - cd ~/Docker-Provider/test/e2e/src # based on your repo path + cd ~/Docker-Provider/test/e2e/src # based on your repo path docker login -u -p # login to acr docker build -f ./core/Dockerfile -t /: . docker push /: diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 24db6f20f..85d0ffb6b 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -27,6 +27,11 @@ rules: - apiGroups: ["apps", "extensions", "autoscaling"] resources: ["replicasets", "deployments", "horizontalpodautoscalers"] verbs: ["list"] + # Uncomment below lines for MSI Auth Mode testing + # - apiGroups: [""] + # resources: ["secrets"] + # resourceNames: [ "omsagent-aad-msi-token" ] + # verbs: ["get", "watch"] - nonResourceURLs: ["/metrics"] verbs: ["get"] --- @@ -297,6 +302,16 @@ data: WSID: "VALUE_WSID" KEY: "VALUE_KEY" --- +# Uncomment below lines for MSI Auth Mode testing +# apiVersion: v1 +# kind: Secret +# metadata: +# name: omsagent-aad-msi-token +# namespace: kube-system +# type: Opaque +# data: +# token: "VALUE_MSI_TOKEN" +# --- apiVersion: apps/v1 kind: DaemonSet metadata: @@ -328,6 +343,41 @@ spec: - name: ndots value: "3" containers: + # Uncomment below lines for MSI Auth Mode testing + # - name: addon-token-adapter + # command: + # - /addon-token-adapter + # args: + # - --secret-namespace=kube-system + # - --secret-name=omsagent-aad-msi-token + # - --token-server-listening-port=8888 + # - --health-server-listening-port=9999 + # # Make sure this matching with version in AKS RP side + # image: mcr.microsoft.com/aks/msi/addon-token-adapter:master.220318.3 + # imagePullPolicy: IfNotPresent + # env: + # - name: AZMON_COLLECT_ENV + # value: "false" + # livenessProbe: + # httpGet: + # path: /healthz + # port: 9999 + # initialDelaySeconds: 10 + # periodSeconds: 60 + # resources: + # limits: + # cpu: 500m + # memory: 500Mi + # requests: + # cpu: 100m + # memory: 100Mi + # securityContext: + # capabilities: + # drop: + # - ALL + # add: + # - NET_ADMIN + # - NET_RAW - name: omsagent image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03172022" imagePullPolicy: IfNotPresent @@ -367,8 +417,9 @@ spec: 
value: "VALUE_USER_ASSIGNED_IDENTITY_CLIENT_ID_VALUE" - name: AZMON_CONTAINERLOGS_ONEAGENT_REGIONS value: "koreacentral,norwayeast,eastus2" - - name: USING_AAD_MSI_AUTH - value: "false" + # Uncomment below lines for MSI Auth Mode testing + # - name: USING_AAD_MSI_AUTH + # value: "true" securityContext: privileged: true ports: @@ -448,8 +499,9 @@ spec: # Update this with the user assigned msi client id for omsagent - name: USER_ASSIGNED_IDENTITY_CLIENT_ID value: "VALUE_USER_ASSIGNED_IDENTITY_CLIENT_ID_VALUE" - - name: USING_AAD_MSI_AUTH - value: "false" + # Uncomment below lines for MSI Auth Mode testing + # - name: USING_AAD_MSI_AUTH + # value: "true" securityContext: privileged: true volumeMounts: @@ -563,6 +615,41 @@ spec: spec: serviceAccountName: omsagent containers: + # Uncomment below lines for MSI Auth Mode testing + # - name: addon-token-adapter + # command: + # - /addon-token-adapter + # args: + # - --secret-namespace=kube-system + # - --secret-name=omsagent-aad-msi-token + # - --token-server-listening-port=8888 + # - --health-server-listening-port=9999 + # # Make sure this matching with version in AKS RP side + # image: mcr.microsoft.com/aks/msi/addon-token-adapter:master.220318.3 + # imagePullPolicy: IfNotPresent + # env: + # - name: AZMON_COLLECT_ENV + # value: "false" + # livenessProbe: + # httpGet: + # path: /healthz + # port: 9999 + # initialDelaySeconds: 10 + # periodSeconds: 60 + # resources: + # limits: + # cpu: 500m + # memory: 500Mi + # requests: + # cpu: 100m + # memory: 100Mi + # securityContext: + # capabilities: + # drop: + # - ALL + # add: + # - NET_ADMIN + # - NET_RAW - name: omsagent image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03172022" imagePullPolicy: IfNotPresent @@ -596,8 +683,9 @@ spec: # Add the below environment variable to true only in sidecar enabled regions, else set it to false - name: SIDECAR_SCRAPING_ENABLED value: "true" - - name: USING_AAD_MSI_AUTH - value: "false" + # Uncomment below lines for MSI Auth Mode testing + # - name: USING_AAD_MSI_AUTH + # value: "true" securityContext: privileged: true ports: @@ -776,6 +864,9 @@ spec: # Add this only for clouds that require cert bootstrapping # - name: REQUIRES_CERT_BOOTSTRAP # value: "true" + # Uncomment below lines for MSI Auth Mode testing + # - name: USING_AAD_MSI_AUTH + # value: "true" volumeMounts: - mountPath: C:\ProgramData\docker\containers name: docker-windows-containers @@ -798,6 +889,10 @@ spec: - mountPath: C:\etc\kubernetes\host name: azure-json-path readOnly: true + # Uncomment below lines for MSI Auth Mode testing + # - mountPath: C:\etc\IMDS-access-token + # name: imds-token + # readOnly: true livenessProbe: exec: command: @@ -855,3 +950,8 @@ spec: secret: secretName: omsagent-adx-secret optional: true + # Uncomment below lines for MSI Auth Mode testing + # - name: imds-token + # secret: + # secretName: omsagent-aad-msi-token + From 9da85dc281af7510abb511d48a0f4bcf6efbdf36 Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Wed, 6 Apr 2022 12:42:45 -0700 Subject: [PATCH 217/301] Add CI Windows Build to MultiArch Dev pipeline (#740) * test image in pools * update dev pipeline - 1 * update dev -1 * fix job names * correct paths * test pool name * update pool name * updated urls * speed up installs * add base build * fix paths * do both builds * fix bug * add pool for common * fix bug * create path * temp remove metadata windows * fix bug * fix docker command * almost there * login to acr * create windows metadata file * address PR comments I Co-authored-by: Amol 
Agrawal --- .pipelines/azure_pipeline_dev.yaml | 284 +++++++++++------- kubernetes/windows/setup.ps1 | 4 + .../windows/install-build-pre-requisites.ps1 | 12 +- 3 files changed, 194 insertions(+), 106 deletions(-) diff --git a/.pipelines/azure_pipeline_dev.yaml b/.pipelines/azure_pipeline_dev.yaml index 395fafebf..4834bcac5 100644 --- a/.pipelines/azure_pipeline_dev.yaml +++ b/.pipelines/azure_pipeline_dev.yaml @@ -14,9 +14,6 @@ pr: include: - ci_dev -pool: - name: Azure-Pipelines-CI-Test-EO - variables: armServiceConnectionName: 'ci-1es-acr-connection' subscription: '9b96ebbd-c57a-42d1-bbe9-b69296e4c7fb' @@ -24,102 +21,185 @@ variables: repoImageName: '${{ variables.containerRegistry }}.azurecr.io/public/azuremonitor/containerinsights/cidev' IS_PR: $[eq(variables['Build.Reason'], 'PullRequest')] -steps: -- bash: | - commit=$(git rev-parse --short HEAD) - echo "##vso[task.setvariable variable=commit;]$commit" - - datetime=$(date +'%m%d%Y') - echo "##vso[task.setvariable variable=datetime;]$datetime" - - cd $(Build.SourcesDirectory)/deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts - tar -czvf ../artifacts.tar.gz pushAgentToAcr.sh - - cd $(Build.SourcesDirectory)/deployment/arc-k8s-extension/ServiceGroupRoot/Scripts - tar -czvf ../artifacts.tar.gz ../../../../charts/azuremonitor-containers/ pushChartToAcr.sh - -- task: CopyFiles@2 - displayName: "Copy ev2 deployment artifacts" - inputs: - SourceFolder: "$(Build.SourcesDirectory)/deployment" - Contents: | - **/* - TargetFolder: '$(Build.ArtifactStagingDirectory)/build' - -- task: CopyFiles@2 - displayName: "Copy ev2 deployment scripts" - inputs: - SourceFolder: "$(Build.SourcesDirectory)/.pipelines" - Contents: | - **/*.sh - TargetFolder: '$(Build.ArtifactStagingDirectory)/build' - -- task: CopyFiles@2 - displayName: "Copy ev2 deployment scripts" - inputs: - SourceFolder: "$(Build.SourcesDirectory)/kubernetes" - Contents: | - *.yaml - TargetFolder: '$(Build.ArtifactStagingDirectory)/build' - -- task: CopyFiles@2 - displayName: "Copy ev2 deployment scripts" - inputs: - SourceFolder: "$(Build.SourcesDirectory)/charts" - Contents: | - **/* - TargetFolder: '$(Build.ArtifactStagingDirectory)/build' - -- task: CopyFiles@2 - displayName: "Copy ev2 deployment scripts" - inputs: - SourceFolder: "$(Build.SourcesDirectory)/test/e2e" - Contents: | - *.yaml - TargetFolder: '$(Build.ArtifactStagingDirectory)/build' - -- task: AzureCLI@2 - displayName: "Docker multi-arch linux build" - inputs: - azureSubscription: ${{ variables.armServiceConnectionName }} - scriptType: bash - scriptLocation: inlineScript - inlineScript: | - - sudo apt-get update && sudo apt-get -y install qemu binfmt-support qemu-user-static - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - - docker buildx create --name testbuilder - docker buildx use testbuilder - - az --version - az account show - az account set -s ${{ variables.subscription }} - az acr login -n ${{ variables.containerRegistry }} - - if [ "$(Build.Reason)" != "PullRequest" ]; then - docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(datetime)-$(commit) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/metadata.json --build-arg IMAGE_TAG=$(datetime)-$(commit) --push . 
- - docker pull ${{ variables.repoImageName }}:$(datetime)-$(commit) - else - docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(datetime)-$(commit) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/metadata.json --build-arg IMAGE_TAG=$(datetime)-$(commit) . - fi - -- task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 - displayName: 'Generation Task' - condition: eq(variables.IS_PR, true) - inputs: - BuildDropPath: '$(Build.ArtifactStagingDirectory)' - DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04' - -- task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 - displayName: 'Generation Task' - condition: eq(variables.IS_PR, false) - inputs: - BuildDropPath: '$(Build.ArtifactStagingDirectory)' - DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04, ${{ variables.repoImageName }}:$(datetime)-$(commit)' - -- task: PublishBuildArtifacts@1 - inputs: - pathToPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: drop +jobs: +- job: common + pool: + name: Azure-Pipelines-CI-Test-EO + steps: + - bash: | + commit=$(git rev-parse --short HEAD) + datetime=$(date +'%m%d%Y') + linuxImagetag="$datetime"-"$commit" + windowsImageTag=win-"$datetime"-"$commit" + echo "##vso[task.setvariable variable=linuxImagetag;isOutput=true]$linuxImagetag" + echo "##vso[task.setvariable variable=windowsImageTag;isOutput=true]$windowsImageTag" + + cd $(Build.SourcesDirectory)/deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts + tar -czvf ../artifacts.tar.gz pushAgentToAcr.sh + + cd $(Build.SourcesDirectory)/deployment/arc-k8s-extension/ServiceGroupRoot/Scripts + tar -czvf ../artifacts.tar.gz ../../../../charts/azuremonitor-containers/ pushChartToAcr.sh + name: setup + + - task: CopyFiles@2 + displayName: "Copy ev2 deployment artifacts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/deployment" + Contents: | + **/* + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + + - task: CopyFiles@2 + displayName: "Copy ev2 deployment scripts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/.pipelines" + Contents: | + **/*.sh + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + + - task: CopyFiles@2 + displayName: "Copy ev2 deployment scripts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/kubernetes" + Contents: | + *.yaml + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + + - task: CopyFiles@2 + displayName: "Copy ev2 deployment scripts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/charts" + Contents: | + **/* + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + + - task: CopyFiles@2 + displayName: "Copy ev2 deployment scripts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/test/e2e" + Contents: | + *.yaml + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + + - task: PublishBuildArtifacts@1 + inputs: + pathToPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: drop + +- job: build_linux + dependsOn: common + pool: + name: Azure-Pipelines-CI-Test-EO + variables: + linuxImagetag: $[ dependencies.common.outputs['setup.linuxImagetag'] ] + + steps: + - task: AzureCLI@2 + displayName: "Docker multi-arch linux build" + inputs: + azureSubscription: ${{ variables.armServiceConnectionName }} + scriptType: bash + scriptLocation: inlineScript + inlineScript: | + mkdir -p $(Build.ArtifactStagingDirectory)/linux + + sudo apt-get update && sudo apt-get -y install qemu binfmt-support 
qemu-user-static + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + + docker buildx create --name testbuilder + docker buildx use testbuilder + + az --version + az account show + az account set -s ${{ variables.subscription }} + az acr login -n ${{ variables.containerRegistry }} + + if [ "$(Build.Reason)" != "PullRequest" ]; then + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg IMAGE_TAG=$(linuxImagetag) --push . + + docker pull ${{ variables.repoImageName }}:$(linuxImagetag) + else + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg IMAGE_TAG=$(linuxImagetag) . + fi + + - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 + displayName: 'Generation Task' + condition: eq(variables.IS_PR, true) + inputs: + BuildDropPath: '$(Build.ArtifactStagingDirectory)/linux' + DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04' + + - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 + displayName: 'Generation Task' + condition: eq(variables.IS_PR, false) + inputs: + BuildDropPath: '$(Build.ArtifactStagingDirectory)/linux' + DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04, ${{ variables.repoImageName }}:$(linuxImagetag)' + + - task: PublishBuildArtifacts@1 + inputs: + pathToPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: drop + +- job: build_windows + dependsOn: common + pool: + name: Azure-Pipelines-Windows-CI-Test-EO + variables: + windowsImageTag: $[ dependencies.common.outputs['setup.windowsImageTag'] ] + + steps: + - task: PowerShell@2 + inputs: + targetType: 'filePath' + filePath: $(System.DefaultWorkingDirectory)/scripts/build/windows/install-build-pre-requisites.ps1 + displayName: 'install prereqs' + + - script: | + setlocal enabledelayedexpansion + powershell.exe -ExecutionPolicy Unrestricted -NoProfile -WindowStyle Hidden -File "build\windows\Makefile.ps1" + endlocal + exit /B %ERRORLEVEL% + displayName: 'build base' + + - task: AzureCLI@2 + displayName: "Docker windows build" + inputs: + azureSubscription: ${{ variables.armServiceConnectionName }} + scriptType: ps + scriptLocation: inlineScript + inlineScript: | + mkdir -p $(Build.ArtifactStagingDirectory)/windows + cd kubernetes/windows + + az --version + az account show + az account set -s ${{ variables.subscription }} + az acr login -n ${{ variables.containerRegistry }} + + @{"image.name"="${{ variables.repoImageName }}:$(windowsImageTag)"} | ConvertTo-Json -Compress | Out-File -Encoding ascii $(Build.ArtifactStagingDirectory)/windows/metadata.json + + docker build --tag ${{ variables.repoImageName }}:$(windowsImageTag) --build-arg IMAGE_TAG=$(windowsImageTag) . 
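          # Sketch only, not part of the checked-in pipeline: a local sanity check that the metadata
          # file written just above round-trips to the same image name the later release stages consume.
          #   $meta = Get-Content "$(Build.ArtifactStagingDirectory)/windows/metadata.json" | ConvertFrom-Json
          #   if ($meta.'image.name' -ne "${{ variables.repoImageName }}:$(windowsImageTag)") { throw "metadata image name mismatch" }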
+ + if ("$(Build.Reason)" -ne "PullRequest") { + docker push ${{ variables.repoImageName }}:$(windowsImageTag) + } + + - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 + displayName: 'Generation Task' + condition: eq(variables.IS_PR, true) + inputs: + BuildDropPath: '$(Build.ArtifactStagingDirectory)/windows' + DockerImagesToScan: 'mcr.microsoft.com/windows/servercore:ltsc2019' + + - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 + displayName: 'Generation Task' + condition: eq(variables.IS_PR, false) + inputs: + BuildDropPath: '$(Build.ArtifactStagingDirectory)/windows' + DockerImagesToScan: 'mcr.microsoft.com/windows/servercore:ltsc2019, ${{ variables.repoImageName }}:$(windowsImageTag)' + + - task: PublishBuildArtifacts@1 + inputs: + pathToPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: drop \ No newline at end of file diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index 33bac61d1..af9e4b40e 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -1,3 +1,7 @@ +# speed up Invoke-WebRequest +# https://stackoverflow.com/questions/28682642/powershell-why-is-using-invoke-webrequest-much-slower-than-a-browser-download +$ProgressPreference = 'SilentlyContinue' + Write-Host ('Creating folder structure') New-Item -Type Directory -Path /installation -ErrorAction SilentlyContinue diff --git a/scripts/build/windows/install-build-pre-requisites.ps1 b/scripts/build/windows/install-build-pre-requisites.ps1 index 750a7b18b..632af1fe0 100644 --- a/scripts/build/windows/install-build-pre-requisites.ps1 +++ b/scripts/build/windows/install-build-pre-requisites.ps1 @@ -13,7 +13,7 @@ function Install-Go { exit 1 } - $url = "https://dl.google.com/go/go1.15.14.windows-amd64.msi" + $url = "https://go.dev/dl/go1.15.14.windows-amd64.msi" $output = Join-Path -Path $tempGo -ChildPath "go1.15.14.windows-amd64.msi" Write-Host("downloading go msi into directory path : " + $output + " ...") Invoke-WebRequest -Uri $url -OutFile $output -ErrorAction Stop @@ -102,7 +102,7 @@ function Install-DotNetCoreSDK() { # install dotNet core sdk Write-Host("installing .net core sdk 3.1 ...") - Start-Process msiexec.exe -Wait -ArgumentList '/I ', $output, '/quiet' + Start-Process -Wait $output -ArgumentList " /q /norestart" Write-Host("installing .net core sdk 3.1 completed") } @@ -121,8 +121,8 @@ function Install-Docker() { exit 1 } - $url = "https://download.docker.com/win/stable/Docker%20Desktop%20Installer.exe" - $output = Join-Path -Path $dockerTemp -ChildPath "docker-desktop-installer.exe" + $url = "https://download.docker.com/win/enterprise/DockerDesktop.msi" + $output = Join-Path -Path $dockerTemp -ChildPath "docker-desktop-installer.msi" Write-Host("downloading docker-desktop-installer: " + $dockerTemp + " ...") Invoke-WebRequest -Uri $url -OutFile $output -ErrorAction Stop Write-Host("downloading docker-desktop-installer: " + $dockerTemp + " completed") @@ -133,6 +133,10 @@ function Install-Docker() { Write-Host("installing docker for desktop completed") } +# speed up Invoke-WebRequest +# https://stackoverflow.com/questions/28682642/powershell-why-is-using-invoke-webrequest-much-slower-than-a-browser-download +$ProgressPreference = 'SilentlyContinue' + Write-Host "Install GO 1.15.14 version" Install-Go Write-Host "Install Build dependencies" From d02670acb7ff053a0143d93d9c6f94e1b498ce0b Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Thu, 7 Apr 2022 17:03:05 -0700 
Subject: [PATCH 218/301] Add Windows phase (#741) * build and release windows for prod Co-authored-by: Amol Agrawal --- .pipelines/azure_pipeline_prod.yaml | 286 +++++++++++------- .../ContainerInsights.Windows.Parameters.json | 68 +++++ .../RolloutSpecs/RolloutSpecs.json | 13 +- .../ServiceModels/Public.ServiceModel.json | 15 +- 4 files changed, 273 insertions(+), 109 deletions(-) create mode 100644 deployment/multiarch-agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json diff --git a/.pipelines/azure_pipeline_prod.yaml b/.pipelines/azure_pipeline_prod.yaml index e1c3a9db2..28071786d 100644 --- a/.pipelines/azure_pipeline_prod.yaml +++ b/.pipelines/azure_pipeline_prod.yaml @@ -21,106 +21,190 @@ variables: armServiceConnectionName: 'ci-1es-acr-connection-prod' subscription: '30c56c3a-54da-46ea-b004-06eb33432687' containerRegistry: 'containerinsightsbuild' - repoImageName: '${{ variables.containerRegistry }}.azurecr.io/official/linux' + repoImageNameLinux: '${{ variables.containerRegistry }}.azurecr.io/official/linux' + repoImageNameWindows: '${{ variables.containerRegistry }}.azurecr.io/official/windows' IS_PR: $[eq(variables['Build.Reason'], 'PullRequest')] -steps: -- bash: | - commit=$(git rev-parse --short HEAD) - echo "##vso[task.setvariable variable=commit;]$commit" - - datetime=$(date +'%m%d%Y') - echo "##vso[task.setvariable variable=datetime;]$datetime" - - cd $(Build.SourcesDirectory)/deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts - tar -czvf ../artifacts.tar.gz pushAgentToAcr.sh - - cd $(Build.SourcesDirectory)/deployment/arc-k8s-extension/ServiceGroupRoot/Scripts - tar -czvf ../artifacts.tar.gz ../../../../charts/azuremonitor-containers/ pushChartToAcr.sh - -- task: CopyFiles@2 - displayName: "Copy ev2 deployment artifacts" - inputs: - SourceFolder: "$(Build.SourcesDirectory)/deployment" - Contents: | - **/* - TargetFolder: '$(Build.ArtifactStagingDirectory)/build' - -- task: CopyFiles@2 - displayName: "Copy ev2 deployment scripts" - inputs: - SourceFolder: "$(Build.SourcesDirectory)/.pipelines" - Contents: | - **/*.sh - TargetFolder: '$(Build.ArtifactStagingDirectory)/build' - -- task: CopyFiles@2 - displayName: "Copy ev2 deployment scripts" - inputs: - SourceFolder: "$(Build.SourcesDirectory)/kubernetes" - Contents: | - *.yaml - TargetFolder: '$(Build.ArtifactStagingDirectory)/build' - -- task: CopyFiles@2 - displayName: "Copy ev2 deployment scripts" - inputs: - SourceFolder: "$(Build.SourcesDirectory)/charts" - Contents: | - **/* - TargetFolder: '$(Build.ArtifactStagingDirectory)/build' - -- task: CopyFiles@2 - displayName: "Copy ev2 deployment scripts" - inputs: - SourceFolder: "$(Build.SourcesDirectory)/test/e2e" - Contents: | - *.yaml - TargetFolder: '$(Build.ArtifactStagingDirectory)/build' - -- task: AzureCLI@2 - displayName: "Docker multi-arch linux build" - inputs: - azureSubscription: ${{ variables.armServiceConnectionName }} - scriptType: bash - scriptLocation: inlineScript - inlineScript: | - - sudo apt-get update && sudo apt-get -y install qemu binfmt-support qemu-user-static - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - - docker buildx create --name testbuilder - docker buildx use testbuilder - - az --version - az account show - az account set -s ${{ variables.subscription }} - az acr login -n ${{ variables.containerRegistry }} - - if [ "$(Build.Reason)" != "PullRequest" ]; then - docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName 
}}:ciprod-$(datetime)-$(commit) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/metadata.json --push . - - docker pull ${{ variables.repoImageName }}:ciprod-$(datetime)-$(commit) - else - docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:ciprod-$(datetime)-$(commit) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/metadata.json . - fi - - -- task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 - displayName: 'Generation Task' - condition: eq(variables.IS_PR, true) - inputs: - BuildDropPath: '$(Build.ArtifactStagingDirectory)' - DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04' - -- task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 - displayName: 'Generation Task' - condition: eq(variables.IS_PR, false) - inputs: - BuildDropPath: '$(Build.ArtifactStagingDirectory)' - DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04, ${{ variables.repoImageName }}:ciprod-$(datetime)-$(commit)' - -- task: PublishBuildArtifacts@1 - inputs: - pathToPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: drop \ No newline at end of file +jobs: +- job: common + pool: + name: Azure-Pipelines-CI-Prod-EO + steps: + - bash: | + commit=$(git rev-parse --short HEAD) + datetime=$(date +'%m%d%Y') + linuxImagetag=ciprod-"$datetime"-"$commit" + windowsImageTag=win-ciprod-"$datetime"-"$commit" + echo "##vso[task.setvariable variable=linuxImagetag;isOutput=true]$linuxImagetag" + echo "##vso[task.setvariable variable=windowsImageTag;isOutput=true]$windowsImageTag" + + cd $(Build.SourcesDirectory)/deployment/multiarch-agent-deployment/ServiceGroupRoot/Scripts + tar -czvf ../artifacts.tar.gz pushAgentToAcr.sh + + cd $(Build.SourcesDirectory)/deployment/arc-k8s-extension/ServiceGroupRoot/Scripts + tar -czvf ../artifacts.tar.gz ../../../../charts/azuremonitor-containers/ pushChartToAcr.sh + name: setup + + - task: CopyFiles@2 + displayName: "Copy ev2 deployment artifacts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/deployment" + Contents: | + **/* + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + + - task: CopyFiles@2 + displayName: "Copy ev2 deployment scripts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/.pipelines" + Contents: | + **/*.sh + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + + - task: CopyFiles@2 + displayName: "Copy ev2 deployment scripts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/kubernetes" + Contents: | + *.yaml + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + + - task: CopyFiles@2 + displayName: "Copy ev2 deployment scripts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/charts" + Contents: | + **/* + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + + - task: CopyFiles@2 + displayName: "Copy ev2 deployment scripts" + inputs: + SourceFolder: "$(Build.SourcesDirectory)/test/e2e" + Contents: | + *.yaml + TargetFolder: '$(Build.ArtifactStagingDirectory)/build' + + - task: PublishBuildArtifacts@1 + inputs: + pathToPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: drop + +- job: build_linux + dependsOn: common + pool: + name: Azure-Pipelines-CI-Prod-EO + variables: + linuxImagetag: $[ dependencies.common.outputs['setup.linuxImagetag'] ] + + steps: + - task: AzureCLI@2 + displayName: "Docker multi-arch linux build" + inputs: + azureSubscription: ${{ variables.armServiceConnectionName }} + scriptType: bash + 
scriptLocation: inlineScript + inlineScript: | + mkdir -p $(Build.ArtifactStagingDirectory)/linux + + sudo apt-get update && sudo apt-get -y install qemu binfmt-support qemu-user-static + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + + docker buildx create --name testbuilder + docker buildx use testbuilder + + az --version + az account show + az account set -s ${{ variables.subscription }} + az acr login -n ${{ variables.containerRegistry }} + + if [ "$(Build.Reason)" != "PullRequest" ]; then + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageNameLinux }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --push . + + docker pull ${{ variables.repoImageNameLinux }}:$(linuxImagetag) + else + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageNameLinux }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json . + fi + + + - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 + displayName: 'Generation Task' + condition: eq(variables.IS_PR, true) + inputs: + BuildDropPath: '$(Build.ArtifactStagingDirectory)/linux' + DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04' + + - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 + displayName: 'Generation Task' + condition: eq(variables.IS_PR, false) + inputs: + BuildDropPath: '$(Build.ArtifactStagingDirectory)/linux' + DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04, ${{ variables.repoImageNameLinux }}:$(linuxImagetag)' + + - task: PublishBuildArtifacts@1 + inputs: + pathToPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: drop + +- job: build_windows + dependsOn: common + pool: + name: Azure-Pipelines-Windows-CI-Prod-EO + variables: + windowsImageTag: $[ dependencies.common.outputs['setup.windowsImageTag'] ] + + steps: + - task: PowerShell@2 + inputs: + targetType: 'filePath' + filePath: $(System.DefaultWorkingDirectory)/scripts/build/windows/install-build-pre-requisites.ps1 + displayName: 'install prereqs' + + - script: | + setlocal enabledelayedexpansion + powershell.exe -ExecutionPolicy Unrestricted -NoProfile -WindowStyle Hidden -File "build\windows\Makefile.ps1" + endlocal + exit /B %ERRORLEVEL% + displayName: 'build base' + + - task: AzureCLI@2 + displayName: "Docker windows build" + inputs: + azureSubscription: ${{ variables.armServiceConnectionName }} + scriptType: ps + scriptLocation: inlineScript + inlineScript: | + mkdir -p $(Build.ArtifactStagingDirectory)/windows + cd kubernetes/windows + + az --version + az account show + az account set -s ${{ variables.subscription }} + az acr login -n ${{ variables.containerRegistry }} + + @{"image.name"="${{ variables.repoImageNameWindows }}:$(windowsImageTag)"} | ConvertTo-Json -Compress | Out-File -Encoding ascii $(Build.ArtifactStagingDirectory)/windows/metadata.json + + docker build --tag ${{ variables.repoImageNameWindows }}:$(windowsImageTag) . 
+ + if ("$(Build.Reason)" -ne "PullRequest") { + docker push ${{ variables.repoImageNameWindows }}:$(windowsImageTag) + } + + - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 + displayName: 'Generation Task' + condition: eq(variables.IS_PR, true) + inputs: + BuildDropPath: '$(Build.ArtifactStagingDirectory)/windows' + DockerImagesToScan: 'mcr.microsoft.com/windows/servercore:ltsc2019' + + - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 + displayName: 'Generation Task' + condition: eq(variables.IS_PR, false) + inputs: + BuildDropPath: '$(Build.ArtifactStagingDirectory)/windows' + DockerImagesToScan: 'mcr.microsoft.com/windows/servercore:ltsc2019, ${{ variables.repoImageNameWindows }}:$(windowsImageTag)' + + - task: PublishBuildArtifacts@1 + inputs: + pathToPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: drop \ No newline at end of file diff --git a/deployment/multiarch-agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json b/deployment/multiarch-agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json new file mode 100644 index 000000000..cfa945e5d --- /dev/null +++ b/deployment/multiarch-agent-deployment/ServiceGroupRoot/Parameters/ContainerInsights.Windows.Parameters.json @@ -0,0 +1,68 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutParameters.json", + "contentVersion": "1.0.0.0", + "wait": [ + { + "name": "waitSdpBakeTime", + "properties": { + "duration": "PT24H" + } + } + ], + "shellExtensions": [ + { + "name": "PushAgentToACR", + "type": "ShellExtensionType", + "properties": { + "maxexecutiontime": "PT1H" + }, + "package": { + "reference": { + "path": "artifacts.tar.gz" + } + }, + "launch": { + "command": [ + "/bin/bash", + "pushAgentToAcr.sh" + ], + "environmentVariables": [ + { + "name": "ACR_NAME", + "value": "__ACR_NAME__" + }, + { + "name": "AGENT_RELEASE", + "value": "__AGENT_RELEASE__" + }, + { + "name": "AGENT_IMAGE_TAG_SUFFIX", + "value": "__AGENT_IMAGE_TAG_SUFFIX__" + }, + { + "name": "AGENT_IMAGE_FULL_PATH", + "value": "public/azuremonitor/containerinsights/__AGENT_RELEASE__:win-__AGENT_RELEASE____AGENT_IMAGE_TAG_SUFFIX__" + }, + { + "name": "CDPX_REGISTRY", + "value": "__CDPX_WINDOWS_REGISTRY__" + }, + { + "name": "CDPX_REPO_NAME", + "value": "__CDPX_WINDOWS_REPO_NAME__" + }, + { + "name": "CDPX_TAG", + "value": "__CDPX_WINDOWS_TAG__" + } + ], + "identity": { + "type": "userAssigned", + "userAssignedIdentities": [ + "__MANAGED_IDENTITY__" + ] + } + } + } + ] + } \ No newline at end of file diff --git a/deployment/multiarch-agent-deployment/ServiceGroupRoot/RolloutSpecs/RolloutSpecs.json b/deployment/multiarch-agent-deployment/ServiceGroupRoot/RolloutSpecs/RolloutSpecs.json index 250878590..91bc42ba1 100644 --- a/deployment/multiarch-agent-deployment/ServiceGroupRoot/RolloutSpecs/RolloutSpecs.json +++ b/deployment/multiarch-agent-deployment/ServiceGroupRoot/RolloutSpecs/RolloutSpecs.json @@ -2,8 +2,8 @@ "$schema": "https://ev2schema.azure.net/schemas/2020-01-01/rolloutSpecification.json", "ContentVersion": "1.0.0.0", "RolloutMetadata": { - "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", - "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", "Name": "ContainerInsightsAgent", "RolloutType": "Major", "BuildSource": { @@ 
-15,7 +15,7 @@ "Email": { "To": "omscontainers@microsoft.com" } - } + } }, "OrchestratedSteps": [ { @@ -24,6 +24,13 @@ "targetName": "PushLinuxAgent", "actions": [ "Shell/PushAgentToACR" ], "dependsOn": [ ] + }, + { + "name": "PushWindowsAgent", + "targetType": "ServiceResource", + "targetName": "PushWindowsAgent", + "actions": [ "Shell/PushAgentToACR" ], + "dependsOn": [ ] } ] } \ No newline at end of file diff --git a/deployment/multiarch-agent-deployment/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json b/deployment/multiarch-agent-deployment/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json index c3b00340a..cc632446c 100644 --- a/deployment/multiarch-agent-deployment/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json +++ b/deployment/multiarch-agent-deployment/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json @@ -28,24 +28,29 @@ ] } ], - "ServiceResourceGroups": [ + "ServiceResourceGroups": [ { "AzureResourceGroupName": "ContainerInsights-MultiArch-Agent-Release", "Location": "eastus2", "InstanceOf": "CI-Agent-ServiceResourceGroupDefinition", - "AzureSubscriptionId": "30c56c3a-54da-46ea-b004-06eb33432687", + "AzureSubscriptionId": "30c56c3a-54da-46ea-b004-06eb33432687", "ScopeTags": [ { "Name": "Global" } - ], + ], "ServiceResources": [ { "Name": "PushLinuxAgent", "InstanceOf": "ShellExtension", "RolloutParametersPath": "Parameters\\ContainerInsights.Linux.Parameters.json" + }, + { + "Name": "PushWindowsAgent", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsights.Windows.Parameters.json" } ] - } + } ] - } \ No newline at end of file + } \ No newline at end of file From 38f3d926d7294ac4d27c8015fb261f61d7893451 Mon Sep 17 00:00:00 2001 From: sarahpeiffer <46665092+sarahpeiffer@users.noreply.github.com> Date: Mon, 11 Apr 2022 17:34:00 -0700 Subject: [PATCH 219/301] Sarah/add onboarding templates (#742) * add onboarding templates for legacy auth --- .../existingClusterOnboarding.json | 44 +++++++++++++++++++ .../existingClusterParam.json | 15 +++++++ 2 files changed, 59 insertions(+) create mode 100644 test/onboarding-templates-legacy-auth/existingClusterOnboarding.json create mode 100644 test/onboarding-templates-legacy-auth/existingClusterParam.json diff --git a/test/onboarding-templates-legacy-auth/existingClusterOnboarding.json b/test/onboarding-templates-legacy-auth/existingClusterOnboarding.json new file mode 100644 index 000000000..c996e6042 --- /dev/null +++ b/test/onboarding-templates-legacy-auth/existingClusterOnboarding.json @@ -0,0 +1,44 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "aksResourceId": { + "type": "string", + "metadata": { + "description": "AKS Cluster Resource ID" + } + }, + "aksResourceLocation": { + "type": "string", + "metadata": { + "description": "Location of the AKS resource e.g. 
\"East US\"" + } + }, + "workspaceResourceId": { + "type": "string", + "metadata": { + "description": "Azure Monitor Log Analytics Resource ID" + } + } + }, + "resources": [ + { + "name": "[split(parameters('aksResourceId'),'/')[8]]", + "type": "Microsoft.ContainerService/managedClusters", + "location": "[parameters('aksResourceLocation')]", + "apiVersion": "2018-03-31", + "properties": { + "mode": "Incremental", + "id": "[parameters('aksResourceId')]", + "addonProfiles": { + "omsagent": { + "enabled": false, + "config": { + "logAnalyticsWorkspaceResourceID": "[parameters('workspaceResourceId')]" + } + } + } + } + } + ] +} \ No newline at end of file diff --git a/test/onboarding-templates-legacy-auth/existingClusterParam.json b/test/onboarding-templates-legacy-auth/existingClusterParam.json new file mode 100644 index 000000000..fb5d81c73 --- /dev/null +++ b/test/onboarding-templates-legacy-auth/existingClusterParam.json @@ -0,0 +1,15 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "aksResourceId": { + "value": "/subscriptions//resourcegroups//providers/Microsoft.ContainerService/managedClusters/" + }, + "aksResourceLocation": { + "value": "" + }, + "workspaceResourceId": { + "value": "/subscriptions//resourceGroups//providers/Microsoft.OperationalInsights/workspaces/" + } + } +} \ No newline at end of file From ca3fdcb7f4c9726fbc5d34361a462261d92d771c Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Tue, 26 Apr 2022 15:06:55 -0700 Subject: [PATCH 220/301] fix download (#749) Co-authored-by: Amol Agrawal --- scripts/build/windows/install-build-pre-requisites.ps1 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/build/windows/install-build-pre-requisites.ps1 b/scripts/build/windows/install-build-pre-requisites.ps1 index 632af1fe0..235f6ace9 100644 --- a/scripts/build/windows/install-build-pre-requisites.ps1 +++ b/scripts/build/windows/install-build-pre-requisites.ps1 @@ -121,15 +121,15 @@ function Install-Docker() { exit 1 } - $url = "https://download.docker.com/win/enterprise/DockerDesktop.msi" - $output = Join-Path -Path $dockerTemp -ChildPath "docker-desktop-installer.msi" + $url = "https://desktop.docker.com/win/main/amd64/Docker%20Desktop%20Installer.exe" + $output = Join-Path -Path $dockerTemp -ChildPath "docker-desktop-installer.exe" Write-Host("downloading docker-desktop-installer: " + $dockerTemp + " ...") Invoke-WebRequest -Uri $url -OutFile $output -ErrorAction Stop Write-Host("downloading docker-desktop-installer: " + $dockerTemp + " completed") # install docker Write-Host("installing docker for desktop ...") - Start-Process msiexec.exe -Wait -ArgumentList '/I ', $output, '/quiet' + Start-Process $output -Wait -ArgumentList 'install --quiet' Write-Host("installing docker for desktop completed") } From f6c1deb574ac2584ee5659f6f0645a2cfb446ea2 Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Wed, 27 Apr 2022 11:49:41 -0700 Subject: [PATCH 221/301] force run trivy stage (#745) - scans for HIGH, MEDIUM, CRITICAL CVEs with fixes available in / and /usr/lib - breaks build if CVEs with existing fixes found - adds trivyignore to accomodate CVEs which are understood and should not get flagged - adds CVEs to trivyignore to unblock builds; CVEs will be fixed and removed from trivyignore in later PRs Co-authored-by: Amol Agrawal --- .github/workflows/pr-checker.yml | 2 -- .trivyignore | 17 +++++++++++++++++ kubernetes/linux/Dockerfile.multiarch | 11 
++++++++++- 3 files changed, 27 insertions(+), 3 deletions(-) create mode 100644 .trivyignore diff --git a/.github/workflows/pr-checker.yml b/.github/workflows/pr-checker.yml index f0cea063d..91e81dc16 100644 --- a/.github/workflows/pr-checker.yml +++ b/.github/workflows/pr-checker.yml @@ -56,8 +56,6 @@ jobs: format: 'table' severity: 'CRITICAL,HIGH' vuln-type: 'os,library' - #[vishwa] - Fix telegraf & test all for next release - see work item #https://msazure.visualstudio.com/InfrastructureInsights/_workitems/edit/13322134 - skip-files: '/usr/sbin/telegraf,/opt/telegraf' exit-code: '1' timeout: '5m0s' ignore-unfixed: true diff --git a/.trivyignore b/.trivyignore new file mode 100644 index 000000000..1b6a7090b --- /dev/null +++ b/.trivyignore @@ -0,0 +1,17 @@ +# related to telegraf +#[vishwa] - Fix telegraf & test all for next release - see work item #https://msazure.visualstudio.com/InfrastructureInsights/_workitems/edit/13322134 +CVE-2021-43816 +CVE-2022-23648 +CVE-2022-24450 +CVE-2022-26652 +CVE-2019-3826 +CVE-2022-27191 +CVE-2021-42836 + +# ruby in /usr/lib +CVE-2020-36327 +CVE-2021-43809 +CVE-2021-41816 +CVE-2021-41819 +CVE-2021-31799 +CVE-2021-28965 \ No newline at end of file diff --git a/kubernetes/linux/Dockerfile.multiarch b/kubernetes/linux/Dockerfile.multiarch index e94bf71bb..38103dd65 100644 --- a/kubernetes/linux/Dockerfile.multiarch +++ b/kubernetes/linux/Dockerfile.multiarch @@ -39,8 +39,17 @@ RUN chmod 775 $tmpdir/*.sh; sync; $tmpdir/setup.sh ${TARGETARCH} # Do vulnerability scan in a seperate stage to avoid adding layer FROM base_image AS vulnscan COPY --from=aquasec/trivy:latest /usr/local/bin/trivy /usr/local/bin/trivy -RUN trivy rootfs --exit-code 1 --ignore-unfixed --no-progress --severity HIGH,CRITICAL --skip-files "/usr/sbin/telegraf" --skip-files "/opt/telegraf" --skip-files "/usr/local/bin/trivy" / +COPY .trivyignore .trivyignore +RUN trivy rootfs --ignore-unfixed --no-progress --severity HIGH,CRITICAL,MEDIUM --skip-files "/usr/local/bin/trivy" / +RUN trivy rootfs --ignore-unfixed --no-progress --severity HIGH,CRITICAL,MEDIUM /usr/lib +RUN trivy rootfs --exit-code 1 --ignore-unfixed --no-progress --severity HIGH,CRITICAL,MEDIUM --skip-files "/usr/local/bin/trivy" / > /dev/null 2>&1 && trivy rootfs --exit-code 1 --ignore-unfixed --no-progress --severity HIGH,CRITICAL,MEDIUM /usr/lib > /dev/null 2>&1 # Revert to base layer before vulnscan FROM base_image AS ContainerInsights +# force the trivy stage to run +# docker buildx (BUILDKIT) does not build stages which do not affect the final stage +# by copying over a file we create a dependency +# see: https://github.com/docker/build-push-action/issues/377 +COPY --from=vulnscan /usr/local/bin/trivy /usr/local/bin/trivy +RUN rm /usr/local/bin/trivy CMD [ "/opt/main.sh" ] \ No newline at end of file From 9c64f93ad05f294e7ac66ffc700a7baa0681ae71 Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Wed, 4 May 2022 15:07:17 -0700 Subject: [PATCH 222/301] update telegraf to 1.22.2 to fix vulns (#752) * update telegraf to 1.22.2 to fix vulns Co-authored-by: Amol Agrawal --- .trivyignore | 7 +++---- kubernetes/linux/setup.sh | 6 +++--- kubernetes/windows/setup.ps1 | 2 +- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/.trivyignore b/.trivyignore index 1b6a7090b..3a8089422 100644 --- a/.trivyignore +++ b/.trivyignore @@ -1,11 +1,10 @@ # related to telegraf #[vishwa] - Fix telegraf & test all for next release - see work item #https://msazure.visualstudio.com/InfrastructureInsights/_workitems/edit/13322134 
-CVE-2021-43816 -CVE-2022-23648 -CVE-2022-24450 -CVE-2022-26652 +# Unfixed as of 4/28/2022 CVE-2019-3826 CVE-2022-27191 + +#still present in mdsd telegraf CVE-2021-42836 # ruby in /usr/lib diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 709c8f1c4..c478af0e5 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -40,10 +40,10 @@ sudo apt-get install jq=1.5+dfsg-2 -y #used to setcaps for ruby process to read /proc/env sudo apt-get install libcap2-bin -y -wget https://dl.influxdata.com/telegraf/releases/telegraf-1.20.3_linux_$ARCH.tar.gz -tar -zxvf telegraf-1.20.3_linux_$ARCH.tar.gz +wget https://dl.influxdata.com/telegraf/releases/telegraf-1.22.2_linux_$ARCH.tar.gz +tar -zxvf telegraf-1.22.2_linux_$ARCH.tar.gz -mv /opt/telegraf-1.20.3/usr/bin/telegraf /opt/telegraf +mv /opt/telegraf-1.22.2/usr/bin/telegraf /opt/telegraf chmod 544 /opt/telegraf diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index af9e4b40e..2fd429e43 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -35,7 +35,7 @@ Write-Host ('Finished Installing Fluentbit') Write-Host ('Installing Telegraf'); try { - $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.20.3_windows_amd64.zip' + $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.22.2_windows_amd64.zip' Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf.zip Expand-Archive -Path /installation/telegraf.zip -Destination /installation/telegraf Move-Item -Path /installation/telegraf/*/* -Destination /opt/telegraf/ -ErrorAction SilentlyContinue From 52c7fe4e72db186e7d1b53247b90651ad484838a Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 5 May 2022 17:50:47 -0700 Subject: [PATCH 223/301] Gangams/arc k8s aad msi auth (#743) * arc k8s msi * wip * extension identity role * imds sidecar integration for arc k8s * imds sidecar integration for arc k8s * imds endpoint for windows * imds endpoint for windows * wip * fix exception * rename param name * arc msi imdsd container changes * arc msi imdsd container changes * arc msi imdsd container changes * arc msi imdsd container changes * arc msi imdsd container changes * revert unneeded yaml changes * revert unneeded yaml changes * wip * wip * working * working * working * add implementation for msi token for windows mdm metrics * fix comment * arc k8s msi onboarding templates * fix template bug * fix template bug * fix template bug * rename flag name * fix template bug * make useAADAuth specific to arc k8s * set k8sport at machine scope for windows * fix bug * fix bug * update rbac for arc k8s imds * bump chart version for conformance test run * conf test updates for msi auth * cli extension whl file * add containerinsights solution in msi auth mode * unify tags * revert test chart and image versions * remove test whl file and fix conf test * conf test updates for addon-token-adapter * remove container insights solution add for msi auth * add missing arm template param --- .../templates/omsagent-arc-k8s-crd.yaml | 12 +- .../templates/omsagent-daemonset-windows.yaml | 2 + .../templates/omsagent-daemonset.yaml | 14 ++ .../templates/omsagent-deployment.yaml | 12 + .../templates/omsagent-rbac.yaml | 11 +- charts/azuremonitor-containers/values.yaml | 4 + kubernetes/windows/main.ps1 | 11 +- .../existingClusterOnboarding.json | 14 +- .../existingClusterParam.json | 9 +- .../existingClusterOnboarding.json | 224 ++++++++++++++++++ .../existingClusterParam.json | 28 +++ 
source/plugins/go/src/go.mod | 1 + .../plugins/go/src/ingestion_token_utils.go | 186 +++++++++++---- source/plugins/ruby/constants.rb | 3 + source/plugins/ruby/out_mdm.rb | 132 +++++++---- test/e2e/conformance.yaml | 2 +- test/e2e/src/core/e2e_tests.sh | 15 +- test/e2e/src/tests/test_e2e_workflows.py | 9 + .../tests/test_node_metrics_e2e_workflow.py | 120 +++++++--- .../tests/test_pod_metrics_e2e_workflow.py | 30 ++- 20 files changed, 670 insertions(+), 169 deletions(-) create mode 100644 scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json create mode 100644 scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json diff --git a/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml b/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml index b7482b8b5..c61d4b83c 100644 --- a/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml @@ -19,8 +19,16 @@ metadata: name: container-insights-clusteridentityrequest namespace: azure-arc spec: - audience: https://monitoring.azure.com/ + {{- if eq (.Values.Azure.Cluster.Cloud | lower) "azurepubliccloud" }} + audience: https://monitor.azure.com/ + {{- else if eq (.Values.Azure.Cluster.Cloud | lower) "azurechinacloud" }} + audience: https://monitor.azure.cn/ + {{- else if eq (.Values.Azure.Cluster.Cloud | lower) "azureusgovernmentcloud" }} + audience: https://monitor.azure.us/ + {{- else }} + audience: https://monitor.azure.com/ + {{- end }} {{- if not (empty .Values.Azure.Extension.Name) }} - resourceId: {{ .Values.Azure.Extension.Name }} + resourceId: {{ .Values.Azure.Extension.Name }} {{- end }} {{- end }} diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index b581a324a..ef72b385b 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -69,6 +69,8 @@ spec: {{- else if ne .Values.Azure.Cluster.ResourceId "" }} - name: AKS_RESOURCE_ID value: {{ .Values.Azure.Cluster.ResourceId | quote }} + - name: USING_AAD_MSI_AUTH + value: {{ .Values.omsagent.useAADAuth | quote }} {{- if ne .Values.Azure.Cluster.Region "" }} - name: AKS_REGION value: {{ .Values.Azure.Cluster.Region | quote }} diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index 3b48c26c4..5bd8bdf79 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -37,6 +37,16 @@ spec: serviceAccountName: omsagent {{- end }} containers: +{{- if and (ne .Values.Azure.Cluster.ResourceId "") (.Values.omsagent.useAADAuth) }} + - name: addon-token-adapter + imagePullPolicy: IfNotPresent + env: + - name: AZMON_COLLECT_ENV + value: "false" + - name: TOKEN_NAMESPACE + value: "azure-arc" +{{- .Values.Azure.Identity.MSIAdapterYaml | nindent 7 }} +{{- end }} - name: omsagent {{- if eq (.Values.omsagent.domain | lower) "opinsights.azure.cn" }} image: "mcr.azk8s.cn/azuremonitor/containerinsights/ciprod:{{ .Values.omsagent.image.tag }}" @@ -57,6 +67,8 @@ spec: {{- else if ne .Values.Azure.Cluster.ResourceId "" }} - name: AKS_RESOURCE_ID value: {{ .Values.Azure.Cluster.ResourceId | quote }} + - name: USING_AAD_MSI_AUTH + value: {{ 
.Values.omsagent.useAADAuth | quote }} {{- if ne .Values.Azure.Cluster.Region "" }} - name: AKS_REGION value: {{ .Values.Azure.Cluster.Region | quote }} @@ -159,6 +171,8 @@ spec: {{- else if ne .Values.Azure.Cluster.ResourceId "" }} - name: AKS_RESOURCE_ID value: {{ .Values.Azure.Cluster.ResourceId | quote }} + - name: USING_AAD_MSI_AUTH + value: {{ .Values.omsagent.useAADAuth | quote }} {{- if ne .Values.Azure.Cluster.Region "" }} - name: AKS_REGION value: {{ .Values.Azure.Cluster.Region | quote }} diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index 2a60fbb7f..a0abb0f57 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -33,6 +33,16 @@ spec: serviceAccountName: omsagent {{- end }} containers: +{{- if and (ne .Values.Azure.Cluster.ResourceId "") (.Values.omsagent.useAADAuth) }} + - name: addon-token-adapter + imagePullPolicy: IfNotPresent + env: + - name: AZMON_COLLECT_ENV + value: "false" + - name: TOKEN_NAMESPACE + value: "azure-arc" +{{- .Values.Azure.Identity.MSIAdapterYaml | nindent 7 }} +{{- end }} - name: omsagent {{- if eq (.Values.omsagent.domain | lower) "opinsights.azure.cn" }} image: "mcr.azk8s.cn/azuremonitor/containerinsights/ciprod:{{ .Values.omsagent.image.tag }}" @@ -53,6 +63,8 @@ spec: {{- else if ne .Values.Azure.Cluster.ResourceId "" }} - name: AKS_RESOURCE_ID value: {{ .Values.Azure.Cluster.ResourceId | quote }} + - name: USING_AAD_MSI_AUTH + value: {{ .Values.omsagent.useAADAuth | quote }} {{- if ne .Values.Azure.Cluster.Region "" }} - name: AKS_REGION value: {{ .Values.Azure.Cluster.Region | quote }} diff --git a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml index a167e99a5..fe9b65973 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml @@ -30,18 +30,9 @@ rules: verbs: ["list"] - apiGroups: ["clusterconfig.azure.com"] resources: ["azureclusteridentityrequests", "azureclusteridentityrequests/status"] - resourceNames: ["container-insights-clusteridentityrequest"] - verbs: ["get", "create", "patch"] + verbs: ["get", "create", "patch", "list", "update", "delete"] - nonResourceURLs: ["/metrics"] verbs: ["get"] -#arc k8s extension model grants access as part of the extension msi -#remove this explicit permission once the extension available in public preview -{{- if (empty .Values.Azure.Extension.Name) }} -- apiGroups: [""] - resources: ["secrets"] - resourceNames: ["container-insights-clusteridentityrequest-token"] - verbs: ["get"] -{{- end }} --- kind: ClusterRoleBinding {{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 4460f7756..104efb86d 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -7,6 +7,7 @@ ## Values of under Azure are being populated by Azure Arc K8s RP during the installation of the extension Azure: Cluster: + Cloud: Region: ResourceId: Extension: @@ -45,6 +46,9 @@ omsagent: # if set to true additional agent workflow logs will be emitted which are used for e2e and arc k8s conformance testing ISTEST: false + # This flag used to determine whether to use AAD MSI auth or not for Arc K8s cluster + useAADAuth: false + ## 
To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 6482daed9..184af787d 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -302,10 +302,19 @@ function Set-EnvironmentVariables { Write-Host "Failed to set environment variable AGENT_VERSION for target 'machine' since it is either null or empty" } + $kubernetesPort = [System.Environment]::GetEnvironmentVariable("KUBERNETES_PORT_443_TCP_PORT", "process") + if (![string]::IsNullOrEmpty($kubernetesPort)) { + [System.Environment]::SetEnvironmentVariable("KUBERNETES_PORT_443_TCP_PORT", $kubernetesPort, "machine") + Write-Host "Successfully set environment variable KUBERNETES_PORT_443_TCP_PORT - $($kubernetesPort) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable KUBERNETES_PORT_443_TCP_PORT for target 'machine' since it is either null or empty" + } + # run config parser ruby /opt/omsagentwindows/scripts/ruby/tomlparser.rb .\setenv.ps1 - + #Parse the configmap to set the right environment variables for agent config. ruby /opt/omsagentwindows/scripts/ruby/tomlparser-agent-config.rb .\setagentenv.ps1 diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index 28996f4a1..c42a1d074 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -14,10 +14,10 @@ "description": "Location of the AKS resource e.g. \"East US\"" } }, - "aksResourceTagValues": { + "resourceTagValues": { "type": "object", "metadata": { - "description": "Existing all tags on AKS Cluster Resource" + "description": "Existing or new tags to use on AKS, ContainerInsights and DataCollectionRule Resources" } }, "workspaceLocation": { @@ -31,12 +31,6 @@ "metadata": { "description": "Full Resource ID of the log analitycs workspace that will be used for data destination. 
For example /subscriptions/00000000-0000-0000-0000-0000-00000000/resourceGroups/ResourceGroupName/providers/Microsoft.operationalinsights/workspaces/ws_xyz" } - }, - "dcrResourceTagValues": { - "type": "object", - "metadata": { - "description": "Existing or new tags on DCR Resource" - } } }, "variables": { @@ -70,7 +64,7 @@ "apiVersion": "2019-11-01-preview", "name": "[variables('dcrName')]", "location": "[parameters('workspaceLocation')]", - "tags": "[parameters('dcrResourceTagValues')]", + "tags": "[parameters('resourceTagValues')]", "kind": "Linux", "properties": { "dataSources": { @@ -184,7 +178,7 @@ "name": "[variables('clusterName')]", "type": "Microsoft.ContainerService/managedClusters", "location": "[parameters('aksResourceLocation')]", - "tags": "[parameters('aksResourceTagValues')]", + "tags": "[parameters('resourceTagValues')]", "apiVersion": "2018-03-31", "properties": { "mode": "Incremental", diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json index 31f0f9c49..e0f9a643f 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json @@ -8,20 +8,13 @@ "aksResourceLocation": { "value": "" }, - "aksResourceTagValues": { - "value": { - "": "", - "": "", - "": "" - } - }, "workspaceResourceId": { "value": "/subscriptions//resourceGroups//providers/Microsoft.OperationalInsights/workspaces/" }, "workspaceLocation": { "value": "" }, - "dcrResourceTagValues": { + "resourceTagValues": { "value": { "": "", "": "", diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json new file mode 100644 index 000000000..a4a4e3453 --- /dev/null +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json @@ -0,0 +1,224 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "clusterResourceId": { + "type": "string", + "metadata": { + "description": "Resource Id of the Azure Arc Connected Cluster" + } + }, + "clusterRegion": { + "type": "string", + "metadata": { + "description": "Location of the Azure Arc Connected Cluster Resource e.g. \"eastus\"" + } + }, + "workspaceResourceId": { + "type": "string", + "metadata": { + "description": "Azure Monitor Log Analytics Resource ID" + } + }, + "workspaceRegion": { + "type": "string", + "metadata": { + "description": "Azure Monitor Log Analytics Workspace region e.g. \"eastus\"" + } + }, + "workspaceDomain": { + "type": "string", + "allowedValues": [ + "opinsights.azure.com", + "opinsights.azure.cn", + "opinsights.azure.us", + "opinsights.azure.eaglex.ic.gov", + "opinsights.azure.microsoft.scloud" + ], + "defaultValue": "opinsights.azure.com", + "metadata": { + "description": "Azure Monitor Log Analytics Workspace Domain e.g. 
opinsights.azure.com" + } + }, + "resourceTagValues": { + "type": "object", + "metadata": { + "description": "Existing or new tags to use on Arc K8s ContainerInsights extension resources" + } + } + }, + "variables": { + "clusterSubscriptionId": "[split(parameters('clusterResourceId'),'/')[2]]", + "clusterResourceGroup": "[split(parameters('clusterResourceId'),'/')[4]]", + "clusterName": "[split(parameters('clusterResourceId'),'/')[8]]", + "clusterLocation": "[replace(parameters('clusterRegion'),' ', '')]", + "workspaceSubscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]", + "workspaceResourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]", + "dcrName": "[Concat('MSCI', '-', variables('clusterName'), '-', variables('clusterLocation'))]", + "associationName": "ContainerInsightsExtension", + "dataCollectionRuleId": "[resourceId(variables('workspaceSubscriptionId'), variables('workspaceResourceGroup'), 'Microsoft.Insights/dataCollectionRules', variables('dcrName'))]" + }, + "resources": [ + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('arc-k8s-monitoring-msi-dcr', '-', uniqueString(variables('dcrName')))]", + "apiVersion": "2017-05-10", + "subscriptionId": "[variables('workspaceSubscriptionId')]", + "resourceGroup": "[variables('workspaceResourceGroup')]", + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "type": "Microsoft.Insights/dataCollectionRules", + "apiVersion": "2019-11-01-preview", + "name": "[variables('dcrName')]", + "location": "[parameters('workspaceRegion')]", + "tags": "[parameters('resourceTagValues')]", + "kind": "Linux", + "properties": { + "dataSources": { + "extensions": [ + { + "name": "ContainerInsightsExtension", + "streams": [ + "Microsoft-Perf", + "Microsoft-ContainerInventory", + "Microsoft-ContainerLog", + "Microsoft-ContainerLogV2", + "Microsoft-ContainerNodeInventory", + "Microsoft-KubeEvents", + "Microsoft-KubeMonAgentEvents", + "Microsoft-KubeNodeInventory", + "Microsoft-KubePodInventory", + "Microsoft-KubePVInventory", + "Microsoft-KubeServices", + "Microsoft-InsightsMetrics" + ], + "extensionName": "ContainerInsights" + } + ] + }, + "destinations": { + "logAnalytics": [ + { + "workspaceResourceId": "[parameters('workspaceResourceId')]", + "name": "ciworkspace" + } + ] + }, + "dataFlows": [ + { + "streams": [ + "Microsoft-Perf", + "Microsoft-ContainerInventory", + "Microsoft-ContainerLog", + "Microsoft-ContainerLogV2", + "Microsoft-ContainerNodeInventory", + "Microsoft-KubeEvents", + "Microsoft-KubeMonAgentEvents", + "Microsoft-KubeNodeInventory", + "Microsoft-KubePodInventory", + "Microsoft-KubePVInventory", + "Microsoft-KubeServices", + "Microsoft-InsightsMetrics" + ], + "destinations": [ + "ciworkspace" + ] + } + ] + } + } + ] + }, + "parameters": {} + } + }, + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('arc-k8s-monitoring-msi-dcra', '-', uniqueString(parameters('clusterResourceId')))]", + "apiVersion": "2017-05-10", + "subscriptionId": "[variables('clusterSubscriptionId')]", + "resourceGroup": "[variables('clusterResourceGroup')]", + "dependsOn": [ + "[Concat('arc-k8s-monitoring-msi-dcr', '-', uniqueString(variables('dcrName')))]" + ], + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + 
"contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "type": "Microsoft.Kubernetes/connectedClusters/providers/dataCollectionRuleAssociations", + "name": "[concat(variables('clusterName'),'/microsoft.insights/', variables('associationName'))]", + "apiVersion": "2019-11-01-preview", + "properties": { + "description": "Association of data collection rule. Deleting this association will break the data collection for this AKS Cluster.", + "dataCollectionRuleId": "[variables('dataCollectionRuleId')]" + } + } + ] + }, + "parameters": {} + } + }, + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('arc-k8s-ci-extension', '-', uniqueString(parameters('clusterResourceId')))]", + "apiVersion": "2019-05-01", + "subscriptionId": "[split(parameters('clusterResourceId'),'/')[2]]", + "resourceGroup": "[split(parameters('clusterResourceId'),'/')[4]]", + "dependsOn": [ + "[Concat('arc-k8s-monitoring-msi-dcra', '-', uniqueString(parameters('clusterResourceId')))]" + ], + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "type": "Microsoft.KubernetesConfiguration/extensions", + "apiVersion": "2021-09-01", + "name": "azuremonitor-containers", + "location": "[parameters('clusterRegion')]", + "identity": { + "type": "systemassigned" + }, + "properties": { + "extensionType": "Microsoft.AzureMonitor.Containers", + "configurationSettings": { + "logAnalyticsWorkspaceResourceID": "[parameters('workspaceResourceId')]", + "omsagent.domain": "[parameters('workspaceDomain')]", + "omsagent.useAADAuth": "true" + }, + "configurationProtectedSettings": { + "omsagent.secret.wsid": "[reference(parameters('workspaceResourceId'), '2015-03-20').customerId]", + "omsagent.secret.key": "[listKeys(parameters('workspaceResourceId'), '2015-03-20').primarySharedKey]" + }, + "autoUpgradeMinorVersion": true, + "releaseTrain": "Stable", + "scope": { + "Cluster": { + "releaseNamespace": "azuremonitor-containers" + } + } + }, + "scope": "[concat('Microsoft.Kubernetes/connectedClusters/', split(parameters('clusterResourceId'),'/')[8])]" + } + ] + } + } + } + ] +} diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json new file mode 100644 index 000000000..8cd17ceb3 --- /dev/null +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json @@ -0,0 +1,28 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "clusterResourceId": { + "value": "/subscriptions//resourceGroups//providers/Microsoft.Kubernetes/connectedClusters/" + }, + "clusterRegion": { + "value": "" + }, + "workspaceResourceId": { + "value": "/subscriptions//resourcegroups//providers/microsoft.operationalinsights/workspaces/" + }, + "workspaceRegion": { + "value": "" + }, + "workspaceDomain": { + "value": "" + }, + "resourceTagValues": { + "value": { + "": "", + "": "", + "": "" + } + } + } +} diff --git a/source/plugins/go/src/go.mod b/source/plugins/go/src/go.mod index 9f30afab1..1960f82b8 100644 --- a/source/plugins/go/src/go.mod +++ b/source/plugins/go/src/go.mod @@ -13,6 +13,7 @@ require ( github.com/tinylib/msgp v1.1.2 github.com/ugorji/go 
v1.1.2-0.20180813092308-00b869d2f4a5 gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3 + k8s.io/api v0.21.0 k8s.io/apimachinery v0.21.0 k8s.io/client-go v0.21.0 ) diff --git a/source/plugins/go/src/ingestion_token_utils.go b/source/plugins/go/src/ingestion_token_utils.go index 4f245a514..81039f966 100644 --- a/source/plugins/go/src/ingestion_token_utils.go +++ b/source/plugins/go/src/ingestion_token_utils.go @@ -1,6 +1,7 @@ package main import ( + "context" "encoding/json" "errors" "fmt" @@ -12,6 +13,9 @@ import ( "strconv" "strings" "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const IMDSTokenPathForWindows = "c:/etc/imds-access-token/token" // only used in windows @@ -29,6 +33,12 @@ var IngestionAuthToken string var IngestionAuthTokenExpiration int64 var AMCSRedirectedEndpoint string = "" +// Arc k8s MSI related +const ArcK8sClusterConfigCRDAPIVersion = "clusterconfig.azure.com/v1beta1" +const ArcK8sClusterIdentityResourceName = "container-insights-clusteridentityrequest" +const ArcK8sClusterIdentityResourceNameSpace = "azure-arc" +const ArcK8sMSITokenSecretNameSpace = "azure-arc" + type IMDSResponse struct { AccessToken string `json:"access_token"` ClientID string `json:"client_id"` @@ -66,8 +76,8 @@ type AgentConfiguration struct { } `json:"channels"` Extensionconfigurations struct { Containerinsights []struct { - ID string `json:"id"` - Originids []string `json:"originIds"` + ID string `json:"id"` + Originids []string `json:"originIds"` Outputstreams struct { LinuxPerfBlob string `json:"LINUX_PERF_BLOB"` ContainerInventoryBlob string `json:"CONTAINER_INVENTORY_BLOB"` @@ -93,14 +103,47 @@ type IngestionTokenResponse struct { Ingestionauthtoken string `json:"ingestionAuthToken"` } +type ContainerInsightsIdentityRequest struct { + APIVersion string `json:"apiVersion"` + Kind string `json:"kind"` + Metadata struct { + Annotations struct { + MetaHelmShReleaseName string `json:"meta.helm.sh/release-name"` + MetaHelmShReleaseNamespace string `json:"meta.helm.sh/release-namespace"` + } `json:"annotations"` + CreationTimestamp time.Time `json:"creationTimestamp"` + Generation int `json:"generation"` + Labels struct { + AppKubernetesIoManagedBy string `json:"app.kubernetes.io/managed-by"` + } `json:"labels"` + Name string `json:"name"` + Namespace string `json:"namespace"` + ResourceVersion string `json:"resourceVersion"` + SelfLink string `json:"selfLink"` + UID string `json:"uid"` + } `json:"metadata"` + Spec struct { + Audience string `json:"audience"` + ResourceID string `json:"resourceId"` + } `json:"spec"` + Status struct { + ExpirationTime time.Time `json:"expirationTime"` + TokenReference struct { + DataName string `json:"dataName"` + SecretName string `json:"secretName"` + } `json:"tokenReference"` + } `json:"status"` +} + func getAccessTokenFromIMDS() (string, int64, error) { Log("Info getAccessTokenFromIMDS: start") useIMDSTokenProxyEndPoint := os.Getenv("USE_IMDS_TOKEN_PROXY_END_POINT") imdsAccessToken := "" + var expiration int64 var responseBytes []byte var err error - if (useIMDSTokenProxyEndPoint != "" && strings.Compare(strings.ToLower(useIMDSTokenProxyEndPoint), "true") == 0) { + if useIMDSTokenProxyEndPoint != "" && strings.Compare(strings.ToLower(useIMDSTokenProxyEndPoint), "true") == 0 { Log("Info Reading IMDS Access Token from IMDS Token proxy endpoint") mcsEndpoint := os.Getenv("MCS_ENDPOINT") msi_endpoint_string := 
fmt.Sprintf("http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https://%s/", mcsEndpoint) @@ -108,12 +151,12 @@ func getAccessTokenFromIMDS() (string, int64, error) { msi_endpoint, err := url.Parse(msi_endpoint_string) if err != nil { Log("getAccessTokenFromIMDS: Error creating IMDS endpoint URL: %s", err.Error()) - return imdsAccessToken, 0, err + return imdsAccessToken, expiration, err } req, err := http.NewRequest("GET", msi_endpoint.String(), nil) if err != nil { Log("getAccessTokenFromIMDS: Error creating HTTP request: %s", err.Error()) - return imdsAccessToken, 0, err + return imdsAccessToken, expiration, err } req.Header.Add("Metadata", "true") @@ -133,14 +176,14 @@ func getAccessTokenFromIMDS() (string, int64, error) { } if resp != nil && resp.Body != nil { - defer resp.Body.Close() + defer resp.Body.Close() } Log("getAccessTokenFromIMDS: IMDS Response Status: %d, retryCount: %d", resp.StatusCode, retryCount) - if IsRetriableError(resp.StatusCode) { + if IsRetriableError(resp.StatusCode) { message := fmt.Sprintf("getAccessTokenFromIMDS: IMDS Request failed with an error code: %d, retryCount: %d", resp.StatusCode, retryCount) Log(message) - retryDelay := time.Duration((retryCount + 1) * 100) * time.Millisecond + retryDelay := time.Duration((retryCount+1)*100) * time.Millisecond if resp.StatusCode == 429 { if resp != nil && resp.Header.Get("Retry-After") != "" { after, err := strconv.ParseInt(resp.Header.Get("Retry-After"), 10, 64) @@ -155,44 +198,103 @@ func getAccessTokenFromIMDS() (string, int64, error) { message := fmt.Sprintf("getAccessTokenFromIMDS: IMDS Request failed with nonretryable error code: %d, retryCount: %d", resp.StatusCode, retryCount) Log(message) SendException(message) - return imdsAccessToken, 0, err + return imdsAccessToken, expiration, err } IsSuccess = true break // call succeeded, don't retry any more } if !IsSuccess || resp == nil || resp.Body == nil { Log("getAccessTokenFromIMDS: IMDS Request ran out of retries") - return imdsAccessToken, 0, err + return imdsAccessToken, expiration, err } // Pull out response body responseBytes, err = ioutil.ReadAll(resp.Body) if err != nil { Log("getAccessTokenFromIMDS: Error reading response body: %s", err.Error()) - return imdsAccessToken, 0, err + return imdsAccessToken, expiration, err } } else { - Log("Info Reading IMDS Access Token from file : %s", IMDSTokenPathForWindows) - if _, err = os.Stat(IMDSTokenPathForWindows); os.IsNotExist(err) { - Log("getAccessTokenFromIMDS: IMDS token file doesnt exist: %s", err.Error()) - return imdsAccessToken, 0, err - } - //adding retries incase if we ended up reading the token file while the token file being written - for retryCount := 0; retryCount < MaxRetries; retryCount++ { - responseBytes, err = ioutil.ReadFile(IMDSTokenPathForWindows) - if err != nil { - Log("getAccessTokenFromIMDS: Could not read IMDS token from file: %s, retryCount: %d", err.Error(), retryCount) - time.Sleep(time.Duration((retryCount + 1) * 100) * time.Millisecond) - continue + resourceId := os.Getenv("AKS_RESOURCE_ID") + if resourceId != "" && strings.Contains(strings.ToLower(resourceId), strings.ToLower("Microsoft.ContainerService/managedClusters")) { + Log("Info Reading IMDS Access Token from file : %s", IMDSTokenPathForWindows) + if _, err = os.Stat(IMDSTokenPathForWindows); os.IsNotExist(err) { + Log("getAccessTokenFromIMDS: IMDS token file doesnt exist: %s", err.Error()) + return imdsAccessToken, expiration, err + } + //adding retries incase if we ended up reading 
the token file while the token file being written + for retryCount := 0; retryCount < MaxRetries; retryCount++ { + responseBytes, err = ioutil.ReadFile(IMDSTokenPathForWindows) + if err != nil { + Log("getAccessTokenFromIMDS: Could not read IMDS token from file: %s, retryCount: %d", err.Error(), retryCount) + time.Sleep(time.Duration((retryCount+1)*100) * time.Millisecond) + continue + } + break + } + } else { + Log("getAccessTokenFromIMDS: Info Getting MSI Access Token reference from CRD and token from secret for Azure Arc K8s cluster") + var crdResponseBytes []byte + var errorMessage string + for retryCount := 0; retryCount < MaxRetries; retryCount++ { + crd_request_endpoint := fmt.Sprintf("/apis/%s/namespaces/%s/azureclusteridentityrequests/%s", ArcK8sClusterConfigCRDAPIVersion, ArcK8sClusterIdentityResourceNameSpace, ArcK8sClusterIdentityResourceName) + crdResponseBytes, err = ClientSet.RESTClient().Get().AbsPath(crd_request_endpoint).DoRaw(context.TODO()) + if err != nil { + Log("getAccessTokenFromIMDS: Failed to get the CRD: %s in namespace: %s, retryCount: %d", ArcK8sClusterIdentityResourceName, ArcK8sClusterIdentityResourceNameSpace, err.Error(), retryCount) + time.Sleep(time.Duration((retryCount+1)*100) * time.Millisecond) + continue + } + break } - break - } - } + if crdResponseBytes != nil { + var ciCRDRequest ContainerInsightsIdentityRequest + err = json.Unmarshal(crdResponseBytes, &ciCRDRequest) + if err != nil { + errorMessage = fmt.Sprintf("getAccessTokenFromIMDS: Error unmarshalling the crdResponseBytes: %s", err.Error()) + Log(errorMessage) + return imdsAccessToken, expiration, errors.New(errorMessage) + } else { + status := ciCRDRequest.Status + tokenReference := status.TokenReference + dataFieldName := tokenReference.DataName + secretName := tokenReference.SecretName + expirationTime := status.ExpirationTime + if dataFieldName == "" || secretName == "" || expirationTime.IsZero() { + errorMessage = "getAccessTokenFromIMDS: Either dataName or SecretName or ExpirationTime values empty which indicates token not refreshed" + Log(errorMessage) + Log("getAccessTokenFromIMDS: dataName: %s, secretName: %s, expirationTime: %s", dataFieldName, secretName, expirationTime) + return imdsAccessToken, expiration, errors.New(errorMessage) + } else { + var secret *v1.Secret + for retryCount := 0; retryCount < MaxRetries; retryCount++ { + secret, err = ClientSet.CoreV1().Secrets(ArcK8sMSITokenSecretNameSpace).Get(context.TODO(), secretName, metav1.GetOptions{}) + if err != nil { + Log("getAccessTokenFromIMDS: Failed to read the secret: %s in namespace: %s, error: %s, retryCount: %d", secretName, ArcK8sMSITokenSecretNameSpace, err.Error(), retryCount) + time.Sleep(time.Duration((retryCount+1)*100) * time.Millisecond) + continue + } + break + } + if secret == nil { + errorMessage = fmt.Sprintf("getAccessTokenFromIMDS: value of secret: %s in nil in namespace: %s", secretName, ArcK8sMSITokenSecretNameSpace) + return imdsAccessToken, expiration, errors.New(errorMessage) + } + imdsAccessToken = string(secret.Data[dataFieldName]) + expiration = expirationTime.Unix() + return imdsAccessToken, expiration, nil + } + } + } else { + errorMessage = fmt.Sprintf("getAccessTokenFromIMDS: faled to get the CRD: %s in namespace: %s", ArcK8sClusterIdentityResourceName, ArcK8sClusterIdentityResourceNameSpace) + return imdsAccessToken, expiration, errors.New(errorMessage) + } + } + } - if responseBytes == nil { + if responseBytes == nil { Log("getAccessTokenFromIMDS: Error responseBytes is nil") - return 
imdsAccessToken, 0, err + return imdsAccessToken, expiration, err } // Unmarshall response body into struct @@ -200,14 +302,14 @@ func getAccessTokenFromIMDS() (string, int64, error) { err = json.Unmarshal(responseBytes, &imdsResponse) if err != nil { Log("getAccessTokenFromIMDS: Error unmarshalling the response: %s", err.Error()) - return imdsAccessToken, 0, err + return imdsAccessToken, expiration, err } imdsAccessToken = imdsResponse.AccessToken - expiration, err := strconv.ParseInt(imdsResponse.ExpiresOn, 10, 64) + expiration, err = strconv.ParseInt(imdsResponse.ExpiresOn, 10, 64) if err != nil { Log("getAccessTokenFromIMDS: Error parsing ExpiresOn field from IMDS response: %s", err.Error()) - return imdsAccessToken, 0, err + return imdsAccessToken, expiration, err } Log("Info getAccessTokenFromIMDS: end") return imdsAccessToken, expiration, nil @@ -258,7 +360,7 @@ func getAgentConfiguration(imdsAccessToken string) (configurationId string, chan } if resp != nil && resp.Body != nil { defer resp.Body.Close() - } + } Log("getAgentConfiguration Response Status: %d", resp.StatusCode) if resp.StatusCode == 421 { // AMCS returns redirected endpoint incase of private link agentConfigEndpoint := resp.Header.Get("x-ms-agent-config-endpoint") @@ -282,7 +384,7 @@ func getAgentConfiguration(imdsAccessToken string) (configurationId string, chan if IsRetriableError(resp.StatusCode) { message := fmt.Sprintf("getAgentConfiguration: Request failed with an error code: %d, retryCount: %d", resp.StatusCode, retryCount) Log(message) - retryDelay := time.Duration((retryCount + 1) * 100) * time.Millisecond + retryDelay := time.Duration((retryCount+1)*100) * time.Millisecond if resp.StatusCode == 429 { if resp != nil && resp.Header.Get("Retry-After") != "" { after, err := strconv.ParseInt(resp.Header.Get("Retry-After"), 10, 64) @@ -382,7 +484,7 @@ func getIngestionAuthToken(imdsAccessToken string, configurationId string, chann req.Header.Add("Authorization", bearer) var resp *http.Response = nil - IsSuccess := false + IsSuccess := false for retryCount := 0; retryCount < MaxRetries; retryCount++ { // Call managed services for Azure resources token endpoint resp, err = HTTPClient.Do(req) @@ -396,7 +498,7 @@ func getIngestionAuthToken(imdsAccessToken string, configurationId string, chann if resp != nil && resp.Body != nil { defer resp.Body.Close() - } + } Log("getIngestionAuthToken Response Status: %d", resp.StatusCode) if resp.StatusCode == 421 { // AMCS returns redirected endpoint incase of private link @@ -421,7 +523,7 @@ func getIngestionAuthToken(imdsAccessToken string, configurationId string, chann if IsRetriableError(resp.StatusCode) { message := fmt.Sprintf("getIngestionAuthToken: Request failed with an error code: %d, retryCount: %d", resp.StatusCode, retryCount) Log(message) - retryDelay := time.Duration((retryCount + 1) * 100) * time.Millisecond + retryDelay := time.Duration((retryCount+1)*100) * time.Millisecond if resp.StatusCode == 429 { if resp != nil && resp.Header.Get("Retry-After") != "" { after, err := strconv.ParseInt(resp.Header.Get("Retry-After"), 10, 64) @@ -429,7 +531,7 @@ func getIngestionAuthToken(imdsAccessToken string, configurationId string, chann retryDelay = time.Duration(after) * time.Second } } - } + } time.Sleep(retryDelay) continue } else if resp.StatusCode != 200 { @@ -504,7 +606,7 @@ func getTokenRefreshIntervalFromAmcsResponse(header http.Header) (refreshInterva func refreshIngestionAuthToken() { for ; true; <-IngestionAuthTokenRefreshTicker.C { - if IMDSToken == "" || 
IMDSTokenExpiration <= (time.Now().Unix() + 60 * 60) { // token valid 24 hrs and refresh token 1 hr before expiry + if IMDSToken == "" || IMDSTokenExpiration <= (time.Now().Unix()+60*60) { // token valid 24 hrs and refresh token 1 hr before expiry imdsToken, imdsTokenExpiry, err := getAccessTokenFromIMDS() if err != nil { message := fmt.Sprintf("refreshIngestionAuthToken: Error on getAccessTokenFromIMDS %s \n", err.Error()) @@ -532,7 +634,7 @@ func refreshIngestionAuthToken() { continue } } - if IMDSToken == "" || ConfigurationId == "" || ChannelId == "" { + if IMDSToken == "" || ConfigurationId == "" || ChannelId == "" { message := "refreshIngestionAuthToken: IMDSToken or ConfigurationId or ChannelId empty" Log(message) SendException(message) @@ -560,9 +662,9 @@ func refreshIngestionAuthToken() { func IsRetriableError(httpStatusCode int) bool { retryableStatusCodes := [5]int{408, 429, 502, 503, 504} for _, code := range retryableStatusCodes { - if code == httpStatusCode { - return true - } + if code == httpStatusCode { + return true + } } return false } diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index 72b035d45..542f342a6 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -135,4 +135,7 @@ class Constants #This is for telemetry to track if any of the windows customer has any of the field size >= 64KB #To evaluate switching to Windows AMA 64KB impacts any existing customers MAX_RECORD_OR_FIELD_SIZE_FOR_TELEMETRY = 65536 + + # only used in windows in AAD MSI auth mode + IMDS_TOKEN_PATH_FOR_WINDOWS = "c:/etc/imds-access-token/token" end diff --git a/source/plugins/ruby/out_mdm.rb b/source/plugins/ruby/out_mdm.rb index e10a2049f..c83972f11 100644 --- a/source/plugins/ruby/out_mdm.rb +++ b/source/plugins/ruby/out_mdm.rb @@ -1,7 +1,7 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -require 'fluent/plugin/output' +require "fluent/plugin/output" module Fluent::Plugin class OutputMDM < Output @@ -19,7 +19,7 @@ def initialize require_relative "constants" require_relative "arc_k8s_cluster_identity" require_relative "proxy_utils" - + require_relative "extension_utils" @@token_resource_url = "https://monitoring.azure.com/" # AAD auth supported only in public cloud and handle other clouds when enabled # this is unified new token audience for LA AAD MSI auth & metrics @@ -52,6 +52,7 @@ def initialize # Setting useMsi to false by default @useMsi = false @isAADMSIAuth = false + @isWindows = false @metrics_flushed_count = 0 @cluster_identity = nil @@ -88,6 +89,9 @@ def start aks_region = aks_region.gsub(" ", "") end + @isWindows = isWindows() + @isAADMSIAuth = ExtensionUtils.isAADMSIAuthMode() + if @can_send_data_to_mdm @log.info "MDM Metrics supported in #{aks_region} region" @@ -108,11 +112,19 @@ def start @log.info "POST Request url: #{@@post_request_url}" ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMPluginStart", {}) - # arc k8s cluster uses cluster identity if (!!@isArcK8sCluster) - @log.info "using cluster identity token since cluster is azure arc k8s cluster" - @cluster_identity = ArcK8sClusterIdentity.new - @cached_access_token = @cluster_identity.get_cluster_identity_token + if @isAADMSIAuth && !@isWindows + @log.info "using IMDS sidecar endpoint for MSI token since its Arc k8s and Linux node" + @useMsi = true + msi_endpoint = @@imds_msi_endpoint_template % { resource: @@token_resource_audience } + @parsed_token_uri = URI.parse(msi_endpoint) + @cached_access_token = get_access_token + else + # switch 
to IMDS endpoint for the windows once the Arc K8s team supports the IMDS sidecar for windows + @log.info "using cluster identity token since cluster is azure arc k8s cluster" + @cluster_identity = ArcK8sClusterIdentity.new + @cached_access_token = @cluster_identity.get_cluster_identity_token + end else # azure json file only used for aks and doesnt exist in non-azure envs file = File.read(@@azure_json_path) @@ -132,7 +144,6 @@ def start else # in case of aad msi auth user_assigned_client_id will be empty @log.info "using aad msi auth" - @isAADMSIAuth = true msi_endpoint = @@imds_msi_endpoint_template % { resource: @@token_resource_audience } end @parsed_token_uri = URI.parse(msi_endpoint) @@ -153,48 +164,59 @@ def get_access_token if (Time.now > @get_access_token_backoff_expiry) http_access_token = nil retries = 0 + properties = {} begin if @cached_access_token.to_s.empty? || (Time.now + 5 * 60 > @token_expiry_time) # Refresh token 5 minutes from expiration @log.info "Refreshing access token for out_mdm plugin.." - - if (!!@useMsi) - properties = {} - if (!!@isAADMSIAuth) - @log.info "Using aad msi auth to get the token to post MDM data" - properties["aadAuthMSIMode"] = "true" + if (!!@isAADMSIAuth) + properties["aadAuthMSIMode"] = "true" + end + if @isAADMSIAuth && @isWindows + @log.info "reading the token from IMDS token file since its windows.." + if File.exist?(Constants::IMDS_TOKEN_PATH_FOR_WINDOWS) && File.readable?(Constants::IMDS_TOKEN_PATH_FOR_WINDOWS) + token_content = File.read(Constants::IMDS_TOKEN_PATH_FOR_WINDOWS).strip + parsed_json = JSON.parse(token_content) + @token_expiry_time = Time.now + @@token_refresh_back_off_interval * 60 # set the expiry time to be ~ thirty minutes from current time + @cached_access_token = parsed_json["access_token"] + @log.info "Successfully got access token" + ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMToken-MSI", properties) else - @log.info "Using msi to get the token to post MDM data" + raise "Either MSI Token file path doesnt exist or not readble" end - ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMToken-MSI", properties) - @log.info "Opening TCP connection" - http_access_token = Net::HTTP.start(@parsed_token_uri.host, @parsed_token_uri.port, :use_ssl => false) - # http_access_token.use_ssl = false - token_request = Net::HTTP::Get.new(@parsed_token_uri.request_uri) - token_request["Metadata"] = true else - @log.info "Using SP to get the token to post MDM data" - ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMToken-SP", {}) - @log.info "Opening TCP connection" - http_access_token = Net::HTTP.start(@parsed_token_uri.host, @parsed_token_uri.port, :use_ssl => true) - # http_access_token.use_ssl = true - token_request = Net::HTTP::Post.new(@parsed_token_uri.request_uri) - token_request.set_form_data( - { - "grant_type" => @@grant_type, - "client_id" => @data_hash["aadClientId"], - "client_secret" => @data_hash["aadClientSecret"], - "resource" => @@token_resource_url, - } - ) - end + if (!!@useMsi) + @log.info "Using msi to get the token to post MDM data" + ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMToken-MSI", properties) + @log.info "Opening TCP connection" + http_access_token = Net::HTTP.start(@parsed_token_uri.host, @parsed_token_uri.port, :use_ssl => false) + # http_access_token.use_ssl = false + token_request = Net::HTTP::Get.new(@parsed_token_uri.request_uri) + token_request["Metadata"] = true + else + @log.info "Using SP to get the token to post MDM data" + 
ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMToken-SP", {}) + @log.info "Opening TCP connection" + http_access_token = Net::HTTP.start(@parsed_token_uri.host, @parsed_token_uri.port, :use_ssl => true) + # http_access_token.use_ssl = true + token_request = Net::HTTP::Post.new(@parsed_token_uri.request_uri) + token_request.set_form_data( + { + "grant_type" => @@grant_type, + "client_id" => @data_hash["aadClientId"], + "client_secret" => @data_hash["aadClientSecret"], + "resource" => @@token_resource_url, + } + ) + end - @log.info "making request to get token.." - token_response = http_access_token.request(token_request) - # Handle the case where the response is not 200 - parsed_json = JSON.parse(token_response.body) - @token_expiry_time = Time.now + @@token_refresh_back_off_interval * 60 # set the expiry time to be ~ thirty minutes from current time - @cached_access_token = parsed_json["access_token"] - @log.info "Successfully got access token" + @log.info "making request to get token.." + token_response = http_access_token.request(token_request) + # Handle the case where the response is not 200 + parsed_json = JSON.parse(token_response.body) + @token_expiry_time = Time.now + @@token_refresh_back_off_interval * 60 # set the expiry time to be ~ thirty minutes from current time + @cached_access_token = parsed_json["access_token"] + @log.info "Successfully got access token" + end end rescue => err @log.info "Exception in get_access_token: #{err}" @@ -316,10 +338,15 @@ def write(chunk) def send_to_mdm(post_body) begin if (!!@isArcK8sCluster) - if @cluster_identity.nil? - @cluster_identity = ArcK8sClusterIdentity.new + if @isAADMSIAuth && !@isWindows + access_token = get_access_token + else + # switch to IMDS sidecar endpoint for the windows once the Arc K8s team supports + if @cluster_identity.nil? + @cluster_identity = ArcK8sClusterIdentity.new + end + access_token = @cluster_identity.get_cluster_identity_token end - access_token = @cluster_identity.get_cluster_identity_token else access_token = get_access_token end @@ -336,7 +363,7 @@ def send_to_mdm(post_body) ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMSendSuccessful", {}) @last_telemetry_sent_time = Time.now end - rescue Net::HTTPClientException => e # see https://docs.ruby-lang.org/en/2.6.0/NEWS.html about deprecating HTTPServerException and adding HTTPClientException + rescue Net::HTTPClientException => e # see https://docs.ruby-lang.org/en/2.6.0/NEWS.html about deprecating HTTPServerException and adding HTTPClientException if !response.nil? && !response.body.nil? #body will have actual error @log.info "Failed to Post Metrics to MDM : #{e} Response.body: #{response.body}" else @@ -368,5 +395,18 @@ def send_to_mdm(post_body) raise e end end + + def isWindows() + isWindows = false + begin + os_type = ENV["OS_TYPE"] + if !os_type.nil? && !os_type.empty? 
&& os_type.strip.casecmp("windows") == 0 + isWindows = true + end + rescue => error + @log.warn "Error in MDM isWindows method: #{error}" + end + return isWindows + end end # class OutputMDM end # module Fluent diff --git a/test/e2e/conformance.yaml b/test/e2e/conformance.yaml index 71e40a6a2..bf9a3727a 100644 --- a/test/e2e/conformance.yaml +++ b/test/e2e/conformance.yaml @@ -3,7 +3,7 @@ sonobuoy-config: plugin-name: azure-arc-ci-conformance result-format: junit spec: - image: mcr.microsoft.com/azuremonitor/containerinsights/cidev:ciconftest10202021 + image: mcr.microsoft.com/azuremonitor/containerinsights/cidev:ciconftest04282022 imagePullPolicy: Always name: plugin resources: {} diff --git a/test/e2e/src/core/e2e_tests.sh b/test/e2e/src/core/e2e_tests.sh index dd9d93073..db89adf9a 100644 --- a/test/e2e/src/core/e2e_tests.sh +++ b/test/e2e/src/core/e2e_tests.sh @@ -111,8 +111,13 @@ addArcConnectedK8sExtension() { } addArcK8sCLIExtension() { - echo "adding Arc K8s k8s-extension extension" - az extension add --name k8s-extension + if [ ! -z "$K8S_EXTENSION_WHL_URL" ]; then + echo "adding Arc K8s k8s-extension cli extension from whl file path ${K8S_EXTENSION_WHL_URL}" + az extension add --source $K8S_EXTENSION_WHL_URL -y + else + echo "adding Arc K8s k8s-extension cli extension" + az extension add --name k8s-extension + fi } createArcCIExtension() { @@ -125,7 +130,11 @@ createArcCIExtension() { basicparameters="$basicparameters --version $CI_ARC_VERSION" fi - az k8s-extension create $basicparameters --configuration-settings omsagent.ISTEST=true + if [ ! -z "$USE_AAD_AUTH" ]; then + az k8s-extension create $basicparameters --configuration-settings omsagent.ISTEST=true omsagent.useAADAuth=$USE_AAD_AUTH + else + az k8s-extension create $basicparameters --configuration-settings omsagent.ISTEST=true + fi } showArcCIExtension() { diff --git a/test/e2e/src/tests/test_e2e_workflows.py b/test/e2e/src/tests/test_e2e_workflows.py index 02ad8cf14..1515534e1 100755 --- a/test/e2e/src/tests/test_e2e_workflows.py +++ b/test/e2e/src/tests/test_e2e_workflows.py @@ -38,7 +38,16 @@ def test_e2e_workflows(env_dict): if len(pod_list.items) <= 0: pytest.fail("number of items in pod list should be greater than 0") + if len(pod_list.items[0].spec.containers) < 1: + pytest.fail("number of containers in pod item should be at least 1") + envVars = pod_list.items[0].spec.containers[0].env + if (len(pod_list.items[0].spec.containers) > 1): + for container in pod_list.items[0].spec.containers: + if (container.name == constants.OMSAGENT_MAIN_CONTAINER_NAME): + envVars = container.env + break + if not envVars: pytest.fail("environment variables should be defined in the replicaset pod") diff --git a/test/e2e/src/tests/test_node_metrics_e2e_workflow.py b/test/e2e/src/tests/test_node_metrics_e2e_workflow.py index dfcc89dde..264abad6b 100755 --- a/test/e2e/src/tests/test_node_metrics_e2e_workflow.py +++ b/test/e2e/src/tests/test_node_metrics_e2e_workflow.py @@ -12,6 +12,8 @@ pytestmark = pytest.mark.agentests # validation of node metrics e2e workflow + + def test_node_metrics_e2e_workflow(env_dict): print("Starting node metrics e2e workflow test.") append_result_output("test_node_metrics_e2e_workflow start \n", @@ -39,7 +41,16 @@ def test_node_metrics_e2e_workflow(env_dict): if len(pod_list.items) <= 0: pytest.fail("number of items in pod list should be greater than 0") + if len(pod_list.items[0].spec.containers) < 1: + pytest.fail("number of containers in pod item should be at least 1") + envVars = 
pod_list.items[0].spec.containers[0].env + if (len(pod_list.items[0].spec.containers) > 1): + for container in pod_list.items[0].spec.containers: + if (container.name == constants.OMSAGENT_MAIN_CONTAINER_NAME): + envVars = container.env + break + if not envVars: pytest.fail( "environment variables should be defined in the replicaset pod") @@ -71,9 +82,11 @@ def test_node_metrics_e2e_workflow(env_dict): pytest.fail("access_token shouldnt be null or empty") waitTimeSeconds = env_dict['AGENT_WAIT_TIME_SECS'] - print("start: waiting for seconds: {} for agent workflows to get emitted".format(waitTimeSeconds)) + print("start: waiting for seconds: {} for agent workflows to get emitted".format( + waitTimeSeconds)) time.sleep(int(waitTimeSeconds)) - print("complete: waiting for seconds: {} for agent workflows to get emitted".format(waitTimeSeconds)) + print("complete: waiting for seconds: {} for agent workflows to get emitted".format( + waitTimeSeconds)) # validate metrics e2e workflow now = datetime.utcnow() @@ -105,8 +118,8 @@ def test_node_metrics_e2e_workflow(env_dict): "response of the metrics query API shouldnt be null or empty") if response.status_code != 200: - pytest.fail("metrics query API failed with an error code: {}".format( - response.status_code)) + pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) responseJSON = response.json() if not responseJSON: @@ -122,18 +135,21 @@ def test_node_metrics_e2e_workflow(env_dict): pytest.fail("response JSON shouldnt be null or empty") if len(responseValues) <= 0: - pytest.fail("length of value array in the response should be greater than 0") + pytest.fail( + "length of value array in the response should be greater than 0") for responseVal in responseValues: metricName = responseVal['name']['value'] if metricName != constants.NODE_MEMORY_RSS_METRIC_NAME: - pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_MEMORY_RSS_METRIC_NAME)) + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format( + metricName, constants.NODE_MEMORY_RSS_METRIC_NAME)) timeseries = responseVal['timeseries'] if not timeseries: pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( constants.NODE_MEMORY_RSS_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) if len(timeseries) <= 0: - pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_MEMORY_RSS_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format( + constants.NODE_MEMORY_RSS_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) # node metric - memoryRssPercentage custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( @@ -154,8 +170,8 @@ def test_node_metrics_e2e_workflow(env_dict): "response of the metrics query API shouldnt be null or empty") if response.status_code != 200: - pytest.fail("metrics query API failed with an error code: {}".format( - response.status_code)) + pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) responseJSON = response.json() if not responseJSON: @@ -171,18 +187,21 @@ def test_node_metrics_e2e_workflow(env_dict): pytest.fail("response JSON shouldnt be null or empty") if 
len(responseValues) <= 0: - pytest.fail("length of value array in the response should be greater than 0") + pytest.fail( + "length of value array in the response should be greater than 0") for responseVal in responseValues: metricName = responseVal['name']['value'] if metricName != constants.NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME: - pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME)) + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format( + metricName, constants.NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME)) timeseries = responseVal['timeseries'] if not timeseries: pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( constants.NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) if len(timeseries) <= 0: - pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format( + constants.NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) # node metric - memoryWorkingSetBytes custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( @@ -199,11 +218,12 @@ def test_node_metrics_e2e_workflow(env_dict): headers=Headers) if not response: - pytest.fail("response of the metrics query API shouldnt be null or empty") + pytest.fail( + "response of the metrics query API shouldnt be null or empty") if response.status_code != 200: - pytest.fail("metrics query API failed with an error code: {}".format( - response.status_code)) + pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) responseJSON = response.json() if not responseJSON: @@ -219,18 +239,21 @@ def test_node_metrics_e2e_workflow(env_dict): pytest.fail("response JSON shouldnt be null or empty") if len(responseValues) <= 0: - pytest.fail("length of value array in the response should be greater than 0") + pytest.fail( + "length of value array in the response should be greater than 0") for responseVal in responseValues: metricName = responseVal['name']['value'] if metricName != constants.NODE_MEMORY_WS_METRIC_NAME: - pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_MEMORY_WS_METRIC_NAME)) + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format( + metricName, constants.NODE_MEMORY_WS_METRIC_NAME)) timeseries = responseVal['timeseries'] if not timeseries: pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( constants.NODE_MEMORY_WS_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) if len(timeseries) <= 0: - pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_MEMORYE_WS_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format( + constants.NODE_MEMORYE_WS_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) # node metric - memoryWorkingSetPercentage custommetricsUrl = 
'{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( @@ -247,11 +270,12 @@ def test_node_metrics_e2e_workflow(env_dict): headers=Headers) if not response: - pytest.fail("response of the metrics query API shouldnt be null or empty") + pytest.fail( + "response of the metrics query API shouldnt be null or empty") if response.status_code != 200: - pytest.fail("metrics query API failed with an error code: {}".format( - response.status_code)) + pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) responseJSON = response.json() if not responseJSON: @@ -267,18 +291,21 @@ def test_node_metrics_e2e_workflow(env_dict): pytest.fail("response JSON shouldnt be null or empty") if len(responseValues) <= 0: - pytest.fail("length of value array in the response should be greater than 0") + pytest.fail( + "length of value array in the response should be greater than 0") for responseVal in responseValues: metricName = responseVal['name']['value'] if metricName != constants.NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME: - pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME)) + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format( + metricName, constants.NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME)) timeseries = responseVal['timeseries'] if not timeseries: pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( constants.NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) if len(timeseries) <= 0: - pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format( + constants.NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) # node metric - cpuUsageMilliCores custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( @@ -295,10 +322,12 @@ def test_node_metrics_e2e_workflow(env_dict): headers=Headers) if not response: - pytest.fail("response of the metrics query API shouldnt be null or empty") + pytest.fail( + "response of the metrics query API shouldnt be null or empty") if response.status_code != 200: - pytest.fail("metrics query API failed with an error code: {}".format(response.status_code)) + pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) responseJSON = response.json() if not responseJSON: @@ -314,18 +343,21 @@ def test_node_metrics_e2e_workflow(env_dict): pytest.fail("response JSON shouldnt be null or empty") if len(responseValues) <= 0: - pytest.fail("length of value array in the response should be greater than 0") + pytest.fail( + "length of value array in the response should be greater than 0") for responseVal in responseValues: metricName = responseVal['name']['value'] if metricName != constants.NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME: - pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME)) + pytest.fail("got the metricname: {0} but 
expected metricname:{1} in the response".format( + metricName, constants.NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME)) timeseries = responseVal['timeseries'] if not timeseries: pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( constants.NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) if len(timeseries) <= 0: - pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format( + constants.NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) # node metric - cpuUsagePercentage custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( @@ -342,10 +374,12 @@ def test_node_metrics_e2e_workflow(env_dict): headers=Headers) if not response: - pytest.fail("response of the metrics query API shouldnt be null or empty") + pytest.fail( + "response of the metrics query API shouldnt be null or empty") if response.status_code != 200: - pytest.fail("metrics query API failed with an error code: {}".format(response.status_code)) + pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) responseJSON = response.json() if not responseJSON: @@ -361,18 +395,21 @@ def test_node_metrics_e2e_workflow(env_dict): pytest.fail("response JSON shouldnt be null or empty") if len(responseValues) <= 0: - pytest.fail("length of value array in the response should be greater than 0") + pytest.fail( + "length of value array in the response should be greater than 0") for responseVal in responseValues: metricName = responseVal['name']['value'] if metricName != constants.NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME: - pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME)) + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format( + metricName, constants.NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME)) timeseries = responseVal['timeseries'] if not timeseries: pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( constants.NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) if len(timeseries) <= 0: - pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format( + constants.NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) # node metric - nodesCount custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( @@ -389,10 +426,12 @@ def test_node_metrics_e2e_workflow(env_dict): headers=Headers) if not response: - pytest.fail("response of the metrics query API shouldnt be null or empty") + pytest.fail( + "response of the metrics query API shouldnt be null or empty") if response.status_code != 200: - pytest.fail("metrics query API failed with an error code: {}".format(response.status_code)) + 
pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) responseJSON = response.json() if not responseJSON: @@ -408,18 +447,21 @@ def test_node_metrics_e2e_workflow(env_dict): pytest.fail("response JSON shouldnt be null or empty") if len(responseValues) <= 0: - pytest.fail("length of value array in the response should be greater than 0") + pytest.fail( + "length of value array in the response should be greater than 0") for responseVal in responseValues: metricName = responseVal['name']['value'] if metricName != constants.NODE_COUNT_METRIC_NAME: - pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_COUNT_METRIC_NAME)) + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format( + metricName, constants.NODE_COUNT_METRIC_NAME)) timeseries = responseVal['timeseries'] if not timeseries: pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( constants.NODE_COUNT_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) if len(timeseries) <= 0: - pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_COUNT_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format( + constants.NODE_COUNT_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) append_result_output("test_node_metrics_e2e_workflow end \n", env_dict['TEST_AGENT_LOG_FILE']) diff --git a/test/e2e/src/tests/test_pod_metrics_e2e_workflow.py b/test/e2e/src/tests/test_pod_metrics_e2e_workflow.py index 81e2b77a9..4be36b8a9 100755 --- a/test/e2e/src/tests/test_pod_metrics_e2e_workflow.py +++ b/test/e2e/src/tests/test_pod_metrics_e2e_workflow.py @@ -12,6 +12,8 @@ pytestmark = pytest.mark.agentests # validation of pod metrics e2e workflows + + def test_pod_metrics_e2e_workflow(env_dict): print("Starting pod metrics e2e workflows test.") append_result_output("test_pod_metrics_e2e_workflow start \n", @@ -39,7 +41,16 @@ def test_pod_metrics_e2e_workflow(env_dict): if len(pod_list.items) <= 0: pytest.fail("number of items in pod list should be greater than 0") + if len(pod_list.items[0].spec.containers) < 1: + pytest.fail("number of containers in pod item should be at least 1") + envVars = pod_list.items[0].spec.containers[0].env + if (len(pod_list.items[0].spec.containers) > 1): + for container in pod_list.items[0].spec.containers: + if (container.name == constants.OMSAGENT_MAIN_CONTAINER_NAME): + envVars = container.env + break + if not envVars: pytest.fail( "environment variables should be defined in the replicaset pod") @@ -71,9 +82,11 @@ def test_pod_metrics_e2e_workflow(env_dict): pytest.fail("access_token shouldnt be null or empty") waitTimeSeconds = env_dict['AGENT_WAIT_TIME_SECS'] - print("start: waiting for seconds: {} for agent workflows to get emitted".format(waitTimeSeconds)) + print("start: waiting for seconds: {} for agent workflows to get emitted".format( + waitTimeSeconds)) time.sleep(int(waitTimeSeconds)) - print("complete: waiting for seconds: {} for agent workflows to get emitted".format(waitTimeSeconds)) + print("complete: waiting for seconds: {} for agent workflows to get emitted".format( + waitTimeSeconds)) # validate metrics e2e workflow now = datetime.utcnow() @@ -104,8 +117,8 @@ def test_pod_metrics_e2e_workflow(env_dict): "response of the metrics query API shouldnt be null or empty") if response.status_code != 
200: - pytest.fail("metrics query API failed with an error code: {}".format( - response.status_code)) + pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) responseJSON = response.json() if not responseJSON: @@ -121,18 +134,21 @@ def test_pod_metrics_e2e_workflow(env_dict): pytest.fail("response JSON shouldnt be null or empty") if len(responseValues) <= 0: - pytest.fail("length of value array in the response should be greater than 0") + pytest.fail( + "length of value array in the response should be greater than 0") for responseVal in responseValues: metricName = responseVal['name']['value'] if metricName != constants.POD_COUNT_METRIC_NAME: - pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.POD_COUNT_METRIC_NAME)) + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format( + metricName, constants.POD_COUNT_METRIC_NAME)) timeseries = responseVal['timeseries'] if not timeseries: pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( constants.POD_COUNT_METRIC_NAME, constants.POD_METRICS_NAMESPACE)) if len(timeseries) <= 0: - pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.POD_COUNT_METRIC_NAME, constants.POD_METRICS_NAMESPACE)) + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format( + constants.POD_COUNT_METRIC_NAME, constants.POD_METRICS_NAMESPACE)) append_result_output("test_pod_metrics_e2e_workflow end \n", env_dict['TEST_AGENT_LOG_FILE']) From 0da25a72c1ecd0e893e3484cdce14aaaa9153e2f Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 9 May 2022 14:28:14 -0700 Subject: [PATCH 224/301] Gangams/ws2022 support (#756) * use hyperv isolation * multi-arc image support * multi-arc image support * multi-arc image support * multi-arc image support * multi-arc image support * multi-arc image support * multi-arc image support * multi-arc image support * multi-arc image support * multi-arc image support * doc and script updates * add common as dependency for multi-arc job * merge into single job for perf evaluation * merge into single job for perf evaluation * merge into single job for perf evaluation * separate jobs for ltsc2019 & ltsc2022 * separate jobs for ltsc2019 & ltsc2022 * update dev image docker file & script * remove unnecessary task * update prod pipeline yaml for windows multi-arc image * test yamls for ltsc2019 & ltsc2022 * fix pr checker fail * fix repoImageWindows path in windows pipeline * remove passing imagetag for prod --- .github/workflows/pr-checker.yml | 4 +- .pipelines/azure_pipeline_dev.yaml | 96 ++++++++++++++++--- .pipelines/azure_pipeline_prod.yaml | 96 ++++++++++++++++--- README.md | 16 ++++ kubernetes/windows/Dockerfile | 4 +- kubernetes/windows/Dockerfile-dev-base-image | 3 +- .../build-and-publish-dev-docker-image.ps1 | 55 +++++++++-- .../build-and-publish-docker-image.ps1 | 54 +++++++++-- .../win-prometheus-ref-app-ltsc2019.yml | 50 ++++++++++ .../win-prometheus-ref-app-ltsc2022.yml | 50 ++++++++++ test/scenario/log-app-win-ltsc2019.yml | 50 ++++++++++ test/scenario/log-app-win-ltsc2022.yml | 50 ++++++++++ 12 files changed, 486 insertions(+), 42 deletions(-) create mode 100644 test/prometheus-scraping/win-prometheus-ref-app-ltsc2019.yml create mode 100644 test/prometheus-scraping/win-prometheus-ref-app-ltsc2022.yml create mode 100644 
test/scenario/log-app-win-ltsc2019.yml create mode 100644 test/scenario/log-app-win-ltsc2022.yml diff --git a/.github/workflows/pr-checker.yml b/.github/workflows/pr-checker.yml index 91e81dc16..036c7edd3 100644 --- a/.github/workflows/pr-checker.yml +++ b/.github/workflows/pr-checker.yml @@ -91,7 +91,7 @@ jobs: run: cd ./build/windows/ && & .\Makefile.ps1 - name: Create-docker-image run: | - cd ./kubernetes/windows/ && docker build . --file Dockerfile -t $env:IMAGETAG --build-arg IMAGE_TAG=$env:IMAGETAG_TELEMETRY + cd ./kubernetes/windows/ && docker build . --file Dockerfile -t $env:IMAGETAG --build-arg WINDOWS_VERSION=ltsc2019 --build-arg IMAGE_TAG=$env:IMAGETAG_TELEMETRY - name: List-docker-images run: docker images --digests --all - + diff --git a/.pipelines/azure_pipeline_dev.yaml b/.pipelines/azure_pipeline_dev.yaml index 4834bcac5..c4723e8e1 100644 --- a/.pipelines/azure_pipeline_dev.yaml +++ b/.pipelines/azure_pipeline_dev.yaml @@ -91,7 +91,7 @@ jobs: pool: name: Azure-Pipelines-CI-Test-EO variables: - linuxImagetag: $[ dependencies.common.outputs['setup.linuxImagetag'] ] + linuxImagetag: $[ dependencies.common.outputs['setup.linuxImagetag'] ] steps: - task: AzureCLI@2 @@ -141,13 +141,14 @@ jobs: pathToPublish: '$(Build.ArtifactStagingDirectory)' artifactName: drop -- job: build_windows - dependsOn: common +- job: build_windows_2019 + dependsOn: + - common pool: name: Azure-Pipelines-Windows-CI-Test-EO variables: - windowsImageTag: $[ dependencies.common.outputs['setup.windowsImageTag'] ] - + windowsImageTag: $[ dependencies.common.outputs['setup.windowsImageTag'] ] + windows2019BaseImageVersion: ltsc2019 steps: - task: PowerShell@2 inputs: @@ -163,7 +164,7 @@ jobs: displayName: 'build base' - task: AzureCLI@2 - displayName: "Docker windows build" + displayName: "Docker windows build for ltsc2019" inputs: azureSubscription: ${{ variables.armServiceConnectionName }} scriptType: ps @@ -177,28 +178,99 @@ jobs: az account set -s ${{ variables.subscription }} az acr login -n ${{ variables.containerRegistry }} - @{"image.name"="${{ variables.repoImageName }}:$(windowsImageTag)"} | ConvertTo-Json -Compress | Out-File -Encoding ascii $(Build.ArtifactStagingDirectory)/windows/metadata.json + docker build --isolation=hyperv --tag ${{ variables.repoImageName }}:$(windowsImageTag)-$(windows2019BaseImageVersion) --build-arg WINDOWS_VERSION=$(windows2019BaseImageVersion) --build-arg IMAGE_TAG=$(windowsImageTag) . + if ("$(Build.Reason)" -ne "PullRequest") { + docker push ${{ variables.repoImageName }}:$(windowsImageTag)-$(windows2019BaseImageVersion) + } - docker build --tag ${{ variables.repoImageName }}:$(windowsImageTag) --build-arg IMAGE_TAG=$(windowsImageTag) . 
+- job: build_windows_2022 + dependsOn: + - common + pool: + name: Azure-Pipelines-Windows-CI-Test-EO + variables: + windowsImageTag: $[ dependencies.common.outputs['setup.windowsImageTag'] ] + windows2022BaseImageVersion: ltsc2022 + steps: + - task: PowerShell@2 + inputs: + targetType: 'filePath' + filePath: $(System.DefaultWorkingDirectory)/scripts/build/windows/install-build-pre-requisites.ps1 + displayName: 'install prereqs' + + - script: | + setlocal enabledelayedexpansion + powershell.exe -ExecutionPolicy Unrestricted -NoProfile -WindowStyle Hidden -File "build\windows\Makefile.ps1" + endlocal + exit /B %ERRORLEVEL% + displayName: 'build base' + + - task: AzureCLI@2 + displayName: "Docker windows build for ltsc2022" + inputs: + azureSubscription: ${{ variables.armServiceConnectionName }} + scriptType: ps + scriptLocation: inlineScript + inlineScript: | + mkdir -p $(Build.ArtifactStagingDirectory)/windows + cd kubernetes/windows + az --version + az account show + az account set -s ${{ variables.subscription }} + az acr login -n ${{ variables.containerRegistry }} + + docker build --isolation=hyperv --tag ${{ variables.repoImageName }}:$(windowsImageTag)-$(windows2022BaseImageVersion) --build-arg WINDOWS_VERSION=$(windows2022BaseImageVersion) --build-arg IMAGE_TAG=$(windowsImageTag) . if ("$(Build.Reason)" -ne "PullRequest") { - docker push ${{ variables.repoImageName }}:$(windowsImageTag) + docker push ${{ variables.repoImageName }}:$(windowsImageTag)-$(windows2022BaseImageVersion) } +- job: build_windows_multi_arc + dependsOn: + - common + - build_windows_2019 + - build_windows_2022 + pool: + name: Azure-Pipelines-Windows-CI-Test-EO + variables: + windowsImageTag: $[ dependencies.common.outputs['setup.windowsImageTag'] ] + windows2019BaseImageVersion: ltsc2019 + windows2022BaseImageVersion: ltsc2022 + steps: + - task: AzureCLI@2 + displayName: "Docker windows build for multi-arc image" + inputs: + azureSubscription: ${{ variables.armServiceConnectionName }} + scriptType: ps + scriptLocation: inlineScript + inlineScript: | + mkdir -p $(Build.ArtifactStagingDirectory)/windows + cd kubernetes/windows + + az --version + az account show + az account set -s ${{ variables.subscription }} + az acr login -n ${{ variables.containerRegistry }} + + @{"image.name"="${{ variables.repoImageName }}:$(windowsImageTag)"} | ConvertTo-Json -Compress | Out-File -Encoding ascii $(Build.ArtifactStagingDirectory)/windows/metadata.json + + if ("$(Build.Reason)" -ne "PullRequest") { + docker manifest create ${{ variables.repoImageName }}:$(windowsImageTag) ${{ variables.repoImageName }}:$(windowsImageTag)-$(windows2019BaseImageVersion) ${{ variables.repoImageName }}:$(windowsImageTag)-$(windows2022BaseImageVersion) + docker manifest push ${{ variables.repoImageName }}:$(windowsImageTag) + } - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 displayName: 'Generation Task' condition: eq(variables.IS_PR, true) inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)/windows' - DockerImagesToScan: 'mcr.microsoft.com/windows/servercore:ltsc2019' + DockerImagesToScan: 'mcr.microsoft.com/windows/servercore:ltsc2019, mcr.microsoft.com/windows/servercore:ltsc2022' - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 displayName: 'Generation Task' condition: eq(variables.IS_PR, false) inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)/windows' - DockerImagesToScan: 'mcr.microsoft.com/windows/servercore:ltsc2019, ${{ 
variables.repoImageName }}:$(windowsImageTag)' - + DockerImagesToScan: 'mcr.microsoft.com/windows/servercore:ltsc2019, mcr.microsoft.com/windows/servercore:ltsc2022, ${{ variables.repoImageName }}:$(windowsImageTag)' - task: PublishBuildArtifacts@1 inputs: pathToPublish: '$(Build.ArtifactStagingDirectory)' diff --git a/.pipelines/azure_pipeline_prod.yaml b/.pipelines/azure_pipeline_prod.yaml index 28071786d..8239a7058 100644 --- a/.pipelines/azure_pipeline_prod.yaml +++ b/.pipelines/azure_pipeline_prod.yaml @@ -95,7 +95,7 @@ jobs: pool: name: Azure-Pipelines-CI-Prod-EO variables: - linuxImagetag: $[ dependencies.common.outputs['setup.linuxImagetag'] ] + linuxImagetag: $[ dependencies.common.outputs['setup.linuxImagetag'] ] steps: - task: AzureCLI@2 @@ -146,13 +146,14 @@ jobs: pathToPublish: '$(Build.ArtifactStagingDirectory)' artifactName: drop -- job: build_windows - dependsOn: common +- job: build_windows_2019 + dependsOn: + - common pool: name: Azure-Pipelines-Windows-CI-Prod-EO variables: - windowsImageTag: $[ dependencies.common.outputs['setup.windowsImageTag'] ] - + windowsImageTag: $[ dependencies.common.outputs['setup.windowsImageTag'] ] + windows2019BaseImageVersion: ltsc2019 steps: - task: PowerShell@2 inputs: @@ -168,7 +169,7 @@ jobs: displayName: 'build base' - task: AzureCLI@2 - displayName: "Docker windows build" + displayName: "Docker windows build for ltsc2019" inputs: azureSubscription: ${{ variables.armServiceConnectionName }} scriptType: ps @@ -182,28 +183,99 @@ jobs: az account set -s ${{ variables.subscription }} az acr login -n ${{ variables.containerRegistry }} - @{"image.name"="${{ variables.repoImageNameWindows }}:$(windowsImageTag)"} | ConvertTo-Json -Compress | Out-File -Encoding ascii $(Build.ArtifactStagingDirectory)/windows/metadata.json + docker build --isolation=hyperv --tag ${{ variables.repoImageNameWindows }}:$(windowsImageTag)-$(windows2019BaseImageVersion) --build-arg WINDOWS_VERSION=$(windows2019BaseImageVersion) . + if ("$(Build.Reason)" -ne "PullRequest") { + docker push ${{ variables.repoImageNameWindows }}:$(windowsImageTag)-$(windows2019BaseImageVersion) + } - docker build --tag ${{ variables.repoImageNameWindows }}:$(windowsImageTag) . +- job: build_windows_2022 + dependsOn: + - common + pool: + name: Azure-Pipelines-Windows-CI-Prod-EO + variables: + windowsImageTag: $[ dependencies.common.outputs['setup.windowsImageTag'] ] + windows2022BaseImageVersion: ltsc2022 + steps: + - task: PowerShell@2 + inputs: + targetType: 'filePath' + filePath: $(System.DefaultWorkingDirectory)/scripts/build/windows/install-build-pre-requisites.ps1 + displayName: 'install prereqs' + + - script: | + setlocal enabledelayedexpansion + powershell.exe -ExecutionPolicy Unrestricted -NoProfile -WindowStyle Hidden -File "build\windows\Makefile.ps1" + endlocal + exit /B %ERRORLEVEL% + displayName: 'build base' + + - task: AzureCLI@2 + displayName: "Docker windows build for ltsc2022" + inputs: + azureSubscription: ${{ variables.armServiceConnectionName }} + scriptType: ps + scriptLocation: inlineScript + inlineScript: | + mkdir -p $(Build.ArtifactStagingDirectory)/windows + cd kubernetes/windows + az --version + az account show + az account set -s ${{ variables.subscription }} + az acr login -n ${{ variables.containerRegistry }} + + docker build --isolation=hyperv --tag ${{ variables.repoImageNameWindows }}:$(windowsImageTag)-$(windows2022BaseImageVersion) --build-arg WINDOWS_VERSION=$(windows2022BaseImageVersion) . 
if ("$(Build.Reason)" -ne "PullRequest") { - docker push ${{ variables.repoImageNameWindows }}:$(windowsImageTag) + docker push ${{ variables.repoImageNameWindows }}:$(windowsImageTag)-$(windows2022BaseImageVersion) } +- job: build_windows_multi_arc + dependsOn: + - common + - build_windows_2019 + - build_windows_2022 + pool: + name: Azure-Pipelines-Windows-CI-Prod-EO + variables: + windowsImageTag: $[ dependencies.common.outputs['setup.windowsImageTag'] ] + windows2019BaseImageVersion: ltsc2019 + windows2022BaseImageVersion: ltsc2022 + steps: + - task: AzureCLI@2 + displayName: "Docker windows build for multi-arc image" + inputs: + azureSubscription: ${{ variables.armServiceConnectionName }} + scriptType: ps + scriptLocation: inlineScript + inlineScript: | + mkdir -p $(Build.ArtifactStagingDirectory)/windows + cd kubernetes/windows + + az --version + az account show + az account set -s ${{ variables.subscription }} + az acr login -n ${{ variables.containerRegistry }} + + @{"image.name"="${{ variables.repoImageNameWindows }}:$(windowsImageTag)"} | ConvertTo-Json -Compress | Out-File -Encoding ascii $(Build.ArtifactStagingDirectory)/windows/metadata.json + + if ("$(Build.Reason)" -ne "PullRequest") { + docker manifest create ${{ variables.repoImageNameWindows }}:$(windowsImageTag) ${{ variables.repoImageNameWindows }}:$(windowsImageTag)-$(windows2019BaseImageVersion) ${{ variables.repoImageNameWindows }}:$(windowsImageTag)-$(windows2022BaseImageVersion) + docker manifest push ${{ variables.repoImageNameWindows }}:$(windowsImageTag) + } - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 displayName: 'Generation Task' condition: eq(variables.IS_PR, true) inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)/windows' - DockerImagesToScan: 'mcr.microsoft.com/windows/servercore:ltsc2019' + DockerImagesToScan: 'mcr.microsoft.com/windows/servercore:ltsc2019, mcr.microsoft.com/windows/servercore:ltsc2022' - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 displayName: 'Generation Task' condition: eq(variables.IS_PR, false) inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)/windows' - DockerImagesToScan: 'mcr.microsoft.com/windows/servercore:ltsc2019, ${{ variables.repoImageNameWindows }}:$(windowsImageTag)' - + DockerImagesToScan: 'mcr.microsoft.com/windows/servercore:ltsc2019, mcr.microsoft.com/windows/servercore:ltsc2022, ${{ variables.repoImageName }}:$(windowsImageTag)' - task: PublishBuildArtifacts@1 inputs: pathToPublish: '$(Build.ArtifactStagingDirectory)' diff --git a/README.md b/README.md index e7206c492..6e51d256b 100644 --- a/README.md +++ b/README.md @@ -246,12 +246,28 @@ powershell -ExecutionPolicy bypass # switch to powershell if you are not on pow And then run the script to build the image consisting of code and conf changes. 
``` .\build-and-publish-dev-docker-image.ps1 -image /: # trigger build code and image and publish docker hub or acr +By default, multi-arc docker image will be built, but if you want generate test image either with ltsc2019 or ltsc2022 base image, then you can follow the instructions below + +For building image with base image version ltsc2019 +.\build-and-publish-dev-docker-image.ps1 -image /: -windowsBaseImageVersion ltsc2019 + +For building image with base image version ltsc2022 +.\build-and-publish-dev-docker-image.ps1 -image /: -windowsBaseImageVersion ltsc2022 + + ``` For the subsequent builds, you can just run - ``` .\build-and-publish-dev-docker-image.ps1 -image /: # trigger build code and image and publish docker hub or acr +By default, multi-arc docker image will be built, but if you want generate test image either with ltsc2019 or ltsc2022 base image, then you can follow the instructions below + +For building image with base image version ltsc2019 +.\build-and-publish-dev-docker-image.ps1 -image /: -windowsBaseImageVersion ltsc2019 + +For building image with base image version ltsc2022 +.\build-and-publish-dev-docker-image.ps1 -image /: -windowsBaseImageVersion ltsc2022 ``` ###### Note - If you have changes in setup.ps1 and want to test those changes, uncomment the section consisting of setup.ps1 in the Dockerfile-dev-image file. diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 7c514a777..672dfb1f7 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -1,4 +1,6 @@ -FROM mcr.microsoft.com/windows/servercore:ltsc2019 +# Supported values of windows version are ltsc2019 or ltsc2022 which are being passed by the build script or build pipeline +ARG WINDOWS_VERSION= +FROM mcr.microsoft.com/windows/servercore:${WINDOWS_VERSION} MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" diff --git a/kubernetes/windows/Dockerfile-dev-base-image b/kubernetes/windows/Dockerfile-dev-base-image index 6a758060a..3aca6ae20 100644 --- a/kubernetes/windows/Dockerfile-dev-base-image +++ b/kubernetes/windows/Dockerfile-dev-base-image @@ -1,4 +1,5 @@ -FROM mcr.microsoft.com/windows/servercore:ltsc2019 +ARG WINDOWS_VERSION= +FROM mcr.microsoft.com/windows/servercore:${WINDOWS_VERSION} MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" diff --git a/kubernetes/windows/dockerbuild/build-and-publish-dev-docker-image.ps1 b/kubernetes/windows/dockerbuild/build-and-publish-dev-docker-image.ps1 index b87132218..f62851eac 100644 --- a/kubernetes/windows/dockerbuild/build-and-publish-dev-docker-image.ps1 +++ b/kubernetes/windows/dockerbuild/build-and-publish-dev-docker-image.ps1 @@ -7,7 +7,8 @@ #> param( [Parameter(mandatory = $true)] - [string]$image + [string]$image, + [string]$windowsBaseImageVersion="" # Supported values are ltsc2019 or ltsc2022. Default is multi-arc image unless this value specified ) $currentdir = $PSScriptRoot @@ -55,10 +56,50 @@ Write-Host "changing directory to DockerFile dir: $dockerFileDir" Set-Location -Path $dockerFileDir $updateImage = ${imagerepo} + ":" + ${imageTag} -Write-Host "STAT:Triggering docker image build: $image" -docker build -t $updateImage --build-arg IMAGE_TAG=$imageTag -f Dockerfile-dev-image . 
-Write-Host "END:Triggering docker image build: $updateImage" +if ([string]::IsNullOrEmpty($windowsBaseImageVersion)) { + Write-Host "START:Triggering multi-arc docker image build for ltsc2019 & ltsc2022: $image" -Write-Host "STAT:pushing docker image : $updateImage" -docker push $updateImage -Write-Host "EnD:pushing docker image : $updateImage" + $WINDOWS_VERSION="ltsc2019" + $updateImageLTSC2019 = ${imagerepo} + ":" + ${imageTag} + "-" + ${WINDOWS_VERSION} + Write-Host "START:Triggering docker image build for ltsc2019: $updateImageLTSC2019" + docker build --isolation=hyperv -t $updateImageLTSC2019 --build-arg WINDOWS_VERSION=$WINDOWS_VERSION --build-arg IMAGE_TAG=$imageTag -f Dockerfile-dev-image . + Write-Host "END:Triggering docker image build for ltsc2019: $updateImageLTSC2019" + + $WINDOWS_VERSION="ltsc2022" + $updateImageLTSC2022 = ${imagerepo} + ":" + ${imageTag} + "-" + ${WINDOWS_VERSION} + Write-Host "START:Triggering docker image build for ltsc2022: $updateImageLTSC2022" + docker build --isolation=hyperv -t $updateImageLTSC2022 --build-arg WINDOWS_VERSION=$WINDOWS_VERSION --build-arg IMAGE_TAG=$imageTag -f Dockerfile-dev-image . + Write-Host "END:Triggering docker image build for ltsc2022: $updateImageLTSC2022" + + Write-Host "START:pushing docker image with base image ltsc2019 : $updateImageLTSC2019" + docker push $updateImageLTSC2019 + Write-Host "END:pushing docker image : $updateImageLTSC2019" + + Write-Host "START:pushing docker image with base image ltsc2022 : $updateImageLTSC2022" + docker push $updateImageLTSC2022 + Write-Host "END:pushing docker image : $updateImageLTSC2022" + + Write-Host "START:Triggering manigest for multi-arc docker image: $updateImage" + docker manifest create $updateImage $updateImageLTSC2019 $updateImageLTSC2022 + docker manifest push $updateImage + Write-Host "END:Triggering manifest for multi-arc docker image: $updateImage" + + Write-Host "END:Triggering multi-arc docker image build for ltsc2019 & ltsc2022: $image" + +} else { + + if (($windowsBaseImageVersion -eq "ltsc2019") -or ($windowsBaseImageVersion -eq "ltsc2022")) { + Write-Host "Provided baseimage version valid and supported: ${windowsBaseImageVersion}" + } else { + Write-Host "Provided baseimage version neither valid nor supported: ${windowsBaseImageVersion}" -ForegroundColor Red + exit 1 + } + + Write-Host "STAT:Triggering docker image build: $image with base image version: $windowsBaseImageVersion" + docker build -t $updateImage --build-arg WINDOWS_VERSION=$windowsBaseImageVersion --build-arg IMAGE_TAG=$imageTag -f Dockerfile-dev-image . + Write-Host "END:Triggering docker image build: $updateImage" + + Write-Host "STAT:pushing docker image : $updateImage" + docker push $updateImage + Write-Host "EnD:pushing docker image : $updateImage" +} diff --git a/kubernetes/windows/dockerbuild/build-and-publish-docker-image.ps1 b/kubernetes/windows/dockerbuild/build-and-publish-docker-image.ps1 index c1f655882..a5c78cc72 100644 --- a/kubernetes/windows/dockerbuild/build-and-publish-docker-image.ps1 +++ b/kubernetes/windows/dockerbuild/build-and-publish-docker-image.ps1 @@ -7,7 +7,8 @@ #> param( [Parameter(mandatory = $true)] - [string]$image + [string]$image, + [string]$windowsBaseImageVersion="" # Supported values are ltsc2019 or ltsc2022. 
Default is multi-arc image unless this value specified ) $currentdir = $PSScriptRoot @@ -55,10 +56,49 @@ Write-Host "changing directory to DockerFile dir: $dockerFileDir" Set-Location -Path $dockerFileDir $updateImage = ${imagerepo} + ":" + ${imageTag} -Write-Host "STAT:Triggering docker image build: $image" -docker build -t $updateImage --build-arg IMAGE_TAG=$imageTag . -Write-Host "END:Triggering docker image build: $updateImage" +if ([string]::IsNullOrEmpty($windowsBaseImageVersion)) { + Write-Host "START:Triggering multi-arc docker image build for ltsc2019 & ltsc2022: $image" + + $WINDOWS_VERSION="ltsc2019" + $updateImageLTSC2019 = ${imagerepo} + ":" + ${imageTag} + "-" + ${WINDOWS_VERSION} + Write-Host "START:Triggering docker image build for ltsc2019: $updateImageLTSC2019" + docker build --isolation=hyperv -t $updateImageLTSC2019 --build-arg WINDOWS_VERSION=$WINDOWS_VERSION --build-arg IMAGE_TAG=$imageTag . + Write-Host "END:Triggering docker image build for ltsc2019: $updateImageLTSC2019" + + $WINDOWS_VERSION="ltsc2022" + $updateImageLTSC2022 = ${imagerepo} + ":" + ${imageTag} + "-" + ${WINDOWS_VERSION} + Write-Host "START:Triggering docker image build for ltsc2022: $updateImageLTSC2022" + docker build --isolation=hyperv -t $updateImageLTSC2022 --build-arg WINDOWS_VERSION=$WINDOWS_VERSION --build-arg IMAGE_TAG=$imageTag . + Write-Host "END:Triggering docker image build for ltsc2022: $updateImageLTSC2022" + + Write-Host "START:pushing docker image with base image ltsc2019 : $updateImageLTSC2019" + docker push $updateImageLTSC2019 + Write-Host "END:pushing docker image : $updateImageLTSC2019" + + Write-Host "START:pushing docker image with base image ltsc2022 : $updateImageLTSC2022" + docker push $updateImageLTSC2022 + Write-Host "END:pushing docker image : $updateImageLTSC2022" + + Write-Host "START:Triggering manigest for multi-arc docker image: $updateImage" + docker manifest create $updateImage $updateImageLTSC2019 $updateImageLTSC2022 + docker manifest push $updateImage + Write-Host "END:Triggering manifest for multi-arc docker image: $updateImage" + + Write-Host "END:Triggering multi-arc docker image build for ltsc2019 & ltsc2022: $image" +} else { + if (($windowsBaseImageVersion -eq "ltsc2019") -or ($windowsBaseImageVersion -eq "ltsc2022")) { + Write-Host "Provided baseimage version valid and supported: ${windowsBaseImageVersion}" + } else { + Write-Host "Provided baseimage version neither valid nor supported: ${windowsBaseImageVersion}" -ForegroundColor Red + exit 1 + } + + Write-Host "START:Triggering docker image build: $image with baseImage version: ${windowsBaseImageVersion}" + docker build --isolation=hyperv -t $updateImage --build-arg WINDOWS_VERSION=$windowsBaseImageVersion --build-arg IMAGE_TAG=$imageTag . 
+ Write-Host "END:Triggering docker image build: $updateImage" + + Write-Host "START:pushing docker image : $updateImage" + docker push $updateImage + Write-Host "END:pushing docker image : $updateImage" +} -Write-Host "STAT:pushing docker image : $updateImage" -docker push $updateImage -Write-Host "EnD:pushing docker image : $updateImage" diff --git a/test/prometheus-scraping/win-prometheus-ref-app-ltsc2019.yml b/test/prometheus-scraping/win-prometheus-ref-app-ltsc2019.yml new file mode 100644 index 000000000..6dde6346c --- /dev/null +++ b/test/prometheus-scraping/win-prometheus-ref-app-ltsc2019.yml @@ -0,0 +1,50 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: win-prometheus-reference-app-ltsc2019 +spec: + selector: + matchLabels: + app: win-prometheus-reference-app-ltsc2019 + replicas: 1 + template: + metadata: + annotations: + prometheus.io/port: '2112' + prometheus.io/scrape: 'true' + labels: + app: win-prometheus-reference-app-ltsc2019 + spec: + containers: + - name: win-prometheus-reference-app-golang + image: mcr.microsoft.com/azuremonitor/containerinsights/cidev:win-prometheus-reference-app-golang + env: + - name: RUN_PERF_TEST + value: "false" + - name: SCRAPE_INTERVAL + value: "60" + - name: METRIC_COUNT + value: "125000" + ports: + - containerPort: 2112 + protocol: TCP + - containerPort: 2113 + protocol: TCP + - name: win-prometheus-reference-app-python + image: mcr.microsoft.com/azuremonitor/containerinsights/cidev:win-prometheus-reference-app-python + ports: + - containerPort: 2114 + protocol: TCP + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - windows + - key: kubernetes.azure.com/os-sku + operator: NotIn + values: + - Windows2022 \ No newline at end of file diff --git a/test/prometheus-scraping/win-prometheus-ref-app-ltsc2022.yml b/test/prometheus-scraping/win-prometheus-ref-app-ltsc2022.yml new file mode 100644 index 000000000..259a959ff --- /dev/null +++ b/test/prometheus-scraping/win-prometheus-ref-app-ltsc2022.yml @@ -0,0 +1,50 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: win-prometheus-reference-app-ltsc2022 +spec: + selector: + matchLabels: + app: win-prometheus-reference-app-ltsc2022 + replicas: 1 + template: + metadata: + annotations: + prometheus.io/port: '2112' + prometheus.io/scrape: 'true' + labels: + app: win-prometheus-reference-app-ltsc2022 + spec: + containers: + - name: win-prometheus-reference-app-golang + image: mcr.microsoft.com/azuremonitor/containerinsights/cidev:win-prometheus-reference-app-golang + env: + - name: RUN_PERF_TEST + value: "false" + - name: SCRAPE_INTERVAL + value: "60" + - name: METRIC_COUNT + value: "125000" + ports: + - containerPort: 2112 + protocol: TCP + - containerPort: 2113 + protocol: TCP + - name: win-prometheus-reference-app-python + image: mcr.microsoft.com/azuremonitor/containerinsights/cidev:win-prometheus-reference-app-python + ports: + - containerPort: 2114 + protocol: TCP + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - windows + - key: kubernetes.azure.com/os-sku + operator: In + values: + - Windows2022 \ No newline at end of file diff --git a/test/scenario/log-app-win-ltsc2019.yml b/test/scenario/log-app-win-ltsc2019.yml new file mode 100644 index 000000000..67381fa35 --- /dev/null +++ b/test/scenario/log-app-win-ltsc2019.yml @@ -0,0 +1,50 
@@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: windows-log-ltsc2019 + labels: + name: windows-log-ltsc2019 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: second-log-app + namespace: windows-log-ltsc2019 +spec: + replicas: 1 + selector: + matchLabels: + app: second-log-app + template: + metadata: + labels: + app: second-log-app + spec: + volumes: + - name: html + emptyDir: {} + containers: + - name: second-log-app + image: mcr.microsoft.com/windows/servercore:ltsc2019 + command: ["powershell", "-c"] + args: + - "$counter = 1; For(;;) { echo $counter; $counter++; Start-Sleep -Seconds 1; }" + env: + - name: RANOMD_ENV_VAR_1 + value: "#123312'@$98" + - name: RANOMD_ENV_VAR_2 + value: "test" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - windows + - key: kubernetes.azure.com/os-sku + operator: NotIn + values: + - Windows2022 \ No newline at end of file diff --git a/test/scenario/log-app-win-ltsc2022.yml b/test/scenario/log-app-win-ltsc2022.yml new file mode 100644 index 000000000..bdbddbaa4 --- /dev/null +++ b/test/scenario/log-app-win-ltsc2022.yml @@ -0,0 +1,50 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: windows-log-ltsc2022 + labels: + name: windows-log-ltsc2022 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: second-log-app + namespace: windows-log-ltsc2022 +spec: + replicas: 1 + selector: + matchLabels: + app: second-log-app + template: + metadata: + labels: + app: second-log-app + spec: + volumes: + - name: html + emptyDir: {} + containers: + - name: second-log-app + image: mcr.microsoft.com/windows/servercore:ltsc2022 + command: ["powershell", "-c"] + args: + - "$counter = 1; For(;;) { echo $counter; $counter++; Start-Sleep -Seconds 1; }" + env: + - name: RANOMD_ENV_VAR_1 + value: "#123312'@$98" + - name: RANOMD_ENV_VAR_2 + value: "test" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - windows + - key: kubernetes.azure.com/os-sku + operator: In + values: + - Windows2022 From 0ee0c603ea0667f2be7d1ba196b3a2b0c0384767 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Mon, 9 May 2022 16:56:19 -0700 Subject: [PATCH 225/301] CA Cert Fix for Mariner Hosts in Air Gap (#751) --- kubernetes/linux/main.sh | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 997f624e2..2b25b044c 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -288,6 +288,30 @@ fi export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT echo "export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT" >> ~/.bashrc +# Copying over CA certs for airgapped clouds. This is needed for Mariner vs Ubuntu hosts. +# We are unable to tell if the host is Mariner or Ubuntu, +# so both /anchors/ubuntu and /anchors/mariner are mounted in the yaml. +# One will have the certs and the other will be empty. +# These need to be copied to a different location for Mariner vs Ubuntu containers. +# OS_ID here is the container distro. +# Adding Mariner now even though the elif will never currently evaluate. 
+if [ $CLOUD_ENVIRONMENT == "usnat" ] || [ $CLOUD_ENVIRONMENT == "ussec" ]; then + OS_ID=$(cat /etc/os-release | grep ^ID= | cut -d '=' -f2 | tr -d '"' | tr -d "'") + if [ $OS_ID == "mariner" ]; then + cp /anchors/ubuntu/* /etc/pki/ca-trust/source/anchors + cp /anchors/mariner/* /etc/pki/ca-trust/source/anchors + update-ca-trust + else + if [ $OS_ID != "ubuntu" ]; then + echo "Error: The ID in /etc/os-release is not ubuntu or mariner. Defaulting to ubuntu." + fi + cp /anchors/ubuntu/* /usr/local/share/ca-certificates/ + cp /anchors/mariner/* /usr/local/share/ca-certificates/ + update-ca-certificates + cp /etc/ssl/certs/ca-certificates.crt /usr/lib/ssl/cert.pem + fi +fi + #consisten naming conventions with the windows export DOMAIN=$domain echo "export DOMAIN=$DOMAIN" >> ~/.bashrc From b7a84ea681c82252a367579b071627203d3652f6 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Wed, 11 May 2022 11:46:37 -0700 Subject: [PATCH 226/301] add cifs & fuse file systems to ignore list (#750) --- build/linux/installer/conf/telegraf.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/linux/installer/conf/telegraf.conf b/build/linux/installer/conf/telegraf.conf index 9f213e3e8..6ee1c472b 100644 --- a/build/linux/installer/conf/telegraf.conf +++ b/build/linux/installer/conf/telegraf.conf @@ -445,7 +445,7 @@ # mount_points = ["/"] ## Ignore mount points by filesystem type. - ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs", "cifs", "fuse"] fieldpass = ["free", "used", "used_percent"] taginclude = ["device","path","hostName"] # Below due to Bug - https://github.com/influxdata/telegraf/issues/5615 From e7c72377baed32eafe7aa864862fb9b8a2acf5aa Mon Sep 17 00:00:00 2001 From: Nina <47805999+MSNina123456@users.noreply.github.com> Date: Wed, 18 May 2022 10:37:07 +0800 Subject: [PATCH 227/301] Data collection script (#759) * Add files via upload * Add files via upload * Delete AKSInsightsLogCollection.sh * Create README.md * Add files via upload * move script to subfolder LogCollection * Update README.md * Rename AKSInsightsLogCollection.sh to AgentLogCollection.sh --- .../LogCollection/AgentLogCollection.sh | 226 ++++++++++++++++++ scripts/troubleshoot/LogCollection/README.md | 46 ++++ 2 files changed, 272 insertions(+) create mode 100644 scripts/troubleshoot/LogCollection/AgentLogCollection.sh create mode 100644 scripts/troubleshoot/LogCollection/README.md diff --git a/scripts/troubleshoot/LogCollection/AgentLogCollection.sh b/scripts/troubleshoot/LogCollection/AgentLogCollection.sh new file mode 100644 index 000000000..42a351e6e --- /dev/null +++ b/scripts/troubleshoot/LogCollection/AgentLogCollection.sh @@ -0,0 +1,226 @@ +#!/bin/bash +# +# Copyright (c) Microsoft Corporation. +# +# This script will collect all logs from the replicaset agent pod and a random daemonset pod, also collect onboard logs with processes +# +# Author Nina Li + +Red='\033[0;31m' +Cyan='\033[0;36m' +NC='\033[0m' # No Color + +init() +{ + echo -e "Preparing for log collection..." | tee -a Tool.log + + if ! cmd="$(type -p kubectl)" || [[ -z $cmd ]]; then + echo -e "${Red}Command kubectl not found, please install it firstly, exit...${NC}" + cd .. + rm -rf $output_path + exit + fi + + if ! cmd="$(type -p tar)" || [[ -z $cmd ]]; then + echo -e "${Red}Command tar not found, please install it firstly, exit...${NC}" + cd .. 
+ rm -rf $output_path + exit + fi + + cmd=`kubectl get nodes 2>&1` + if [[ $cmd == *"refused"* ]];then + echo -e "${Red}Fail to connect your AKS, please fisrlty connect to cluster by command: az aks get-credentials --resource-group myResourceGroup --name myAKSCluster${NC}" + cd .. + rm -rf $output_path + exit + fi + + cmd=`kubectl get nodes | sed 1,1d | awk '{print $2}'` + for node in $cmd + do + if [ `echo $node | tr -s '[:upper:]' '[:lower:]'` != "ready" ]; then + kubectl get nodes + echo -e "${Red} One or more AKS node is not ready, please start this node firstly for log collection, exit...${NC}" + cd .. + rm -rf $output_path + exit + fi + done + echo -e "Prerequistes check is done, all good" | tee -a Tool.log + + echo -e "Saving cluster information" | tee -a Tool.log + + cmd=`kubectl cluster-info 2>&1` + if [[ $cmd == *"refused"* ]];then + echo -e "${Red}Fail to get cluster info, please check your AKS status fistly, exit...${NC}" + cd .. + rm -rf $output_path + exit + else + echo $cmd >> Tool.log + echo -e "cluster info saved to Tool.log" | tee -a Tool.log + fi + +} + +ds_logCollection() +{ + echo -e "Collecting logs from ${ds_pod}..." | tee -a Tool.log + kubectl describe pod ${ds_pod} --namespace=kube-system > describe_${ds_pod}.txt + kubectl logs ${ds_pod} --container omsagent --namespace=kube-system > logs_${ds_pod}.txt + kubectl logs ${ds_pod} --container omsagent-prometheus --namespace=kube-system > logs_${ds_pod}_prom.txt + kubectl exec ${ds_pod} -n kube-system --request-timeout=10m -- ps -ef > process_${ds_pod}.txt + + cmd=`kubectl exec ${ds_pod} -n kube-system -- ls /var/opt/microsoft 2>&1` + if [[ $cmd == *"cannot access"* ]];then + echo -e "${Red}/var/opt/microsoft not exist on ${ds_pod}${NC}" | tee -a Tool.log + else + kubectl cp ${ds_pod}:/var/opt/microsoft/docker-cimprov/log omsagent-daemonset --namespace=kube-system --container omsagent > /dev/null + kubectl cp ${ds_pod}:/var/opt/microsoft/docker-cimprov/log omsagent-prom-daemonset --namespace=kube-system --container omsagent-prometheus > /dev/null + kubectl cp ${ds_pod}:/var/opt/microsoft/linuxmonagent/log omsagent-daemonset-mdsd --namespace=kube-system --container omsagent > /dev/null + kubectl cp ${ds_pod}:/var/opt/microsoft/linuxmonagent/log omsagent-prom-daemonset-mdsd --namespace=kube-system --container omsagent-prometheus > /dev/null + fi + + kubectl exec ${ds_pod} --namespace=kube-system -- ls /var/opt/microsoft/docker-cimprov/state/ContainerInventory > containerID_${ds_pod}.txt 2>&1 + + cmd=`kubectl exec ${ds_pod} -n kube-system -- ls /etc/fluent 2>&1` + if [[ $cmd == *"cannot access"* ]];then + echo -e "${Red}/etc/fluent not exist on ${ds_pod}${NC}" | tee -a Tool.log + else + kubectl cp ${ds_pod}:/etc/fluent/container.conf omsagent-daemonset/container_${ds_pod}.conf --namespace=kube-system --container omsagent > /dev/null + kubectl cp ${ds_pod}:/etc/fluent/container.conf omsagent-prom-daemonset/container_${ds_pod}_prom.conf --namespace=kube-system --container omsagent-prometheus > /dev/null + fi + + cmd=`kubectl exec ${ds_pod} -n kube-system -- ls /etc/opt/microsoft/docker-cimprov 2>&1` + if [[ $cmd == *"cannot access"* ]];then + echo -e "${Red}/etc/opt/microsoft/docker-cimprov not exist on ${ds_pod}${NC}" | tee -a Tool.log + else + kubectl cp ${ds_pod}:/etc/opt/microsoft/docker-cimprov/td-agent-bit.conf omsagent-daemonset/td-agent-bit.conf --namespace=kube-system --container omsagent > /dev/null + kubectl cp ${ds_pod}:/etc/opt/microsoft/docker-cimprov/telegraf.conf omsagent-daemonset/telegraf.conf 
--namespace=kube-system --container omsagent > /dev/null + kubectl cp ${ds_pod}:/etc/opt/microsoft/docker-cimprov/telegraf.conf omsagent-prom-daemonset/telegraf.conf --namespace=kube-system --container omsagent-prometheus > /dev/null + kubectl cp ${ds_pod}:/etc/opt/microsoft/docker-cimprov/td-agent-bit.conf omsagent-prom-daemonset/td-agent-bit.conf --namespace=kube-system --container omsagent-prometheus > /dev/null + fi + echo -e "Complete log collection from ${ds_pod}!" | tee -a Tool.log +} + +win_logCollection() +{ + echo -e "Collecting logs from ${ds_win_pod}, windows pod will take several minutes for log collection, please dont exit forcely..." | tee -a Tool.log + kubectl describe pod ${ds_win_pod} --namespace=kube-system > describe_${ds_win_pod}.txt + kubectl logs ${ds_win_pod} --container omsagent-win --namespace=kube-system > logs_${ds_win_pod}.txt + kubectl exec ${ds_win_pod} -n kube-system --request-timeout=10m -- powershell Get-Process > process_${ds_win_pod}.txt + + cmd=`kubectl exec ${ds_win_pod} -n kube-system -- powershell ls /etc 2>&1` + if [[ $cmd == *"cannot access"* ]];then + echo -e "${Red}/etc/ not exist on ${ds_pod}${NC}" | tee -a Tool.log + else + kubectl cp ${ds_win_pod}:/etc/fluent-bit omsagent-win-daemonset-fbit --namespace=kube-system > /dev/null + kubectl cp ${ds_win_pod}:/etc/telegraf/telegraf.conf omsagent-win-daemonset-fbit/telegraf.conf --namespace=kube-system > /dev/null + + echo -e "${Cyan}If your log size are too large, log collection of windows node may fail. You can reduce log size by re-creating windows pod ${NC}" + # for some reason copying logs out of /etc/omsagentwindows doesn't work (gives a permission error), but exec then cat does work. + # kubectl cp ${ds_win_pod}:/etc/omsagentwindows omsagent-win-daemonset --namespace=kube-system + mkdir -p omsagent-win-daemonset + kubectl exec ${ds_win_pod} -n kube-system --request-timeout=10m -- powershell cat /etc/omsagentwindows/kubernetes_perf_log.txt > omsagent-win-daemonset/kubernetes_perf_log.txt + kubectl exec ${ds_win_pod} -n kube-system --request-timeout=10m -- powershell cat /etc/omsagentwindows/appinsights_error.log > omsagent-win-daemonset/appinsights_error.log + kubectl exec ${ds_win_pod} -n kube-system --request-timeout=10m -- powershell cat /etc/omsagentwindows/filter_cadvisor2mdm.log > omsagent-win-daemonset/filter_cadvisor2mdm.log + kubectl exec ${ds_win_pod} -n kube-system --request-timeout=10m -- powershell cat /etc/omsagentwindows/fluent-bit-out-oms-runtime.log > omsagent-win-daemonset/fluent-bit-out-oms-runtime.log + kubectl exec ${ds_win_pod} -n kube-system --request-timeout=10m -- powershell cat /etc/omsagentwindows/kubernetes_client_log.txt > omsagent-win-daemonset/kubernetes_client_log.txt + kubectl exec ${ds_win_pod} -n kube-system --request-timeout=10m -- powershell cat /etc/omsagentwindows/mdm_metrics_generator.log > omsagent-win-daemonset/mdm_metrics_generator.log + kubectl exec ${ds_win_pod} -n kube-system --request-timeout=10m -- powershell cat /etc/omsagentwindows/out_oms.conf > omsagent-win-daemonset/out_oms.conf + fi + + echo -e "Complete log collection from ${ds_win_pod}!" | tee -a Tool.log +} + +rs_logCollection() +{ + echo -e "Collecting logs from ${rs_pod}..." 
+ kubectl describe pod ${rs_pod} --namespace=kube-system > describe_${rs_pod}.txt + kubectl logs ${rs_pod} --container omsagent --namespace=kube-system > logs_${rs_pod}.txt + kubectl exec ${rs_pod} -n kube-system --request-timeout=10m -- ps -ef > process_${rs_pod}.txt + + cmd=`kubectl exec ${rs_pod} -n kube-system -- ls /var/opt/microsoft 2>&1` + if [[ $cmd == *"cannot access"* ]];then + echo -e "${Red}/var/opt/microsoft not exist on ${rs_pod}${NC}" | tee -a Tool.log + else + kubectl cp ${rs_pod}:/var/opt/microsoft/docker-cimprov/log omsagent-replicaset --namespace=kube-system > /dev/null + kubectl cp ${rs_pod}:/var/opt/microsoft/linuxmonagent/log omsagent-replicaset-mdsd --namespace=kube-system > /dev/null + fi + + cmd=`kubectl exec ${rs_pod} -n kube-system -- ls /etc/fluent 2>&1` + if [[ $cmd == *"cannot access"* ]];then + echo -e "${Red}/etc/fluent not exist on ${rs_pod}${NC}" | tee -a Tool.log + else + kubectl cp ${rs_pod}:/etc/fluent/kube.conf omsagent-replicaset/kube_${rs_pod}.conf --namespace=kube-system --container omsagent > /dev/null + fi + + cmd=`kubectl exec ${rs_pod} -n kube-system -- ls /etc/opt/microsoft/docker-cimprov 2>&1` + if [[ $cmd == *"cannot access"* ]];then + echo -e "${Red}/etc/opt/microsoft/docker-cimprov not exist on ${rs_pod}${NC}" | tee -a Tool.log + else + kubectl cp ${rs_pod}:/etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf omsagent-replicaset/td-agent-bit.conf --namespace=kube-system --container omsagent > /dev/null + kubectl cp ${rs_pod}:/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf omsagent-replicaset/telegraf-rs.conf --namespace=kube-system --container omsagent > /dev/null + fi + echo -e "Complete log collection from ${rs_pod}!" | tee -a Tool.log +} + +other_logCollection() +{ + echo -e "Collecting onboard logs..." + export deploy=$(kubectl get deployment --namespace=kube-system | grep -E omsagent | head -n 1 | awk '{print $1}') + if [ -z "$deploy" ];then + echo -e "${Red}there is not omsagent deployment, skipping log collection of deployment${NC}" | tee -a Tool.log + else + kubectl get deployment $deploy --namespace=kube-system -o yaml > deployment_${deploy}.txt + fi + + export config=$(kubectl get configmaps --namespace=kube-system | grep -E container-azm-ms-agentconfig | head -n 1 | awk '{print $1}') + if [ -z "$config" ];then + echo -e "${Red}configMap named container-azm-ms-agentconfig is not found, if you created configMap for omsagent, please manually save your custom configMap of omsagent by command: kubectl get configmaps --namespace=kube-system -o yaml > configMap.yaml${NC}" | tee -a Tool.log + else + kubectl get configmaps $config --namespace=kube-system -o yaml > ${config}.yaml + fi + + kubectl get nodes > node.txt + echo -e "Complete onboard log collection!" 
| tee -a Tool.log +} + +#main +output_path="AKSInsights-logs.$(date +%s).`hostname`" +mkdir -p $output_path +cd $output_path + +init + +export ds_pod=$(kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name | grep -E omsagent-[a-z0-9]{5} | head -n 1) +if [ -z "$ds_pod" ];then + echo -e "${Red}daemonset pod do not exist, skipping log collection for daemonset pod${NC}" | tee -a Tool.log +else + ds_logCollection +fi + +export ds_win_pod=$(kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name | grep -E omsagent-win-[a-z0-9]{5} | head -n 1) +if [ -z "$ds_win_pod" ];then + echo -e "${Cyan} windows agent pod do not exist, skipping log collection for windows agent pod ${NC}" | tee -a Tool.log +else + win_logCollection +fi + +export rs_pod=$(kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name | grep -E omsagent-rs-[a-z0-9]{5} | head -n 1) +if [ -z "$rs_pod" ];then + echo -e "${Red}replicaset pod do not exist, skipping log collection for replicaset pod ${NC}" | tee -a Tool.log +else + rs_logCollection +fi + +other_logCollection + +cd .. +echo +echo -e "Archiving logs..." +tar -czf $output_path.tgz $output_path +rm -rf $output_path + +echo "log files have been written to ${output_path}.tgz in current folder" diff --git a/scripts/troubleshoot/LogCollection/README.md b/scripts/troubleshoot/LogCollection/README.md new file mode 100644 index 000000000..9c867837b --- /dev/null +++ b/scripts/troubleshoot/LogCollection/README.md @@ -0,0 +1,46 @@ +# Container Insights Log collector + +This tool will collect: +* agent logs from linux ds and rs pods; +* agent logs from windows pod if enabled; +* cluster/node info, pod deployment, configMap, process logs etc.. + +## Prerequisites +* kubectl: az aks install-cli +* tar (installed by default) +* all nodes should be running on AKS +* AKS Insights are enabled: https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-onboard + +Otherwise, script will report error message and exit. + +## How to run +``` +az login --use-device-code # login to azure +az account set --subscription +az aks get-credentials --resource-group --name --file ~/ClusterKubeConfig +export KUBECONFIG=~/ClusterKubeConfig + +wget https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_dev/scripts/troubleshoot/LogCollection/AgentLogCollection.sh && bash ./AgentLogCollection.sh +``` + +Output: +``` +Preparing for log collection... +Prerequistes check is done, all good +Saving cluster information +cluster info saved to Tool.log +Collecting logs from omsagent-5kwzn... +Defaulted container "omsagent" out of: omsagent, omsagent-prometheus +Complete log collection from omsagent-5kwzn! +Collecting logs from omsagent-win-krcpv, windows pod will take several minutes for log collection, please dont exit forcely... +If your log size are too large, log collection of windows node may fail. You can reduce log size by re-creating windows pod +Complete log collection from omsagent-win-krcpv! +Collecting logs from omsagent-rs-6fc95c45cf-qjsdb... +Complete log collection from omsagent-rs-6fc95c45cf-qjsdb! +Collecting onboard logs... +configMap named container-azm-ms-agentconfig is not found, if you created configMap for omsagent, please use command to save your custom configMap of omsagent: kubectl get configmaps --namespace=kube-system -o yaml > configMap.yaml +Complete onboard log collection! + +Archiving logs... 
+log files have been written to AKSInsights-logs.1649655490.ubuntu1804.tgz in current folder +``` From 34b05c61bc278002b6f2a15d6be10856c38baa80 Mon Sep 17 00:00:00 2001 From: "microsoft-github-policy-service[bot]" <77245923+microsoft-github-policy-service[bot]@users.noreply.github.com> Date: Tue, 17 May 2022 20:09:08 -0700 Subject: [PATCH 228/301] Microsoft mandatory file (#763) Co-authored-by: microsoft-github-policy-service[bot] <77245923+microsoft-github-policy-service[bot]@users.noreply.github.com> --- SECURITY.md | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..766e6f887 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/msrc/pgp-key-msrc). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/msrc/cvd). 
+ + From 31e320b6877aeef182b5f9924675287e06a7ec42 Mon Sep 17 00:00:00 2001 From: Auston Li Date: Thu, 19 May 2022 13:23:37 -0700 Subject: [PATCH 229/301] Adding v2 schema options (#762) * Adding v2 schema options Adding commented out section in log collection settings for v2 schema * adding documentation link --- kubernetes/container-azm-ms-agentconfig.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kubernetes/container-azm-ms-agentconfig.yaml b/kubernetes/container-azm-ms-agentconfig.yaml index 328acb201..5e8aa187a 100644 --- a/kubernetes/container-azm-ms-agentconfig.yaml +++ b/kubernetes/container-azm-ms-agentconfig.yaml @@ -42,6 +42,12 @@ data: # When the setting is set to false, only the kube events with !normal event type will be collected enabled = false # When this is enabled (enabled = true), all kube events including normal events will be collected + #[log_collection_settings.schema] + # In the absence of this configmap, default value for containerlog_schema_version is "v1" + # Supported values for this setting are "v1","v2" + # See documentation at https://aka.ms/ContainerLogv2 for benefits of v2 schema over v1 schema before opting for "v2" schema + # containerlog_schema_version = "v2" + prometheus-data-collection-settings: |- # Custom Prometheus metrics data collection settings From b9520c619ae6d7de884d8a488fa8970eee388730 Mon Sep 17 00:00:00 2001 From: Janvi Jatakia Date: Thu, 19 May 2022 14:02:22 -0700 Subject: [PATCH 230/301] Agent release for ciprod05192022 and win-ciprod05192022 (#765) * Making changes for the release ciprod05192022 (except release notes) * Adding release notes * Remove unnecessary spaces * Updating release notes for configmap v2 and disk usage metrics --- ReleaseNotes.md | 39 ++++++++++++++++++++++ build/version | 4 +-- charts/azuremonitor-containers/Chart.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 6 ++-- kubernetes/linux/Dockerfile | 2 +- kubernetes/linux/Dockerfile.multiarch | 2 +- kubernetes/omsagent.yaml | 14 ++++---- kubernetes/windows/Dockerfile | 2 +- 8 files changed, 55 insertions(+), 16 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index d0e5bad22..176cbc2b8 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,45 @@ additional questions or comments. 
Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 5/19/2022 - +##### Version microsoft/oms:ciprod05192022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05192022 (linux) +##### Version microsoft/oms:win-ciprod05192022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod05192022 (windows) +##### Code change log +- Linux Agent + - PodReadyPercentage metric bug fix + - add cifs & fuse file systems to ignore list + - CA Cert Fix for Mariner Hosts in Air Gap + - Disk usage metrics will no longer be collected for the paths "/mnt/containers" and "/mnt/docker" +- Windows Agent + - Ruby version upgrade from 2.6.5.1 to 2.7.5.1 + - Added Support for Windows Server 2022 + - Multi-Arch Image to support both Windows 2019 and Windows 2022 +- Common (Linux & Windows Agent) + - Telegraf version update from 1.20.3 to 1.22.2 to fix the vulnerabilitis + - Removal of Health feature as part of deprecation plan + - AAD Auth MSI feature support for Arc K8s (not usable externally yet) + - MSI onboarding ARM template updates for both AKS & Arc K8s + - Fixed the bug related to windows metrics in MSI mode for AKS + - Configmap updates for log collection settings for v2 schema +- Misc + - Improvements related to CI/CD Multi-arc image + - Do trivy rootfs checks + - Disable push to ACR for PR and PR updates + - Enable batch builds + - Scope Dev/Prod pipelines to respective branches + - Shorten datetime component of image tag + - Troubleshooting script updates for MSI onboarding + - Instructions for testing of agent in MSI auth mode + - Add CI Windows Build to MultiArch Dev pipeline + - Updates related to building of Multi-arc image for windows in Build Pipeline and local dev builds + - Test yamls to test container logs and prometheus scraping on both WS2019 & WS2022 + - Arc K8s conformance test updates + - Script to collect the Agent logs for troubleshooting + - Force run trivy stage for Linux + - Fix docker msi download link in windows install-build-pre-requisites.ps1 script + - Added Onboarding templates for legacy auth for internal testing + - Update the Build pipelines to have separate phase for Windows + ### 3/17/2022 - ##### Version microsoft/oms:ciprod03172022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03172022 (linux) ##### Version microsoft/oms:win-ciprod03172022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03172022 (windows) diff --git a/build/version b/build/version index 95d20e931..19787cb72 100644 --- a/build/version +++ b/build/version @@ -2,11 +2,11 @@ # Build Version Information -CONTAINER_BUILDVERSION_MAJOR=16 +CONTAINER_BUILDVERSION_MAJOR=17 CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 CONTAINER_BUILDVERSION_BUILDNR=0 -CONTAINER_BUILDVERSION_DATE=20210611 +CONTAINER_BUILDVERSION_DATE=20220519 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 0ff1e3387..8e9f4847f 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.9.2 +version: 2.9.3 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git 
a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 104efb86d..480e7040c 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -22,10 +22,10 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod03172022" - tagWindows: "win-ciprod03172022" + tag: "ciprod05192022" + tagWindows: "win-ciprod05192022" pullPolicy: IfNotPresent - dockerProviderVersion: "16.0.0-0" + dockerProviderVersion: "17.0.0-0" agentVersion: "azure-mdsd-1.17.0" winAgentVersion: "0.0.0-0" # there is no base agent version for windows agent diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index becbe1157..6f68f664e 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -17,7 +17,7 @@ ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python-ctypes init-system-helpers net-tools rsyslog cron vim dmidecode apt-transport-https gnupg && rm -rf /var/lib/apt/lists/* COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs defaultpromenvvariables-sidecar mdsd.xml envmdsd logrotate.conf $tmpdir/ -ARG IMAGE_TAG=ciprod03172022 +ARG IMAGE_TAG=ciprod05192022 ENV AGENT_VERSION ${IMAGE_TAG} WORKDIR ${tmpdir} diff --git a/kubernetes/linux/Dockerfile.multiarch b/kubernetes/linux/Dockerfile.multiarch index 38103dd65..fd0330d5d 100644 --- a/kubernetes/linux/Dockerfile.multiarch +++ b/kubernetes/linux/Dockerfile.multiarch @@ -29,7 +29,7 @@ RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl COPY --from=builder /src/kubernetes/linux/Linux_ULINUX_1.0_*_64_Release/docker-cimprov-*.*.*-*.*.sh $tmpdir/ COPY kubernetes/linux/setup.sh kubernetes/linux/main.sh kubernetes/linux/defaultpromenvvariables kubernetes/linux/defaultpromenvvariables-rs kubernetes/linux/defaultpromenvvariables-sidecar kubernetes/linux/mdsd.xml kubernetes/linux/envmdsd kubernetes/linux/logrotate.conf $tmpdir/ -ARG IMAGE_TAG=ciprod03172022 +ARG IMAGE_TAG=ciprod05192022 ENV AGENT_VERSION ${IMAGE_TAG} WORKDIR ${tmpdir} diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 85d0ffb6b..4e021e1b8 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -334,7 +334,7 @@ spec: tier: node annotations: agentVersion: "azure-mdsd-1.17.0" - dockerProviderVersion: "16.0.0-0" + dockerProviderVersion: "17.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -379,7 +379,7 @@ spec: # - NET_ADMIN # - NET_RAW - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03172022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05192022" imagePullPolicy: IfNotPresent resources: limits: @@ -466,7 +466,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03172022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05192022" imagePullPolicy: IfNotPresent resources: limits: @@ -610,7 +610,7 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "azure-mdsd-1.17.0" - dockerProviderVersion: "16.0.0-0" + dockerProviderVersion: "17.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -651,7 +651,7 @@ spec: # - NET_ADMIN # - NET_RAW - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03172022" + image: 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05192022" imagePullPolicy: IfNotPresent resources: limits: @@ -812,7 +812,7 @@ spec: tier: node-win annotations: agentVersion: "0.0.0-0" - dockerProviderVersion: "16.0.0-0" + dockerProviderVersion: "17.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -822,7 +822,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03172022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod05192022" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 672dfb1f7..383652e0e 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -5,7 +5,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod03172022 +ARG IMAGE_TAG=win-ciprod05192022 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement From 1dc7209e3e63475b0f8226f1f9fcc3b22c67d451 Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Wed, 1 Jun 2022 08:48:50 -0700 Subject: [PATCH 231/301] trivy image scan (#770) * do trivy image check in azure pipelines * remove pr-checker github action Co-authored-by: Amol Agrawal --- .github/workflows/pr-checker.yml | 97 ----------------------------- .pipelines/azure_pipeline_dev.yaml | 10 ++- .pipelines/azure_pipeline_prod.yaml | 8 ++- 3 files changed, 16 insertions(+), 99 deletions(-) delete mode 100644 .github/workflows/pr-checker.yml diff --git a/.github/workflows/pr-checker.yml b/.github/workflows/pr-checker.yml deleted file mode 100644 index 036c7edd3..000000000 --- a/.github/workflows/pr-checker.yml +++ /dev/null @@ -1,97 +0,0 @@ -name: pullrequest-build-and-scan -on: - pull_request: - types: [opened, synchronize, reopened] - branches: - - ci_dev - - ci_prod -jobs: - LINUX-build-and-scan: - runs-on: ubuntu-latest - steps: - - name: Set-workflow-initiator - run: echo "Initiated by - ${GITHUB_ACTOR}" - - name: Set-branch-name-for-pr - if: ${{ github.event_name == 'pull_request' }} - run: echo "BRANCH_NAME=$(echo ${GITHUB_HEAD_REF} | tr / _)" >> $GITHUB_ENV - - name: Set-Env - run: echo "ENV=dev" >> $GITHUB_ENV - - name: Set-ACR-Registry - run: echo "ACR_REGISTRY=containerinsightsprod.azurecr.io" >> $GITHUB_ENV - - name: Set-ACR-Repository - run: echo "ACR_REPOSITORY=/public/azuremonitor/containerinsights/cidev" >> $GITHUB_ENV - - name: Set-image-tag-name - run: echo "IMAGE_TAG_NAME=cidev" >> $GITHUB_ENV - - name: Set-image-tag-suffix - run: echo "IMAGE_TAG_DATE=$(date +%m-%d-%Y)" >> $GITHUB_ENV - - name: Set-commit-sha - run: echo "COMMIT_SHA=${GITHUB_SHA::8}" >> $GITHUB_ENV - - name: Set-image-tag - run: echo "IMAGETAG=${ACR_REGISTRY}${ACR_REPOSITORY}:${IMAGE_TAG_NAME}-${BRANCH_NAME}-${IMAGE_TAG_DATE}-${COMMIT_SHA}" >> $GITHUB_ENV - - name: Set-image-telemetry-tag - run: echo "IMAGETAG_TELEMETRY=${IMAGE_TAG_NAME}-${BRANCH_NAME}-${IMAGE_TAG_DATE}-${COMMIT_SHA}" >> $GITHUB_ENV - - name: Set-Helm-OCI-Experimental-feature - run: echo "HELM_EXPERIMENTAL_OCI=1" >> $GITHUB_ENV - - name: Set-Helm-chart-version - run: echo "HELM_CHART_VERSION=0.0.1" >> $GITHUB_ENV - - name: Set-Helm-tag - run: echo "HELMTAG=${ACR_REGISTRY}${ACR_REPOSITORY}:${IMAGE_TAG_NAME}-chart-${BRANCH_NAME}-${HELM_CHART_VERSION}-${IMAGE_TAG_DATE}-${COMMIT_SHA}" >> $GITHUB_ENV - - name: Checkout-code - uses: actions/checkout@v2 - - name: 
Show-versions-On-build-machine - run: lsb_release -a && go version && helm version && docker version - - name: Install-build-dependencies - run: sudo apt-get install build-essential -y - - name: Build-source-code - run: cd ./build/linux/ && make - - name: Create-docker-image - run: | - cd ./kubernetes/linux/ && docker build . --file Dockerfile -t $IMAGETAG --build-arg IMAGE_TAG=$IMAGETAG_TELEMETRY - - name: List-docker-images - run: docker images --digests --all - - name: Run-trivy-scanner-on-docker-image - uses: aquasecurity/trivy-action@master - with: - image-ref: "${{ env.IMAGETAG }}" - format: 'table' - severity: 'CRITICAL,HIGH' - vuln-type: 'os,library' - exit-code: '1' - timeout: '5m0s' - ignore-unfixed: true - WINDOWS-build: - runs-on: windows-2019 - steps: - - name: Set-workflow-initiator - run: echo ("Initiated by -" + $env:GITHUB_ACTOR) - - name: Set-branch-name-for-pr - if: ${{ github.event_name == 'pull_request' }} - run: echo ("BRANCH_NAME=" + $env:GITHUB_HEAD_REF.replace('/','_')) >> $env:GITHUB_ENV - - name: Set-Env - run: echo ("ENV=dev") >> $env:GITHUB_ENV - - name: Set-ACR-Registry - run: echo ("ACR_REGISTRY=containerinsightsprod.azurecr.io") >> $env:GITHUB_ENV - - name: Set-ACR-Repository - run: echo ("ACR_REPOSITORY=/public/azuremonitor/containerinsights/cidev") >> $env:GITHUB_ENV - - name: Set-image-tag-name - run: echo ("IMAGE_TAG_NAME=cidev-win") >> $env:GITHUB_ENV - - name: Set-image-tag-suffix - run: echo ("IMAGE_TAG_DATE="+ (Get-Date -Format "MM-dd-yyyy")) >> $env:GITHUB_ENV - - name: Set-commit-sha - run: echo ("COMMIT_SHA=" + $env:GITHUB_SHA.SubString(0,8)) >> $env:GITHUB_ENV - - name: Set-image-tag - run: echo ("IMAGETAG=" + $env:ACR_REGISTRY + $env:ACR_REPOSITORY + ":" + $env:IMAGE_TAG_NAME + "-" + $env:BRANCH_NAME + "-" + $env:IMAGE_TAG_DATE + "-" + $env:COMMIT_SHA) >> $env:GITHUB_ENV - - name: Set-image-telemetry-tag - run: echo ("IMAGETAG_TELEMETRY=" + $env:IMAGE_TAG_NAME + "-" + $env:BRANCH_NAME + "-" + $env:IMAGE_TAG_DATE + "-" + $env:COMMIT_SHA) >> $env:GITHUB_ENV - - name: Checkout-code - uses: actions/checkout@v2 - - name: Show-versions-On-build-machine - run: systeminfo && go version && docker version - - name: Build-source-code - run: cd ./build/windows/ && & .\Makefile.ps1 - - name: Create-docker-image - run: | - cd ./kubernetes/windows/ && docker build . --file Dockerfile -t $env:IMAGETAG --build-arg WINDOWS_VERSION=ltsc2019 --build-arg IMAGE_TAG=$env:IMAGETAG_TELEMETRY - - name: List-docker-images - run: docker images --digests --all - diff --git a/.pipelines/azure_pipeline_dev.yaml b/.pipelines/azure_pipeline_dev.yaml index c4723e8e1..9147501ba 100644 --- a/.pipelines/azure_pipeline_dev.yaml +++ b/.pipelines/azure_pipeline_dev.yaml @@ -95,7 +95,7 @@ jobs: steps: - task: AzureCLI@2 - displayName: "Docker multi-arch linux build" + displayName: "Multi-arch Linux build and Vulnerability Scan" inputs: azureSubscription: ${{ variables.armServiceConnectionName }} scriptType: bash @@ -120,8 +120,16 @@ jobs: docker pull ${{ variables.repoImageName }}:$(linuxImagetag) else docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg IMAGE_TAG=$(linuxImagetag) . 
+ + # load the multi-arch image to run tests + docker buildx build --tag ${{ variables.repoImageName }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg IMAGE_TAG=$(linuxImagetag) --load . fi + curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin + + trivy image --ignore-unfixed --no-progress --severity HIGH,CRITICAL,MEDIUM --exit-code 1 ${{ variables.repoImageName }}:$(linuxImagetag) + + - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 displayName: 'Generation Task' condition: eq(variables.IS_PR, true) diff --git a/.pipelines/azure_pipeline_prod.yaml b/.pipelines/azure_pipeline_prod.yaml index 8239a7058..5e22bdd3b 100644 --- a/.pipelines/azure_pipeline_prod.yaml +++ b/.pipelines/azure_pipeline_prod.yaml @@ -99,7 +99,7 @@ jobs: steps: - task: AzureCLI@2 - displayName: "Docker multi-arch linux build" + displayName: "Multi-arch Linux build and Vulnerability Scan" inputs: azureSubscription: ${{ variables.armServiceConnectionName }} scriptType: bash @@ -124,8 +124,14 @@ jobs: docker pull ${{ variables.repoImageNameLinux }}:$(linuxImagetag) else docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageNameLinux }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json . + + # load the multi-arch image to run tests + docker buildx build --tag ${{ variables.repoImageNameLinux }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --load . fi + curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin + + trivy image --ignore-unfixed --no-progress --severity HIGH,CRITICAL,MEDIUM --exit-code 1 ${{ variables.repoImageNameLinux }}:$(linuxImagetag) - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 displayName: 'Generation Task' From 2345f293a411711380cca4e346ee7f76a8a7a80d Mon Sep 17 00:00:00 2001 From: Janvi Jatakia Date: Wed, 1 Jun 2022 09:53:38 -0700 Subject: [PATCH 232/301] Prometheus sidecar memory optimization (#769) Don't start telegraf, mdsd, and fluent-bit in the prometheus sidecar if it has no work to do (monitor_kubernetes_pods = false and no OSM namespaces to scrape). This part is just a resource-usage optimization. Adding the newly created environment variables in a file as adding them to bashrc makes it inaccessible if being run in a non-interactive environment. This happens in case of livenessprobe.sh. 
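A condensed sketch of the environment-variable handoff this commit describes, assembled from the main.sh and livenessprobe.sh changes in the diff that follows (abbreviated here for illustration; only the helper name, file path, and checks shown below come from the patch itself):

```bash
# main.sh: record each variable both in the running shell and in /opt/env_vars,
# because values appended only to ~/.bashrc are not visible to non-interactive shells.
setGlobalEnvVar() {
  export "$1"="$2"
  echo "export \"$1\"=\"$2\"" >> /opt/env_vars
}
setGlobalEnvVar MUTE_PROM_SIDECAR true   # set when the sidecar has no prometheus/OSM targets

# livenessprobe.sh: kubelet runs this as a non-interactive shell, so it sources the
# file explicitly and skips the mdsd/telegraf/fluent-bit checks for an idle sidecar.
source /opt/env_vars
if [[ "${CONTAINER_TYPE}" == "PrometheusSidecar" && "${MUTE_PROM_SIDECAR}" == "true" ]]; then
  exit 0
fi
```

The same MUTE_PROM_SIDECAR flag also gates the start-up of mdsd, telegraf, and fluent-bit inside main.sh, as shown in the hunks below.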
--- .trivyignore | 5 +- .../scripts/tomlparser-prom-customconfig.rb | 2 +- .../linux/installer/scripts/livenessprobe.sh | 44 ++++---- kubernetes/linux/main.sh | 102 ++++++++++++------ 4 files changed, 102 insertions(+), 51 deletions(-) diff --git a/.trivyignore b/.trivyignore index 3a8089422..f8c029116 100644 --- a/.trivyignore +++ b/.trivyignore @@ -13,4 +13,7 @@ CVE-2021-43809 CVE-2021-41816 CVE-2021-41819 CVE-2021-31799 -CVE-2021-28965 \ No newline at end of file +CVE-2021-28965 + +#dpkg vulnerability in ubuntu +CVE-2022-1664 \ No newline at end of file diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb index 819c1956f..642eadc14 100644 --- a/build/common/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -323,7 +323,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) File.open(file_name, "w") { |file| file.puts new_contents } puts "config::Successfully substituted the placeholders in telegraf conf file for custom prometheus scraping" - #Set environment variables for telemetry in the sidecar container + #Set environment variables for configuration and telemetry in the sidecar container if (!@containerType.nil? && @containerType.casecmp(@promSideCar) == 0) file = File.open("telemetry_prom_config_env_var", "w") if !file.nil? diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index 8ecb7fe44..3d74810d3 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -1,4 +1,28 @@ -#!/bin/bash +#!/bin/bash +source /opt/env_vars + +if [ -s "inotifyoutput.txt" ] +then + # inotifyoutput file has data(config map was applied) + echo "inotifyoutput.txt has been updated - config changed" > /dev/termination-log + exit 1 +fi + +# Perform the following check only for prometheus sidecar that does OSM scraping or for replicaset when sidecar scraping is disabled +if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || + ( ( -e "/etc/config/kube.conf" ) && ( ( ! -z "${SIDECAR_SCRAPING_ENABLED}" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ) ]]; then + if [ -s "inotifyoutput-osm.txt" ] + then + # inotifyoutput-osm file has data(config map was applied) + echo "inotifyoutput-osm.txt has been updated - config changed" > /dev/termination-log + exit 1 + fi +fi + +# if this is the prometheus sidecar and there are no prometheus metrics to scrape then the rest of the liveness probe doesn't apply +if [[ "${CONTAINER_TYPE}" == "PrometheusSidecar" && "${MUTE_PROM_SIDECAR}" == "true" ]]; then + exit 0 +fi #test to exit non zero value if mdsd is not running (ps -ef | grep "mdsd" | grep -v "grep") @@ -53,22 +77,4 @@ then # exit 1 fi -if [ -s "inotifyoutput.txt" ] -then - # inotifyoutput file has data(config map was applied) - echo "inotifyoutput.txt has been updated - config changed" > /dev/termination-log - exit 1 -fi - -# Perform the following check only for prometheus sidecar that does OSM scraping or for replicaset when sidecar scraping is disabled -if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || - ( ( -e "/etc/config/kube.conf" ) && ( ( ! 
-z "${SIDECAR_SCRAPING_ENABLED}" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ) ]]; then - if [ -s "inotifyoutput-osm.txt" ] - then - # inotifyoutput-osm file has data(config map was applied) - echo "inotifyoutput-osm.txt has been updated - config changed" > /dev/termination-log - exit 1 - fi -fi - exit 0 diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 2b25b044c..3e25fc3a4 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -1,5 +1,13 @@ #!/bin/bash +# please use this instead of adding env vars to bashrc directly +# usage: setGlobalEnvVar ENABLE_SIDECAR_SCRAPING true +setGlobalEnvVar() { + export "$1"="$2" + echo "export \"$1\"=\"$2\"" >> /opt/env_vars +} +echo "source /opt/env_vars" >> ~/.bashrc + waitforlisteneronTCPport() { local sleepdurationsecs=1 local totalsleptsecs=0 @@ -366,7 +374,6 @@ if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then /usr/bin/ruby2.7 tomlparser-agent-config.rb cat agent_config_env_var | while read line; do - #echo $line echo $line >> ~/.bashrc done source agent_config_env_var @@ -375,7 +382,6 @@ if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then /usr/bin/ruby2.7 tomlparser-npm-config.rb cat integration_npm_config_env_var | while read line; do - #echo $line echo $line >> ~/.bashrc done source integration_npm_config_env_var @@ -409,7 +415,7 @@ else source defaultpromenvvariables-rs fi -#Sourcing telemetry environment variable file if it exists +#Sourcing environment variable file if it exists. This file has telemetry and whether kubernetes pods are monitored if [ -e "telemetry_prom_config_env_var" ]; then cat telemetry_prom_config_env_var | while read line; do echo $line >> ~/.bashrc @@ -464,6 +470,17 @@ if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "Prometheus fi fi +# If the prometheus sidecar isn't doing anything then there's no need to run mdsd and telegraf in it. +if [[ ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) && + ( "${TELEMETRY_CUSTOM_PROM_MONITOR_PODS}" == "false" ) && + ( "${TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT}" -eq 0 ) ]]; then + setGlobalEnvVar MUTE_PROM_SIDECAR true +else + setGlobalEnvVar MUTE_PROM_SIDECAR false +fi + +echo "MUTE_PROM_SIDECAR = $MUTE_PROM_SIDECAR" + #Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request echo "Making wget request to cadvisor endpoint with port 10250" #Defaults to use secure port: 10250 @@ -576,7 +593,7 @@ MDSD_AAD_MSI_AUTH_ARGS="" # check if its AAD Auth MSI mode via USING_AAD_MSI_AUTH export AAD_MSI_AUTH_MODE=false if [ "${USING_AAD_MSI_AUTH}" == "true" ]; then - echo "*** activating oneagent in aad auth msi mode ***" + echo "*** setting up oneagent in aad auth msi mode ***" # msi auth specific args MDSD_AAD_MSI_AUTH_ARGS="-a -A" export AAD_MSI_AUTH_MODE=true @@ -593,7 +610,7 @@ if [ "${USING_AAD_MSI_AUTH}" == "true" ]; then export MDSD_USE_LOCAL_PERSISTENCY="false" echo "export MDSD_USE_LOCAL_PERSISTENCY=$MDSD_USE_LOCAL_PERSISTENCY" >> ~/.bashrc else - echo "*** activating oneagent in legacy auth mode ***" + echo "*** setting up oneagent in legacy auth mode ***" CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" #use the file path as its secure than env CIWORKSPACE_keyFile="/etc/omsagent-secret/KEY" @@ -617,17 +634,21 @@ source ~/.bashrc dpkg -l | grep mdsd | awk '{print $2 " " $3}' if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then - echo "starting mdsd with mdsd-port=26130, fluentport=26230 and influxport=26330 in sidecar container..." 
- #use tenant name to avoid unix socket conflict and different ports for port conflict - #roleprefix to use container specific mdsd socket - export TENANT_NAME="${CONTAINER_TYPE}" - echo "export TENANT_NAME=$TENANT_NAME" >> ~/.bashrc - export MDSD_ROLE_PREFIX=/var/run/mdsd-${CONTAINER_TYPE}/default - echo "export MDSD_ROLE_PREFIX=$MDSD_ROLE_PREFIX" >> ~/.bashrc - source ~/.bashrc - mkdir /var/run/mdsd-${CONTAINER_TYPE} - # add -T 0xFFFF for full traces - mdsd ${MDSD_AAD_MSI_AUTH_ARGS} -r ${MDSD_ROLE_PREFIX} -p 26130 -f 26230 -i 26330 -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & + if [ "${MUTE_PROM_SIDECAR}" != "true" ]; then + echo "starting mdsd with mdsd-port=26130, fluentport=26230 and influxport=26330 in sidecar container..." + #use tenant name to avoid unix socket conflict and different ports for port conflict + #roleprefix to use container specific mdsd socket + export TENANT_NAME="${CONTAINER_TYPE}" + echo "export TENANT_NAME=$TENANT_NAME" >> ~/.bashrc + export MDSD_ROLE_PREFIX=/var/run/mdsd-${CONTAINER_TYPE}/default + echo "export MDSD_ROLE_PREFIX=$MDSD_ROLE_PREFIX" >> ~/.bashrc + source ~/.bashrc + mkdir /var/run/mdsd-${CONTAINER_TYPE} + # add -T 0xFFFF for full traces + mdsd ${MDSD_AAD_MSI_AUTH_ARGS} -r ${MDSD_ROLE_PREFIX} -p 26130 -f 26230 -i 26330 -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & + else + echo "not starting mdsd (no metrics to scrape since MUTE_PROM_SIDECAR is true)" + fi else echo "starting mdsd mode in main container..." # add -T 0xFFFF for full traces @@ -654,13 +675,17 @@ fi #If config parsing was successful, a copy of the conf file with replaced custom settings file is created if [ ! -e "/etc/config/kube.conf" ]; then if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ] && [ -e "/opt/telegraf-test-prom-side-car.conf" ]; then - echo "****************Start Telegraf in Test Mode**************************" - /opt/telegraf --config /opt/telegraf-test-prom-side-car.conf --input-filter file -test - if [ $? -eq 0 ]; then - mv "/opt/telegraf-test-prom-side-car.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" - echo "Moving test conf file to telegraf side-car conf since test run succeeded" + if [ "${MUTE_PROM_SIDECAR}" != "true" ]; then + echo "****************Start Telegraf in Test Mode**************************" + /opt/telegraf --config /opt/telegraf-test-prom-side-car.conf --input-filter file -test + if [ $? -eq 0 ]; then + mv "/opt/telegraf-test-prom-side-car.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" + echo "Moving test conf file to telegraf side-car conf since test run succeeded" + fi + echo "****************End Telegraf Run in Test Mode**************************" + else + echo "****************Skipping Telegraf Run in Test Mode since MUTE_PROM_SIDECAR is true**************************" fi - echo "****************End Telegraf Run in Test Mode**************************" else if [ -e "/opt/telegraf-test.conf" ]; then echo "****************Start Telegraf in Test Mode**************************" @@ -687,9 +712,13 @@ fi #telegraf & fluentbit requirements if [ ! 
-e "/etc/config/kube.conf" ]; then if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then - echo "starting fluent-bit and setting telegraf conf file for prometheus sidecar" - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-prom-side-car.conf -e /opt/td-agent-bit/bin/out_oms.so & telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" + if [ "${MUTE_PROM_SIDECAR}" != "true" ]; then + echo "starting fluent-bit and setting telegraf conf file for prometheus sidecar" + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-prom-side-car.conf -e /opt/td-agent-bit/bin/out_oms.so & + else + echo "not starting fluent-bit in prometheus sidecar (no metrics to scrape since MUTE_PROM_SIDECAR is true)" + fi else echo "starting fluent-bit and setting telegraf conf file for daemonset" if [ "$CONTAINER_RUNTIME" == "docker" ]; then @@ -756,8 +785,12 @@ echo "export HOST_VAR=/hostfs/var" >> ~/.bashrc if [ ! -e "/etc/config/kube.conf" ]; then if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then - echo "checking for listener on tcp #25229 and waiting for 30 secs if not.." - waitforlisteneronTCPport 25229 30 + if [ "${MUTE_PROM_SIDECAR}" != "true" ]; then + echo "checking for listener on tcp #25229 and waiting for 30 secs if not.." + waitforlisteneronTCPport 25229 30 + else + echo "no metrics to scrape since MUTE_PROM_SIDECAR is true, not checking for listener on tcp #25229" + fi else echo "checking for listener on tcp #25226 and waiting for 30 secs if not.." waitforlisteneronTCPport 25226 30 @@ -769,10 +802,15 @@ else waitforlisteneronTCPport 25226 30 fi + #start telegraf -/opt/telegraf --config $telegrafConfFile & -/opt/telegraf --version -dpkg -l | grep td-agent-bit | awk '{print $2 " " $3}' +if [ "${MUTE_PROM_SIDECAR}" != "true" ]; then + /opt/telegraf --config $telegrafConfFile & + echo "telegraf version: $(/opt/telegraf --version)" + dpkg -l | grep td-agent-bit | awk '{print $2 " " $3}' +else + echo "not starting telegraf (no metrics to scrape since MUTE_PROM_SIDECAR is true)" +fi #dpkg -l | grep telegraf | awk '{print $2 " " $3}' @@ -785,7 +823,11 @@ service rsyslog stop echo "getting rsyslog status..." 
service rsyslog status -checkAgentOnboardingStatus $AAD_MSI_AUTH_MODE 30 +if [ "${MUTE_PROM_SIDECAR}" != "true" ]; then + checkAgentOnboardingStatus $AAD_MSI_AUTH_MODE 30 +else + echo "not checking onboarding status (no metrics to scrape since MUTE_PROM_SIDECAR is true)" +fi shutdown() { pkill -f mdsd From e1b0ccf6dacd08aef7b4870e3aab8f25ae785561 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 2 Jun 2022 17:37:05 -0700 Subject: [PATCH 233/301] Gangams/fix telegraf issue (#773) * avoid imds token call during start up * avoid imds token call during start up --- source/plugins/ruby/out_mdm.rb | 4 ---- 1 file changed, 4 deletions(-) diff --git a/source/plugins/ruby/out_mdm.rb b/source/plugins/ruby/out_mdm.rb index c83972f11..d850d93e5 100644 --- a/source/plugins/ruby/out_mdm.rb +++ b/source/plugins/ruby/out_mdm.rb @@ -118,12 +118,10 @@ def start @useMsi = true msi_endpoint = @@imds_msi_endpoint_template % { resource: @@token_resource_audience } @parsed_token_uri = URI.parse(msi_endpoint) - @cached_access_token = get_access_token else # switch to IMDS endpoint for the windows once the Arc K8s team supports the IMDS sidecar for windows @log.info "using cluster identity token since cluster is azure arc k8s cluster" @cluster_identity = ArcK8sClusterIdentity.new - @cached_access_token = @cluster_identity.get_cluster_identity_token end else # azure json file only used for aks and doesnt exist in non-azure envs @@ -148,8 +146,6 @@ def start end @parsed_token_uri = URI.parse(msi_endpoint) end - - @cached_access_token = get_access_token end end rescue => e From 315470a70a51f7eb5ebbdbf811824dd3f77c3a08 Mon Sep 17 00:00:00 2001 From: MSFTXiangyu <89832657+MSFTXiangyu@users.noreply.github.com> Date: Thu, 9 Jun 2022 10:33:41 +0800 Subject: [PATCH 234/301] Make metrics endpoint variable on ArcA cluster (#772) --- .../templates/omsagent-daemonset.yaml | 8 ++++++++ .../templates/omsagent-deployment.yaml | 8 ++++++++ charts/azuremonitor-containers/values.yaml | 5 +++++ source/plugins/ruby/CustomMetricsUtils.rb | 5 +++++ source/plugins/ruby/out_mdm.rb | 14 ++++++++++++-- 5 files changed, 38 insertions(+), 2 deletions(-) diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index 5bd8bdf79..3acfdb4a2 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -103,6 +103,14 @@ spec: {{- end }} - name: ISTEST value: {{ .Values.omsagent.ISTEST | quote }} + {{ if .Values.omsagent.isArcACluster }} + - name: IS_ARCA_CLUSTER + value: {{ .Values.omsagent.isArcACluster | quote }} + {{- end }} + {{- if ne .Values.omsagent.metricsEndpoint "" }} + - name: CUSTOM_METRICS_ENDPOINT + value: {{ .Values.omsagent.metricsEndpoint | quote }} + {{- end }} securityContext: privileged: true ports: diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index a0abb0f57..b5b239af0 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -89,6 +89,14 @@ spec: value: {{ .Values.omsagent.sidecarscraping | quote }} - name: ISTEST value: {{ .Values.omsagent.ISTEST | quote }} + {{ if .Values.omsagent.isArcACluster }} + - name: IS_ARCA_CLUSTER + value: {{ .Values.omsagent.isArcACluster | quote }} + {{- end }} + {{- if ne .Values.omsagent.metricsEndpoint "" }} + - 
name: CUSTOM_METRICS_ENDPOINT + value: {{ .Values.omsagent.metricsEndpoint | quote }} + {{- end }} securityContext: privileged: true ports: diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 480e7040c..91b8270cd 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -49,6 +49,9 @@ omsagent: # This flag used to determine whether to use AAD MSI auth or not for Arc K8s cluster useAADAuth: false + # This flag used to determine whether this cluster is connected to ArcA control plane. This value will be setup before pushed into on-premise ArcA ACR. + isArcACluster: false + ## To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. @@ -57,6 +60,8 @@ omsagent: key: domain: opinsights.azure.com proxy: + # This metricsEndpoint used to define the endpoint custom metrics emit to. If not defined, default public Azure monitoring endpoint '{aks_region}.monitoring.azure.com' will be used. + metricsEndpoint: env: clusterName: ## Applicable for only managed clusters hosted in Azure diff --git a/source/plugins/ruby/CustomMetricsUtils.rb b/source/plugins/ruby/CustomMetricsUtils.rb index fd9290b78..77675950c 100644 --- a/source/plugins/ruby/CustomMetricsUtils.rb +++ b/source/plugins/ruby/CustomMetricsUtils.rb @@ -13,6 +13,11 @@ def check_custom_metrics_availability if aks_region.to_s.empty? || aks_resource_id.to_s.empty? return false # This will also take care of AKS-Engine Scenario. AKS_REGION/AKS_RESOURCE_ID is not set for AKS-Engine. Only ACS_RESOURCE_NAME is set end + # If this is cluster is connected to ArcA control plane and metrics endpoint provided, custom metrics shall be emitted. + is_arca_cluster = ENV['IS_ARCA_CLUSTER'] + if is_arca_cluster.to_s.downcase == "true" && !ENV['CUSTOM_METRICS_ENDPOINT'].to_s.empty? + return true + end return aks_cloud_environment.to_s.downcase == 'azurepubliccloud' end diff --git a/source/plugins/ruby/out_mdm.rb b/source/plugins/ruby/out_mdm.rb index d850d93e5..e882f5ec7 100644 --- a/source/plugins/ruby/out_mdm.rb +++ b/source/plugins/ruby/out_mdm.rb @@ -26,7 +26,8 @@ def initialize @@token_resource_audience = "https://monitor.azure.com/" @@grant_type = "client_credentials" @@azure_json_path = "/etc/kubernetes/host/azure.json" - @@post_request_url_template = "https://%{aks_region}.monitoring.azure.com%{aks_resource_id}/metrics" + @@public_metrics_endpoint_template = "https://%{aks_region}.monitoring.azure.com" + @@post_request_url_template = "%{metrics_endpoint}%{aks_resource_id}/metrics" @@aad_token_url_template = "https://login.microsoftonline.com/%{tenant_id}/oauth2/token" # msiEndpoint is the well known endpoint for getting MSI authentications tokens @@ -98,7 +99,16 @@ def start if aks_resource_id.downcase.include?("microsoft.kubernetes/connectedclusters") @isArcK8sCluster = true end - @@post_request_url = @@post_request_url_template % { aks_region: aks_region, aks_resource_id: aks_resource_id } + + # If CUSTOM_METRICS_ENDPOINT provided, the url format shall be validated before emitting metrics into given endpoint. + custom_metrics_endpoint = ENV['CUSTOM_METRICS_ENDPOINT'] + if !custom_metrics_endpoint.to_s.empty? 
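# Editorial note (not part of the commit): CUSTOM_METRICS_ENDPOINT is populated from the
# chart's omsagent.metricsEndpoint value shown above. When it is set, it replaces the
# default endpoint built from @@public_metrics_endpoint_template
# ("https://{aks_region}.monitoring.azure.com"), and the URI.parse call just below acts as
# a fail-fast format check: an unparseable endpoint raises inside start() and is caught by
# the method's rescue block instead of producing a malformed @@post_request_url.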
+ metrics_endpoint = custom_metrics_endpoint.strip + URI.parse(metrics_endpoint) + else + metrics_endpoint = @@public_metrics_endpoint_template % { aks_region: aks_region } + end + @@post_request_url = @@post_request_url_template % { metrics_endpoint: metrics_endpoint, aks_resource_id: aks_resource_id } @post_request_uri = URI.parse(@@post_request_url) proxy = (ProxyUtils.getProxyConfiguration) if proxy.nil? || proxy.empty? From c3136e47d9a27ab736d0ffea1d9f710dc23a845c Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Mon, 13 Jun 2022 14:08:56 -0700 Subject: [PATCH 235/301] add integration for azure subnet ip usage (#774) * add integration for azure cni subnet ip usage * exclude unfixed cve & remove fixed one --- .trivyignore | 2 +- build/linux/installer/conf/telegraf.conf | 21 ++++++++- .../scripts/tomlparser-npm-config.rb | 43 ++++++++++++++++--- kubernetes/container-azm-ms-agentconfig.yaml | 2 + .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 5 +++ 5 files changed, 66 insertions(+), 7 deletions(-) diff --git a/.trivyignore b/.trivyignore index f8c029116..56ac504d5 100644 --- a/.trivyignore +++ b/.trivyignore @@ -16,4 +16,4 @@ CVE-2021-31799 CVE-2021-28965 #dpkg vulnerability in ubuntu -CVE-2022-1664 \ No newline at end of file +CVE-2022-1304 \ No newline at end of file diff --git a/build/linux/installer/conf/telegraf.conf b/build/linux/installer/conf/telegraf.conf index 6ee1c472b..e9931e1f2 100644 --- a/build/linux/installer/conf/telegraf.conf +++ b/build/linux/installer/conf/telegraf.conf @@ -805,4 +805,23 @@ # ClusterName = "$TELEMETRY_CLUSTER_NAME" # ClusterType = "$TELEMETRY_CLUSTER_TYPE" # Computer = "placeholder_hostname" -# ControllerType = "$CONTROLLER_TYPE" \ No newline at end of file +# ControllerType = "$CONTROLLER_TYPE" + +## ip subnet usage +[[inputs.prometheus]] + #name_prefix="container.azm.ms/" + ## An array of urls to scrape metrics from. + urls = $AZMON_INTEGRATION_SUBNET_IP_USAGE_METRICS_URL_LIST_NODE + + metric_version = 2 + url_tag = "scrapeUrl" + + ## Use bearer token for authorization. ('bearer_token' takes priority) + bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" + + ## Specify timeout duration for slower prometheus clients (default is 3s) + response_timeout = "15s" + + ## Optional TLS Config + tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + insecure_skip_verify = true \ No newline at end of file diff --git a/build/linux/installer/scripts/tomlparser-npm-config.rb b/build/linux/installer/scripts/tomlparser-npm-config.rb index 777fef209..e8cf216fd 100644 --- a/build/linux/installer/scripts/tomlparser-npm-config.rb +++ b/build/linux/installer/scripts/tomlparser-npm-config.rb @@ -18,6 +18,9 @@ @npm_node_urls = "[\"http://$NODE_IP:10091/node-metrics\"]" @npm_cluster_urls="[\"http://npm-metrics-cluster-service.kube-system:9000/cluster-metrics\"]" @npm_basic_drop_metrics_cluster = "[\"npm_ipset_counts\"]" +@collect_subnet_ip_usage_metrics = false +@azure_subnet_ip_usage_metrics_node_urls = "[\"http://$NODE_IP:10092/metrics\"]" +@azure_subnet_ip_usage_default_setting = "[]" @tgfConfigFileDS = "/etc/opt/microsoft/docker-cimprov/telegraf.conf" @tgfConfigFileRS = "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" @replicaset = "replicaset" @@ -43,6 +46,21 @@ def parseConfigMap # Use the ruby structure created after config parsing to set the right values to be used as environment variables def populateSettingValuesFromConfigMap(parsedConfig) + begin + if !parsedConfig.nil? && !parsedConfig[:integrations].nil? 
&& !parsedConfig[:integrations][:azure_subnet_ip_usage].nil? && !parsedConfig[:integrations][:azure_subnet_ip_usage][:enabled].nil? + azure_subnet_ip_usage_metrics = parsedConfig[:integrations][:azure_subnet_ip_usage][:enabled].to_s + puts "config::azure_subnet_ip_usage::got:integrations.azure_subnet_ip_usage.enabled='#{azure_subnet_ip_usage_metrics}'" + if !azure_subnet_ip_usage_metrics.nil? && azure_subnet_ip_usage_metrics.strip.casecmp("true") == 0 + @collect_azure_subnet_ip_usage_metrics = true + else + @collect_azure_subnet_ip_usage_metrics = false + end + puts "config::azure_subnet_ip_usage::got:integrations.azure_subnet_ip_usage.enabled=#{@collect_azure_subnet_ip_usage_metrics}" + end + rescue => errorStr + puts "config::npm::error:Exception while reading config settings for azure_subnet_ip_usage setting - #{errorStr}, using defaults" + @collect_azure_subnet_ip_usage_metrics = false + end begin if !parsedConfig.nil? && !parsedConfig[:integrations].nil? && !parsedConfig[:integrations][:azure_network_policy_manager].nil? && !parsedConfig[:integrations][:azure_network_policy_manager][:collect_advanced_metrics].nil? advanced_npm_metrics = parsedConfig[:integrations][:azure_network_policy_manager][:collect_advanced_metrics].to_s @@ -76,7 +94,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) end @configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] -puts "****************Start NPM Config Processing********************" +puts "****************Start NPM & subnet ip usage integrations Config Processing********************" if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it configMapSettings = parseConfigMap if !configMapSettings.nil? 
@@ -84,10 +102,11 @@ def populateSettingValuesFromConfigMap(parsedConfig) end else if (File.file?(@configMapMountPath)) - ConfigParseErrorLogger.logError("config::npm::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported schema version") + ConfigParseErrorLogger.logError("config::integrations::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported schema version") end @collect_basic_npm_metrics = false @collect_advanced_npm_metrics = false + @collect_azure_subnet_ip_usage_metrics = false end @@ -99,7 +118,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) tgfConfigFile = @tgfConfigFileRS end -#replace place holders in configuration file +#replace place holders in configuration file for npm integration tgfConfig = File.read(tgfConfigFile) #read returns only after closing the file if @collect_advanced_npm_metrics == true @@ -116,8 +135,19 @@ def populateSettingValuesFromConfigMap(parsedConfig) tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER", @npm_default_setting) end +#replace place holders in configuration file for subnet ip usage integration +if @collect_azure_subnet_ip_usage_metrics == true + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_SUBNET_IP_USAGE_METRICS_URL_LIST_NODE", @azure_subnet_ip_usage_metrics_node_urls) +else + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_SUBNET_IP_USAGE_METRICS_URL_LIST_NODE", @azure_subnet_ip_usage_default_setting) +end + +File.open(tgfConfigFile, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope +puts "config::integrations::Successfully substituted the placeholders for integrations into #{tgfConfigFile} file for #{controller}" + + File.open(tgfConfigFile, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope -puts "config::npm::Successfully substituted the NPM placeholders into #{tgfConfigFile} file for #{controller}" +puts "config::integrations::Successfully substituted the integrations placeholders into #{tgfConfigFile} file for #{controller}" # Write the telemetry to file, so that they can be set as environment variables telemetryFile = File.open("integration_npm_config_env_var", "w") @@ -128,9 +158,12 @@ def populateSettingValuesFromConfigMap(parsedConfig) elsif @collect_basic_npm_metrics == true telemetryFile.write("export TELEMETRY_NPM_INTEGRATION_METRICS_BASIC=1\n") end + if @collect_azure_subnet_ip_usage_metrics == true + telemetryFile.write("export TELEMETRY_SUBNET_IP_USAGE_INTEGRATION_METRICS=1\n") + end # Close file after writing all environment variables telemetryFile.close else - puts "config::npm::Exception while opening file for writing NPM telemetry environment variables" + puts "config::integrations::Exception while opening file for writing Integrations telemetry environment variables" puts "****************End NPM Config Processing********************" end diff --git a/kubernetes/container-azm-ms-agentconfig.yaml b/kubernetes/container-azm-ms-agentconfig.yaml index 5e8aa187a..8b9e2d718 100644 --- a/kubernetes/container-azm-ms-agentconfig.yaml +++ b/kubernetes/container-azm-ms-agentconfig.yaml @@ -141,6 +141,8 @@ data: [integrations.azure_network_policy_manager] collect_basic_metrics = false collect_advanced_metrics = false + [integrations.azure_subnet_ip_usage] + enabled = false # Doc - https://github.com/microsoft/Docker-Provider/blob/ci_prod/Documentation/AgentSettings/ReadMe.md agent-settings: |- 
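Putting the pieces of this commit together: enabling the integration is a one-line change in the container-azm-ms-agentconfig.yaml configmap shown above ([integrations.azure_subnet_ip_usage] enabled = true). The tomlparser then swaps the $AZMON_INTEGRATION_SUBNET_IP_USAGE_METRICS_URL_LIST_NODE placeholder in the new [[inputs.prometheus]] telegraf section for the node scrape URL list, and writes export TELEMETRY_SUBNET_IP_USAGE_INTEGRATION_METRICS=1 into the integration_npm_config_env_var file, which CAdvisorMetricsAPIClient (next diff) reports as the int-ipsubnetusage telemetry tag. A minimal sketch of the substitution step, with local variable names standing in for the script's instance variables:

    # Disabled clusters get an empty URL list, so the new [[inputs.prometheus]] input scrapes nothing.
    node_urls = "[\"http://$NODE_IP:10092/metrics\"]"
    disabled  = "[]"

    tgf_config  = File.read(tgf_config_file)   # telegraf conf chosen for this controller type
    replacement = collect_azure_subnet_ip_usage_metrics ? node_urls : disabled
    tgf_config  = tgf_config.gsub("$AZMON_INTEGRATION_SUBNET_IP_USAGE_METRICS_URL_LIST_NODE", replacement)
    File.open(tgf_config_file, "w") { |file| file.puts tgf_config }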
diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 20faf4619..a0c50e6c5 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -36,6 +36,7 @@ class CAdvisorMetricsAPIClient @containerLogsRoute = ENV["AZMON_CONTAINER_LOGS_ROUTE"] @npmIntegrationBasic = ENV["TELEMETRY_NPM_INTEGRATION_METRICS_BASIC"] @npmIntegrationAdvanced = ENV["TELEMETRY_NPM_INTEGRATION_METRICS_ADVANCED"] + @subnetIpUsageMetrics = ENV["TELEMETRY_SUBNET_IP_USAGE_INTEGRATION_METRICS"] @os_type = ENV["OS_TYPE"] if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 @@ -282,6 +283,10 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met elsif (!@npmIntegrationBasic.nil? && !@npmIntegrationBasic.empty?) telemetryProps["int-npm-b"] = "1" end + # telemetry for subnet ip usage integration + if (!@subnetIpUsageMetrics.nil? && !@subnetIpUsageMetrics.empty?) + telemetryProps["int-ipsubnetusage"] = "1" + end #telemetry for Container log schema version clusterContainerLogSchemaVersion if (!@clusterContainerLogSchemaVersion.nil? && !@clusterContainerLogSchemaVersion.empty?) telemetryProps["containerLogVer"] = @clusterContainerLogSchemaVersion From 96fdadbef6eea2522dc89f26eaf775afba6cbf3f Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 14 Jun 2022 09:22:01 -0700 Subject: [PATCH 236/301] Gangams/rs hyper scale 2022 ready (#753) * watch and multiproc implementation * fix weird bug * multiproc support for fluentd * working * fix log lines * refactor code * cache telemetry * nodecount telemetry * bug fix * further optimize * bugfix related typo * node allocatable cache * wincontainerinventory in multiproc * disable health * config events on different core * add ts to logs * move kube perf records to separate plugin * refactor * minor update * remove commented code * mdm state file * mdm state file * podmdm to separate plugin * bug fixes * bug fixes * bug fixes * podmdm plugin * bug fixes * bug fixes * remove unneeded log lines * more improvements * clean up * clean up * add requestId header for mdm metrics * latest mdsd and fix for threading issue in out mdm * rs specific config for large cluster * optimize out mdm * bug fix * use large queue limit for kube perf * 5k preview rs limits * handle resourceversion empty or 0 scenrio * handle pagination api call failures * fix bug * preview image for internal customer validation * preview image * wip * wip * fix trailing whitespaces * fix bug * remove unused envvars in yaml * revert minor things * telemetry tags for preview release * revert preview image tags * revert unintended change * fix bug * use same batchtime for both mdm & podinventory records * use same batchtime for both mdm & podinventory records * use same batchtime for both mdm & podinventory records * use same batchtime for both mdm & podinventory records * preview image tag with latest ci_dev changes * change back to use prod image in docker files * fix unit test failures * exclude unfixed cve until this get fixed * fix minor issue * increase retries to handle transient errors --- build/linux/installer/conf/kube.conf | 378 +++++---- .../installer/datafiles/base_container.data | 3 + .../templates/omsagent-deployment.yaml | 5 + kubernetes/linux/main.sh | 340 +++++--- kubernetes/omsagent.yaml | 9 + source/plugins/ruby/KubernetesApiClient.rb | 666 ++++++++++++++- source/plugins/ruby/WatchStream.rb | 70 ++ source/plugins/ruby/constants.rb | 6 
+ source/plugins/ruby/in_kube_nodes.rb | 273 ++++++- source/plugins/ruby/in_kube_nodes_test.rb | 118 +-- source/plugins/ruby/in_kube_perfinventory.rb | 433 ++++++++++ source/plugins/ruby/in_kube_podinventory.rb | 766 ++++++++++++++---- .../plugins/ruby/in_kube_podmdminventory.rb | 217 +++++ .../ruby/kubernetes_container_inventory.rb | 64 +- source/plugins/ruby/out_mdm.rb | 46 +- source/plugins/ruby/podinventory_to_mdm.rb | 15 +- 16 files changed, 2792 insertions(+), 617 deletions(-) create mode 100644 source/plugins/ruby/WatchStream.rb create mode 100644 source/plugins/ruby/in_kube_perfinventory.rb create mode 100644 source/plugins/ruby/in_kube_podmdminventory.rb diff --git a/build/linux/installer/conf/kube.conf b/build/linux/installer/conf/kube.conf index 53040e2f9..5b3837748 100644 --- a/build/linux/installer/conf/kube.conf +++ b/build/linux/installer/conf/kube.conf @@ -1,99 +1,80 @@ - #Kubernetes pod inventory - - @type kube_podinventory - tag oneagent.containerInsights.KUBE_POD_INVENTORY_BLOB - run_interval 60 - @log_level debug - - - #Kubernetes Persistent Volume inventory - - @type kube_pvinventory - tag oneagent.containerInsights.KUBE_PV_INVENTORY_BLOB - run_interval 60 - @log_level debug - - - #Kubernetes events - - @type kube_events - tag oneagent.containerInsights.KUBE_EVENTS_BLOB - run_interval 60 - @log_level debug - - - #Kubernetes Nodes - - @type kube_nodes - tag oneagent.containerInsights.KUBE_NODE_INVENTORY_BLOB - run_interval 60 - @log_level debug - - - #cadvisor perf- Windows nodes - - @type win_cadvisor_perf - tag oneagent.containerInsights.LINUX_PERF_BLOB - run_interval 60 - @log_level debug - - - #Kubernetes object state - deployments - - @type kubestate_deployments - tag oneagent.containerInsights.INSIGHTS_METRICS_BLOB - run_interval 60 - @log_level debug - + #fluent forward plugin + + workers "#{ENV['NUM_OF_FLUENTD_WORKERS']}" + root_dir /var/opt/microsoft/docker-cimprov/state + - #Kubernetes object state - HPA - - @type kubestate_hpa - tag oneagent.containerInsights.INSIGHTS_METRICS_BLOB - run_interval 60 - @log_level debug - + #perf + + @type forward + @id out_perf_fwd + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + + + @type file + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length "#{ENV['FLUENTD_KUBE_PERF_QUEUE_LIMIT_LENGTH']}" + flush_interval "#{ENV['FLUENTD_FLUSH_INTERVAL']}" + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + + keepalive true + - - @type inventory2mdm - @log_level info - - - #custom_metrics_mdm filter plugin for perf data from windows nodes + #custom_metrics_mdm filter plugin for perf data from windows nodes @type cadvisor2mdm metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,pvUsedBytes @log_level info - #kubepodinventory - - @type forward - @log_level debug - send_timeout 30 - connect_timeout 30 - heartbeat_type none - - host 0.0.0.0 - port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" - + #containerinventory for windows containers + + @type forward + @id out_ci_fwd + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + @type file - path /var/opt/microsoft/docker-cimprov/state/kubepod*.buffer overflow_action drop_oldest_chunk chunk_limit_size 4m - queue_limit_length 20 - flush_interval 20s + queue_limit_length "#{ENV['FLUENTD_QUEUE_LIMIT_LENGTH']}" + flush_interval "#{ENV['FLUENTD_FLUSH_INTERVAL']}" 
retry_max_times 10 retry_wait 5s retry_max_interval 5m - flush_thread_count 5 + flush_thread_count 5 - keepalive true + keepalive true - #kubepvinventory - + + + #Kubernetes pod inventory + + @type kube_podinventory + tag oneagent.containerInsights.KUBE_POD_INVENTORY_BLOB + run_interval 60 + @log_level debug + + + #kubepodinventory + @type forward @log_level debug send_timeout 30 @@ -105,22 +86,21 @@ @type file - path /var/opt/microsoft/docker-cimprov/state/kubepv*.buffer + path /var/opt/microsoft/docker-cimprov/state/kubepod*.buffer overflow_action drop_oldest_chunk chunk_limit_size 4m - queue_limit_length 20 - flush_interval 20s + queue_limit_length "#{ENV['FLUENTD_QUEUE_LIMIT_LENGTH']}" + flush_interval "#{ENV['FLUENTD_FLUSH_INTERVAL']}" retry_max_times 10 retry_wait 5s retry_max_interval 5m - flush_thread_count 5 + flush_thread_count 5 - keepalive true - + keepalive true + - #InsightsMetrics - #kubestate - + #kubeservices + @type forward @log_level debug send_timeout 30 @@ -132,21 +112,30 @@ @type file - path /var/opt/microsoft/docker-cimprov/state/insightsmetrics*.buffer + path /var/opt/microsoft/docker-cimprov/state/kubeservices*.buffer overflow_action drop_oldest_chunk chunk_limit_size 4m - queue_limit_length 20 - flush_interval 20s + queue_limit_length "#{ENV['FLUENTD_QUEUE_LIMIT_LENGTH']}" + flush_interval "#{ENV['FLUENTD_FLUSH_INTERVAL']}" retry_max_times 10 retry_wait 5s retry_max_interval 5m - flush_thread_count 5 + flush_thread_count 2 - keepalive true + keepalive true + + + #Kubernetes Nodes + + @type kube_nodes + tag oneagent.containerInsights.KUBE_NODE_INVENTORY_BLOB + run_interval 60 + @log_level debug + - #kubeevents - + #containernodeinventory + @type forward @log_level debug send_timeout 30 @@ -158,21 +147,26 @@ @type file - path /var/opt/microsoft/docker-cimprov/state/kubeevents*.buffer + path /var/opt/microsoft/docker-cimprov/state/containernodeinventory*.buffer overflow_action drop_oldest_chunk chunk_limit_size 4m - queue_limit_length 20 - flush_interval 20s + queue_limit_length "#{ENV['FLUENTD_QUEUE_LIMIT_LENGTH']}" + flush_interval "#{ENV['FLUENTD_FLUSH_INTERVAL']}" retry_max_times 10 retry_wait 5s retry_max_interval 5m - flush_thread_count 5 + flush_thread_count 3 keepalive true - - #kubeservices - + + + @type inventory2mdm + @log_level info + + + #kubenodeinventory + @type forward @log_level debug send_timeout 30 @@ -184,47 +178,49 @@ @type file - path /var/opt/microsoft/docker-cimprov/state/kubeservices*.buffer + path /var/opt/microsoft/docker-cimprov/state/kubenode*.buffer overflow_action drop_oldest_chunk chunk_limit_size 4m - queue_limit_length 20 - flush_interval 20s + queue_limit_length "#{ENV['FLUENTD_QUEUE_LIMIT_LENGTH']}" + flush_interval "#{ENV['FLUENTD_FLUSH_INTERVAL']}" retry_max_times 10 retry_wait 5s retry_max_interval 5m - flush_thread_count 2 + flush_thread_count 5 - keepalive true - + keepalive true + - #kubenodeinventory - - @type forward + + @type mdm + @id out_mdm_nodeinventory @log_level debug - send_timeout 30 - connect_timeout 30 - heartbeat_type none - - host 0.0.0.0 - port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" - @type file - path /var/opt/microsoft/docker-cimprov/state/kubenode*.buffer + path /var/opt/microsoft/docker-cimprov/state/out_mdm_nodeinventory*.buffer overflow_action drop_oldest_chunk chunk_limit_size 4m - queue_limit_length 20 - flush_interval 20s + queue_limit_length "#{ENV['FLUENTD_QUEUE_LIMIT_LENGTH']}" + flush_interval "#{ENV['FLUENTD_FLUSH_INTERVAL']}" retry_max_times 10 retry_wait 5s retry_max_interval 5m - 
flush_thread_count 5 + flush_thread_count 5 - keepalive true + retry_mdm_post_wait_minutes 30 + + + #Kubernetes events + + @type kube_events + tag oneagent.containerInsights.KUBE_EVENTS_BLOB + run_interval 60 + @log_level debug + - #containernodeinventory - + #kubeevents + @type forward @log_level debug send_timeout 30 @@ -236,47 +232,90 @@ @type file - path /var/opt/microsoft/docker-cimprov/state/containernodeinventory*.buffer + path /var/opt/microsoft/docker-cimprov/state/kubeevents*.buffer overflow_action drop_oldest_chunk chunk_limit_size 4m - queue_limit_length 20 - flush_interval 20s + queue_limit_length "#{ENV['FLUENTD_QUEUE_LIMIT_LENGTH']}" + flush_interval "#{ENV['FLUENTD_FLUSH_INTERVAL']}" retry_max_times 10 retry_wait 5s retry_max_interval 5m - flush_thread_count 3 + flush_thread_count 5 - keepalive true + keepalive true + + + #Kubernetes podmdm inventory + + @type kube_podmdminventory + run_interval 60 + @log_level debug + - #containerinventory for windows containers - - @type forward - @log_level debug - send_timeout 30 - connect_timeout 30 - heartbeat_type none - - host 0.0.0.0 - port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" - + + @type mdm + @id out_mdm_podinventory + @log_level debug @type file - path /var/opt/microsoft/docker-cimprov/state/containerinventory*.buffer + path /var/opt/microsoft/docker-cimprov/state/out_mdm_podinventory*.buffer overflow_action drop_oldest_chunk chunk_limit_size 4m - queue_limit_length 20 - flush_interval 20s + queue_limit_length "#{ENV['FLUENTD_QUEUE_LIMIT_LENGTH']}" + flush_interval "#{ENV['FLUENTD_FLUSH_INTERVAL']}" retry_max_times 10 retry_wait 5s retry_max_interval 5m - flush_thread_count 5 + flush_thread_count "#{ENV['FLUENTD_MDM_FLUSH_THREAD_COUNT']}" - keepalive true - + retry_mdm_post_wait_minutes 30 + + + + + #Kubernetes perf inventory + + @type kube_perfinventory + tag oneagent.containerInsights.LINUX_PERF_BLOB + run_interval 60 + @log_level debug + + + #Kubernetes Persistent Volume inventory + + @type kube_pvinventory + tag oneagent.containerInsights.KUBE_PV_INVENTORY_BLOB + run_interval 60 + @log_level debug + - #perf - + #cadvisor perf- Windows nodes + + @type win_cadvisor_perf + tag oneagent.containerInsights.LINUX_PERF_BLOB + run_interval 60 + @log_level debug + + + #Kubernetes object state - deployments + + @type kubestate_deployments + tag oneagent.containerInsights.INSIGHTS_METRICS_BLOB + run_interval 60 + @log_level debug + + + #Kubernetes object state - HPA + + @type kubestate_hpa + tag oneagent.containerInsights.INSIGHTS_METRICS_BLOB + run_interval 60 + @log_level debug + + + #kubepvinventory + @type forward @log_level debug send_timeout 30 @@ -288,51 +327,62 @@ @type file - path /var/opt/microsoft/docker-cimprov/state/perf*.buffer + path /var/opt/microsoft/docker-cimprov/state/kubepv*.buffer overflow_action drop_oldest_chunk chunk_limit_size 4m - queue_limit_length 20 - flush_interval 20s + queue_limit_length "#{ENV['FLUENTD_QUEUE_LIMIT_LENGTH']}" + flush_interval "#{ENV['FLUENTD_FLUSH_INTERVAL']}" retry_max_times 10 retry_wait 5s retry_max_interval 5m - flush_thread_count 5 + flush_thread_count 5 - keepalive true + keepalive true - - @type mdm - @log_level debug + #InsightsMetrics + #kubestate + + @type forward + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + @type file - path /var/opt/microsoft/docker-cimprov/state/out_mdm_*.buffer + path /var/opt/microsoft/docker-cimprov/state/insightsmetrics*.buffer overflow_action 
drop_oldest_chunk chunk_limit_size 4m - queue_limit_length 20 - flush_interval 20s + queue_limit_length "#{ENV['FLUENTD_QUEUE_LIMIT_LENGTH']}" + flush_interval "#{ENV['FLUENTD_FLUSH_INTERVAL']}" retry_max_times 10 retry_wait 5s retry_max_interval 5m - flush_thread_count 5 + flush_thread_count 5 - retry_mdm_post_wait_minutes 30 + keepalive true @type mdm + @id out_mdm_perf @log_level debug @type file path /var/opt/microsoft/docker-cimprov/state/out_mdm_cdvisorperf*.buffer overflow_action drop_oldest_chunk chunk_limit_size 4m - queue_limit_length 20 - flush_interval 20s + queue_limit_length "#{ENV['FLUENTD_QUEUE_LIMIT_LENGTH']}" + flush_interval "#{ENV['FLUENTD_FLUSH_INTERVAL']}" retry_max_times 10 retry_wait 5s retry_max_interval 5m - flush_thread_count 5 + flush_thread_count 5 retry_mdm_post_wait_minutes 30 + diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index 7dcbde31f..92b494ae3 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -132,6 +132,8 @@ MAINTAINER: 'Microsoft Corporation' /etc/fluent/plugin/in_containerinventory.rb; source/plugins/ruby/in_containerinventory.rb; 644; root; root /etc/fluent/plugin/in_kube_nodes.rb; source/plugins/ruby/in_kube_nodes.rb; 644; root; root /etc/fluent/plugin/in_kube_podinventory.rb; source/plugins/ruby/in_kube_podinventory.rb; 644; root; root +/etc/fluent/plugin/in_kube_podmdminventory.rb; source/plugins/ruby/in_kube_podmdminventory.rb; 644; root; root +/etc/fluent/plugin/in_kube_perfinventory.rb; source/plugins/ruby/in_kube_perfinventory.rb; 644; root; root /etc/fluent/plugin/KubernetesApiClient.rb; source/plugins/ruby/KubernetesApiClient.rb; 644; root; root /etc/fluent/plugin/in_kube_events.rb; source/plugins/ruby/in_kube_events.rb; 644; root; root /etc/fluent/plugin/in_kube_pvinventory.rb; source/plugins/ruby/in_kube_pvinventory.rb; 644; root; root @@ -143,6 +145,7 @@ MAINTAINER: 'Microsoft Corporation' /etc/fluent/plugin/filter_telegraf2mdm.rb; source/plugins/ruby/filter_telegraf2mdm.rb; 644; root; root /etc/fluent/plugin/out_mdm.rb; source/plugins/ruby/out_mdm.rb; 644; root; root +/etc/fluent/plugin/WatchStream.rb; source/plugins/ruby/WatchStream.rb; 644; root; root diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index b5b239af0..ad7452aa5 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -53,6 +53,11 @@ spec: resources: {{ toYaml .Values.omsagent.resources.deployment | indent 9 }} env: + - name: NUM_OF_FLUENTD_WORKERS + valueFrom: + resourceFieldRef: + containerName: omsagent + resource: limits.cpu {{- if ne .Values.omsagent.env.clusterId "" }} - name: AKS_RESOURCE_ID value: {{ .Values.omsagent.env.clusterId | quote }} diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 3e25fc3a4..1e00457d9 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -23,8 +23,7 @@ waitforlisteneronTCPport() { if [[ $port =~ $numeric ]] && [[ $waittimesecs =~ $numeric ]]; then #local varlistener=$(netstat -lnt | awk '$6 == "LISTEN" && $4 ~ ":25228$"') - while true - do + while true; do if [ $totalsleptsecs -gt $waittimesecs ]; then echo "${FUNCNAME[0]} giving up waiting for listener on port:$port after $totalsleptsecs secs" return 1 @@ -33,7 +32,7 @@ waitforlisteneronTCPport() { if [ -z 
"$varlistener" ]; then #echo "${FUNCNAME[0]} waiting for $sleepdurationsecs more sec for listener on port:$port ..." sleep $sleepdurationsecs - totalsleptsecs=$(($totalsleptsecs+1)) + totalsleptsecs=$(($totalsleptsecs + 1)) else echo "${FUNCNAME[0]} found listener on port:$port in $totalsleptsecs secs" return 0 @@ -65,23 +64,22 @@ checkAgentOnboardingStatus() { successMessage="Loaded data sources" failureMessage="Failed to load data sources into config" fi - while true - do - if [ $totalsleptsecs -gt $waittimesecs ]; then - echo "${FUNCNAME[0]} giving up checking agent onboarding status after $totalsleptsecs secs" - return 1 - fi - - if grep "$successMessage" "${MDSD_LOG}/mdsd.info"; then - echo "Onboarding success" - return 0 - elif grep "$failureMessage" "${MDSD_LOG}/mdsd.err"; then - echo "Onboarding Failure: Reason: Failed to onboard the agent" - echo "Onboarding Failure: Please verify log analytics workspace configuration such as existence of the workspace, workspace key and workspace enabled for public ingestion" - return 1 - fi - sleep $sleepdurationsecs - totalsleptsecs=$(($totalsleptsecs+1)) + while true; do + if [ $totalsleptsecs -gt $waittimesecs ]; then + echo "${FUNCNAME[0]} giving up checking agent onboarding status after $totalsleptsecs secs" + return 1 + fi + + if grep "$successMessage" "${MDSD_LOG}/mdsd.info"; then + echo "Onboarding success" + return 0 + elif grep "$failureMessage" "${MDSD_LOG}/mdsd.err"; then + echo "Onboarding Failure: Reason: Failed to onboard the agent" + echo "Onboarding Failure: Please verify log analytics workspace configuration such as existence of the workspace, workspace key and workspace enabled for public ingestion" + return 1 + fi + sleep $sleepdurationsecs + totalsleptsecs=$(($totalsleptsecs + 1)) done else echo "${FUNCNAME[0]} called with non-numeric arguments<$2>. 
Required arguments <#wait-time-in-seconds>" @@ -90,6 +88,103 @@ checkAgentOnboardingStatus() { fi } +setReplicaSetSpecificConfig() { + echo "num of fluentd workers:${NUM_OF_FLUENTD_WORKERS}" + export FLUENTD_FLUSH_INTERVAL="20s" + export FLUENTD_QUEUE_LIMIT_LENGTH="20" # default + export FLUENTD_KUBE_PERF_QUEUE_LIMIT_LENGTH="20" + export FLUENTD_MDM_FLUSH_THREAD_COUNT="5" # default + case $NUM_OF_FLUENTD_WORKERS in + [5-9]|9[0-9]|100) + export NUM_OF_FLUENTD_WORKERS=5 # Max is 5 core even if the specified limits more than 5 cores + export FLUENTD_POD_INVENTORY_WORKER_ID=4 + export FLUENTD_NODE_INVENTORY_WORKER_ID=3 + export FLUENTD_EVENT_INVENTORY_WORKER_ID=2 + export FLUENTD_POD_MDM_INVENTORY_WORKER_ID=1 + export FLUENTD_OTHER_INVENTORY_WORKER_ID=0 + export FLUENTD_FLUSH_INTERVAL="5s" + export FLUENTD_QUEUE_LIMIT_LENGTH="50" + export FLUENTD_KUBE_PERF_QUEUE_LIMIT_LENGTH="100" # kube perf is high volume so would need large queue limit to avoid data loss + export MONITORING_MAX_EVENT_RATE="100000" # default MDSD EPS is 20K which is not enough for large scale + export FLUENTD_MDM_FLUSH_THREAD_COUNT="20" # if the pod mdm inventory running on separate worker + ;; + 4) + export NUM_OF_FLUENTD_WORKERS=4 + export FLUENTD_POD_INVENTORY_WORKER_ID=3 + export FLUENTD_NODE_INVENTORY_WORKER_ID=2 + export FLUENTD_EVENT_INVENTORY_WORKER_ID=1 + export FLUENTD_POD_MDM_INVENTORY_WORKER_ID=0 + export FLUENTD_OTHER_INVENTORY_WORKER_ID=0 + export FLUENTD_FLUSH_INTERVAL="10s" + export FLUENTD_QUEUE_LIMIT_LENGTH="40" + export FLUENTD_KUBE_PERF_QUEUE_LIMIT_LENGTH="80" # kube perf is high volume so would need large queue limit + export MONITORING_MAX_EVENT_RATE="80000" # default MDSD EPS is 20K which is not enough for large scale + ;; + 3) + export NUM_OF_FLUENTD_WORKERS=3 + export FLUENTD_POD_INVENTORY_WORKER_ID=2 + export FLUENTD_NODE_INVENTORY_WORKER_ID=1 + export FLUENTD_POD_MDM_INVENTORY_WORKER_ID=0 + export FLUENTD_EVENT_INVENTORY_WORKER_ID=0 + export FLUENTD_OTHER_INVENTORY_WORKER_ID=0 + export FLUENTD_FLUSH_INTERVAL="15s" + export FLUENTD_QUEUE_LIMIT_LENGTH="30" + export FLUENTD_KUBE_PERF_QUEUE_LIMIT_LENGTH="60" # kube perf is high volume so would need large queue limit + export MONITORING_MAX_EVENT_RATE="60000" # default MDSD EPS is 20K which is not enough for large scale + ;; + 2) + export NUM_OF_FLUENTD_WORKERS=2 + export FLUENTD_POD_INVENTORY_WORKER_ID=1 + export FLUENTD_NODE_INVENTORY_WORKER_ID=1 + export FLUENTD_POD_MDM_INVENTORY_WORKER_ID=0 + export FLUENTD_EVENT_INVENTORY_WORKER_ID=0 + export FLUENTD_OTHER_INVENTORY_WORKER_ID=0 + export FLUENTD_FLUSH_INTERVAL="20s" + export FLUENTD_QUEUE_LIMIT_LENGTH="20" + export FLUENTD_KUBE_PERF_QUEUE_LIMIT_LENGTH="40" # kube perf is high volume so would need large queue limit + export MONITORING_MAX_EVENT_RATE="40000" # default MDSD EPS is 20K which is not enough for large scale + ;; + + *) + export NUM_OF_FLUENTD_WORKERS=1 + export FLUENTD_POD_INVENTORY_WORKER_ID=0 + export FLUENTD_NODE_INVENTORY_WORKER_ID=0 + export FLUENTD_EVENT_INVENTORY_WORKER_ID=0 + export FLUENTD_POD_MDM_INVENTORY_WORKER_ID=0 + export FLUENTD_OTHER_INVENTORY_WORKER_ID=0 + export FLUENTD_FLUSH_INTERVAL="20s" + export FLUENTD_QUEUE_LIMIT_LENGTH="20" + export FLUENTD_KUBE_PERF_QUEUE_LIMIT_LENGTH="20" + ;; + esac + echo "export NUM_OF_FLUENTD_WORKERS=$NUM_OF_FLUENTD_WORKERS" >>~/.bashrc + echo "export FLUENTD_POD_INVENTORY_WORKER_ID=$FLUENTD_POD_INVENTORY_WORKER_ID" >>~/.bashrc + echo "export FLUENTD_NODE_INVENTORY_WORKER_ID=$FLUENTD_NODE_INVENTORY_WORKER_ID" >>~/.bashrc + echo "export 
FLUENTD_EVENT_INVENTORY_WORKER_ID=$FLUENTD_EVENT_INVENTORY_WORKER_ID" >>~/.bashrc + echo "export FLUENTD_POD_MDM_INVENTORY_WORKER_ID=$FLUENTD_POD_MDM_INVENTORY_WORKER_ID" >>~/.bashrc + echo "export FLUENTD_OTHER_INVENTORY_WORKER_ID=$FLUENTD_OTHER_INVENTORY_WORKER_ID" >>~/.bashrc + echo "export FLUENTD_FLUSH_INTERVAL=$FLUENTD_FLUSH_INTERVAL" >>~/.bashrc + echo "export FLUENTD_KUBE_PERF_QUEUE_LIMIT_LENGTH=$FLUENTD_KUBE_PERF_QUEUE_LIMIT_LENGTH" >>~/.bashrc + echo "export FLUENTD_QUEUE_LIMIT_LENGTH=$FLUENTD_QUEUE_LIMIT_LENGTH" >>~/.bashrc + echo "export FLUENTD_MDM_FLUSH_THREAD_COUNT=$FLUENTD_MDM_FLUSH_THREAD_COUNT" >>~/.bashrc + + if [ ! -z $MONITORING_MAX_EVENT_RATE ]; then + echo "export MONITORING_MAX_EVENT_RATE=$MONITORING_MAX_EVENT_RATE" >>~/.bashrc + echo "Configured MDSD Max EPS is: ${MONITORING_MAX_EVENT_RATE}" + fi + + source ~/.bashrc + + echo "pod inventory worker id: ${FLUENTD_POD_INVENTORY_WORKER_ID}" + echo "node inventory worker id: ${FLUENTD_NODE_INVENTORY_WORKER_ID}" + echo "event inventory worker id: ${FLUENTD_EVENT_INVENTORY_WORKER_ID}" + echo "pod mdm inventory worker id: ${FLUENTD_POD_MDM_INVENTORY_WORKER_ID}" + echo "other inventory worker id: ${FLUENTD_OTHER_INVENTORY_WORKER_ID}" + echo "fluentd flush interval: ${FLUENTD_FLUSH_INTERVAL}" + echo "fluentd kube perf buffer plugin queue length: ${FLUENTD_KUBE_PERF_QUEUE_LIMIT_LENGTH}" + echo "fluentd buffer plugin queue length for all other non kube perf plugin: ${FLUENTD_QUEUE_LIMIT_LENGTH}" + echo "fluentd out mdm flush thread count: ${FLUENTD_MDM_FLUSH_THREAD_COUNT}" +} #using /var/opt/microsoft/docker-cimprov/state instead of /var/opt/microsoft/omsagent/state since the latter gets deleted during onboarding mkdir -p /var/opt/microsoft/docker-cimprov/state @@ -98,8 +193,8 @@ mkdir -p /var/opt/microsoft/docker-cimprov/state inotifywait /etc/config/settings --daemon --recursive --outfile "/opt/inotifyoutput.txt" --event create,delete --format '%e : %T' --timefmt '+%s' #Run inotify as a daemon to track changes to the mounted configmap for OSM settings. -if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || - ( ( -e "/etc/config/kube.conf" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ]]; then +if [[ ((! 
-e "/etc/config/kube.conf") && ("${CONTAINER_TYPE}" == "PrometheusSidecar")) || + ((-e "/etc/config/kube.conf") && ("${SIDECAR_SCRAPING_ENABLED}" == "false")) ]]; then inotifywait /etc/config/osm-settings --daemon --recursive --outfile "/opt/inotifyoutput-osm.txt" --event create,delete --format '%e : %T' --timefmt '+%s' fi @@ -108,58 +203,58 @@ if [ -z $AKS_RESOURCE_ID ]; then echo "not setting customResourceId" else export customResourceId=$AKS_RESOURCE_ID - echo "export customResourceId=$AKS_RESOURCE_ID" >> ~/.bashrc + echo "export customResourceId=$AKS_RESOURCE_ID" >>~/.bashrc source ~/.bashrc echo "customResourceId:$customResourceId" export customRegion=$AKS_REGION - echo "export customRegion=$AKS_REGION" >> ~/.bashrc + echo "export customRegion=$AKS_REGION" >>~/.bashrc source ~/.bashrc echo "customRegion:$customRegion" fi #set agent config schema version -if [ -e "/etc/config/settings/schema-version" ] && [ -s "/etc/config/settings/schema-version" ]; then +if [ -e "/etc/config/settings/schema-version" ] && [ -s "/etc/config/settings/schema-version" ]; then #trim config_schema_version="$(cat /etc/config/settings/schema-version | xargs)" #remove all spaces config_schema_version="${config_schema_version//[[:space:]]/}" #take first 10 characters - config_schema_version="$(echo $config_schema_version| cut -c1-10)" + config_schema_version="$(echo $config_schema_version | cut -c1-10)" export AZMON_AGENT_CFG_SCHEMA_VERSION=$config_schema_version - echo "export AZMON_AGENT_CFG_SCHEMA_VERSION=$config_schema_version" >> ~/.bashrc + echo "export AZMON_AGENT_CFG_SCHEMA_VERSION=$config_schema_version" >>~/.bashrc source ~/.bashrc echo "AZMON_AGENT_CFG_SCHEMA_VERSION:$AZMON_AGENT_CFG_SCHEMA_VERSION" fi #set agent config file version -if [ -e "/etc/config/settings/config-version" ] && [ -s "/etc/config/settings/config-version" ]; then +if [ -e "/etc/config/settings/config-version" ] && [ -s "/etc/config/settings/config-version" ]; then #trim config_file_version="$(cat /etc/config/settings/config-version | xargs)" #remove all spaces config_file_version="${config_file_version//[[:space:]]/}" #take first 10 characters - config_file_version="$(echo $config_file_version| cut -c1-10)" + config_file_version="$(echo $config_file_version | cut -c1-10)" export AZMON_AGENT_CFG_FILE_VERSION=$config_file_version - echo "export AZMON_AGENT_CFG_FILE_VERSION=$config_file_version" >> ~/.bashrc + echo "export AZMON_AGENT_CFG_FILE_VERSION=$config_file_version" >>~/.bashrc source ~/.bashrc echo "AZMON_AGENT_CFG_FILE_VERSION:$AZMON_AGENT_CFG_FILE_VERSION" fi #set OSM config schema version -if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || - ( ( -e "/etc/config/kube.conf" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ]]; then - if [ -e "/etc/config/osm-settings/schema-version" ] && [ -s "/etc/config/osm-settings/schema-version" ]; then +if [[ ((! 
-e "/etc/config/kube.conf") && ("${CONTAINER_TYPE}" == "PrometheusSidecar")) || + ((-e "/etc/config/kube.conf") && ("${SIDECAR_SCRAPING_ENABLED}" == "false")) ]]; then + if [ -e "/etc/config/osm-settings/schema-version" ] && [ -s "/etc/config/osm-settings/schema-version" ]; then #trim osm_config_schema_version="$(cat /etc/config/osm-settings/schema-version | xargs)" #remove all spaces osm_config_schema_version="${osm_config_schema_version//[[:space:]]/}" #take first 10 characters - osm_config_schema_version="$(echo $osm_config_schema_version| cut -c1-10)" + osm_config_schema_version="$(echo $osm_config_schema_version | cut -c1-10)" export AZMON_OSM_CFG_SCHEMA_VERSION=$osm_config_schema_version - echo "export AZMON_OSM_CFG_SCHEMA_VERSION=$osm_config_schema_version" >> ~/.bashrc + echo "export AZMON_OSM_CFG_SCHEMA_VERSION=$osm_config_schema_version" >>~/.bashrc source ~/.bashrc echo "AZMON_OSM_CFG_SCHEMA_VERSION:$AZMON_OSM_CFG_SCHEMA_VERSION" fi @@ -201,13 +296,13 @@ if [ -e "/etc/omsagent-secret/WSID" ]; then if [ -z "$host" -o -z "$port" ]; then echo "-e error proxy endpoint should be in this format http(s)://: or http(s)://:@:" else - echo "successfully validated provided proxy endpoint is valid and expected format" + echo "successfully validated provided proxy endpoint is valid and expected format" fi - echo $pwd > /opt/microsoft/docker-cimprov/proxy_password + echo $pwd >/opt/microsoft/docker-cimprov/proxy_password export MDSD_PROXY_MODE=application - echo "export MDSD_PROXY_MODE=$MDSD_PROXY_MODE" >> ~/.bashrc + echo "export MDSD_PROXY_MODE=$MDSD_PROXY_MODE" >>~/.bashrc export MDSD_PROXY_ADDRESS=$proto$hostport echo "export MDSD_PROXY_ADDRESS=$MDSD_PROXY_ADDRESS" >> ~/.bashrc if [ ! -z "$user" -a ! -z "$pwd" ]; then @@ -231,8 +326,8 @@ if [ -e "/etc/omsagent-secret/WSID" ]; then curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest --proxy $PROXY_ENDPOINT fi else - echo "Making curl request to oms endpint with domain: $domain" - curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest + echo "Making curl request to oms endpint with domain: $domain" + curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest fi if [ $? -ne 0 ]; then @@ -245,8 +340,8 @@ if [ -e "/etc/omsagent-secret/WSID" ]; then RET=`curl --max-time 10 -s -o /dev/null -w "%{http_code}" ifconfig.co --proxy $PROXY_ENDPOINT` fi else - echo "Making curl request to ifconfig.co" - RET=`curl --max-time 10 -s -o /dev/null -w "%{http_code}" ifconfig.co` + echo "Making curl request to ifconfig.co" + RET=$(curl --max-time 10 -s -o /dev/null -w "%{http_code}" ifconfig.co) fi if [ $RET -eq 000 ]; then echo "-e error Error resolving host during the onboarding request. Check the internet connectivity and/or network policy on the cluster" @@ -261,8 +356,8 @@ if [ -e "/etc/omsagent-secret/WSID" ]; then curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest --proxy $PROXY_ENDPOINT fi else - echo "ifconfig check succeeded, retrying oms endpoint..." - curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest + echo "ifconfig check succeeded, retrying oms endpoint..." + curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest fi if [ $? 
-ne 0 ]; then @@ -278,23 +373,22 @@ else echo "LA Onboarding:Workspace Id not mounted, skipping the telemetry check" fi - # Set environment variable for if public cloud by checking the workspace domain. if [ -z $domain ]; then - ClOUD_ENVIRONMENT="unknown" + ClOUD_ENVIRONMENT="unknown" elif [ $domain == "opinsights.azure.com" ]; then - CLOUD_ENVIRONMENT="azurepubliccloud" + CLOUD_ENVIRONMENT="azurepubliccloud" elif [ $domain == "opinsights.azure.cn" ]; then - CLOUD_ENVIRONMENT="azurechinacloud" + CLOUD_ENVIRONMENT="azurechinacloud" elif [ $domain == "opinsights.azure.us" ]; then - CLOUD_ENVIRONMENT="azureusgovernmentcloud" + CLOUD_ENVIRONMENT="azureusgovernmentcloud" elif [ $domain == "opinsights.azure.eaglex.ic.gov" ]; then - CLOUD_ENVIRONMENT="usnat" + CLOUD_ENVIRONMENT="usnat" elif [ $domain == "opinsights.azure.microsoft.scloud" ]; then - CLOUD_ENVIRONMENT="ussec" + CLOUD_ENVIRONMENT="ussec" fi export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT -echo "export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT" >> ~/.bashrc +echo "export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT" >>~/.bashrc # Copying over CA certs for airgapped clouds. This is needed for Mariner vs Ubuntu hosts. # We are unable to tell if the host is Mariner or Ubuntu, @@ -302,7 +396,7 @@ echo "export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT" >> ~/.bashrc # One will have the certs and the other will be empty. # These need to be copied to a different location for Mariner vs Ubuntu containers. # OS_ID here is the container distro. -# Adding Mariner now even though the elif will never currently evaluate. +# Adding Mariner now even though the elif will never currently evaluate. if [ $CLOUD_ENVIRONMENT == "usnat" ] || [ $CLOUD_ENVIRONMENT == "ussec" ]; then OS_ID=$(cat /etc/os-release | grep ^ID= | cut -d '=' -f2 | tr -d '"' | tr -d "'") if [ $OS_ID == "mariner" ]; then @@ -322,39 +416,38 @@ fi #consisten naming conventions with the windows export DOMAIN=$domain -echo "export DOMAIN=$DOMAIN" >> ~/.bashrc +echo "export DOMAIN=$DOMAIN" >>~/.bashrc export WSID=$workspaceId -echo "export WSID=$WSID" >> ~/.bashrc +echo "export WSID=$WSID" >>~/.bashrc # Check if the instrumentation key needs to be fetched from a storage account (as in airgapped clouds) -if [ ${#APPLICATIONINSIGHTS_AUTH_URL} -ge 1 ]; then # (check if APPLICATIONINSIGHTS_AUTH_URL has length >=1) +if [ ${#APPLICATIONINSIGHTS_AUTH_URL} -ge 1 ]; then # (check if APPLICATIONINSIGHTS_AUTH_URL has length >=1) for BACKOFF in {1..4}; do - KEY=$(curl -sS $APPLICATIONINSIGHTS_AUTH_URL ) + KEY=$(curl -sS $APPLICATIONINSIGHTS_AUTH_URL) # there's no easy way to get the HTTP status code from curl, so just check if the result is well formatted if [[ $KEY =~ ^[A-Za-z0-9=]+$ ]]; then break else - sleep $((2**$BACKOFF / 4)) # (exponential backoff) + sleep $((2 ** $BACKOFF / 4)) # (exponential backoff) fi done # validate that the retrieved data is an instrumentation key if [[ $KEY =~ ^[A-Za-z0-9=]+$ ]]; then export APPLICATIONINSIGHTS_AUTH=$(echo $KEY) - echo "export APPLICATIONINSIGHTS_AUTH=$APPLICATIONINSIGHTS_AUTH" >> ~/.bashrc + echo "export APPLICATIONINSIGHTS_AUTH=$APPLICATIONINSIGHTS_AUTH" >>~/.bashrc echo "Using cloud-specific instrumentation key" else # no ikey can be retrieved. Disable telemetry and continue export DISABLE_TELEMETRY=true - echo "export DISABLE_TELEMETRY=true" >> ~/.bashrc + echo "export DISABLE_TELEMETRY=true" >>~/.bashrc echo "Could not get cloud-specific instrumentation key (network error?). 
Disabling telemetry" fi fi - aikey=$(echo $APPLICATIONINSIGHTS_AUTH | base64 --decode) export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey -echo "export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey" >> ~/.bashrc +echo "export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey" >>~/.bashrc source ~/.bashrc @@ -363,7 +456,7 @@ if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then /usr/bin/ruby2.7 tomlparser.rb cat config_env_var | while read line; do - echo $line >> ~/.bashrc + echo $line >>~/.bashrc done source config_env_var fi @@ -399,18 +492,18 @@ fi if [ ! -e "/etc/config/kube.conf" ]; then if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then cat defaultpromenvvariables-sidecar | while read line; do - echo $line >> ~/.bashrc + echo $line >>~/.bashrc done source defaultpromenvvariables-sidecar else cat defaultpromenvvariables | while read line; do - echo $line >> ~/.bashrc + echo $line >>~/.bashrc done source defaultpromenvvariables fi else cat defaultpromenvvariables-rs | while read line; do - echo $line >> ~/.bashrc + echo $line >>~/.bashrc done source defaultpromenvvariables-rs fi @@ -418,7 +511,7 @@ fi #Sourcing environment variable file if it exists. This file has telemetry and whether kubernetes pods are monitored if [ -e "telemetry_prom_config_env_var" ]; then cat telemetry_prom_config_env_var | while read line; do - echo $line >> ~/.bashrc + echo $line >>~/.bashrc done source telemetry_prom_config_env_var fi @@ -431,20 +524,19 @@ if [ ! -e "/etc/config/kube.conf" ]; then #Sourcing config environment variable file if it exists if [ -e "side_car_fbit_config_env_var" ]; then cat side_car_fbit_config_env_var | while read line; do - echo $line >> ~/.bashrc + echo $line >>~/.bashrc done source side_car_fbit_config_env_var fi fi fi - #Parse the configmap to set the right environment variables for MDM metrics configuration for Alerting. if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then /usr/bin/ruby2.7 tomlparser-mdm-metrics-config.rb cat config_mdm_metrics_env_var | while read line; do - echo $line >> ~/.bashrc + echo $line >>~/.bashrc done source config_mdm_metrics_env_var @@ -452,7 +544,7 @@ if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then /usr/bin/ruby2.7 tomlparser-metric-collection-config.rb cat config_metric_collection_env_var | while read line; do - echo $line >> ~/.bashrc + echo $line >>~/.bashrc done source config_metric_collection_env_var fi @@ -464,15 +556,15 @@ if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "Prometheus if [ -e "integration_osm_config_env_var" ]; then cat integration_osm_config_env_var | while read line; do - echo $line >> ~/.bashrc + echo $line >>~/.bashrc done source integration_osm_config_env_var fi fi -# If the prometheus sidecar isn't doing anything then there's no need to run mdsd and telegraf in it. -if [[ ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) && - ( "${TELEMETRY_CUSTOM_PROM_MONITOR_PODS}" == "false" ) && +# If the prometheus sidecar isn't doing anything then there's no need to run mdsd and telegraf in it. 
+if [[ ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) && + ( "${TELEMETRY_CUSTOM_PROM_MONITOR_PODS}" == "false" ) && ( "${TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT}" -eq 0 ) ]]; then setGlobalEnvVar MUTE_PROM_SIDECAR true else @@ -498,21 +590,20 @@ fi export CONTAINER_RUNTIME="containerd" export NODE_NAME="" - if [ "$cAdvisorIsSecure" = true ]; then echo "Using port 10250" export IS_SECURE_CADVISOR_PORT=true - echo "export IS_SECURE_CADVISOR_PORT=true" >> ~/.bashrc + echo "export IS_SECURE_CADVISOR_PORT=true" >>~/.bashrc export CADVISOR_METRICS_URL="https://$NODE_IP:10250/metrics" - echo "export CADVISOR_METRICS_URL=https://$NODE_IP:10250/metrics" >> ~/.bashrc + echo "export CADVISOR_METRICS_URL=https://$NODE_IP:10250/metrics" >>~/.bashrc echo "Making curl request to cadvisor endpoint /pods with port 10250 to get the configured container runtime on kubelet" podWithValidContainerId=$(curl -s -k -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" https://$NODE_IP:10250/pods | jq -R 'fromjson? | [ .items[] | select( any(.status.phase; contains("Running")) ) ] | .[0]') else echo "Using port 10255" export IS_SECURE_CADVISOR_PORT=false - echo "export IS_SECURE_CADVISOR_PORT=false" >> ~/.bashrc + echo "export IS_SECURE_CADVISOR_PORT=false" >>~/.bashrc export CADVISOR_METRICS_URL="http://$NODE_IP:10255/metrics" - echo "export CADVISOR_METRICS_URL=http://$NODE_IP:10255/metrics" >> ~/.bashrc + echo "export CADVISOR_METRICS_URL=http://$NODE_IP:10255/metrics" >>~/.bashrc echo "Making curl request to cadvisor endpoint with port 10255 to get the configured container runtime on kubelet" podWithValidContainerId=$(curl -s http://$NODE_IP:10255/pods | jq -R 'fromjson? | [ .items[] | select( any(.status.phase; contains("Running")) ) ] | .[0]') fi @@ -524,13 +615,13 @@ if [ ! 
-z "$podWithValidContainerId" ]; then containerRuntime=$(echo $containerRuntime | tr "[:upper:]" "[:lower:]") nodeName=$(echo $nodeName | tr "[:upper:]" "[:lower:]") # use default container runtime if obtained runtime value is either empty or null - if [ -z "$containerRuntime" -o "$containerRuntime" == null ]; then + if [ -z "$containerRuntime" -o "$containerRuntime" == null ]; then echo "using default container runtime as $CONTAINER_RUNTIME since got containeRuntime as empty or null" else export CONTAINER_RUNTIME=$containerRuntime fi - if [ -z "$nodeName" -o "$nodeName" == null ]; then + if [ -z "$nodeName" -o "$nodeName" == null ]; then echo "-e error nodeName in /pods API response is empty" else export NODE_NAME=$nodeName @@ -540,21 +631,21 @@ else fi echo "configured container runtime on kubelet is : "$CONTAINER_RUNTIME -echo "export CONTAINER_RUNTIME="$CONTAINER_RUNTIME >> ~/.bashrc +echo "export CONTAINER_RUNTIME="$CONTAINER_RUNTIME >>~/.bashrc export KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC="kubelet_runtime_operations_total" -echo "export KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC="$KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC >> ~/.bashrc +echo "export KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC="$KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC >>~/.bashrc export KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC="kubelet_runtime_operations_errors_total" -echo "export KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC="$KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC >> ~/.bashrc +echo "export KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC="$KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC >>~/.bashrc # default to docker metrics export KUBELET_RUNTIME_OPERATIONS_METRIC="kubelet_docker_operations" export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="kubelet_docker_operations_errors" if [ "$CONTAINER_RUNTIME" != "docker" ]; then - # these metrics are avialble only on k8s versions <1.18 and will get deprecated from 1.18 - export KUBELET_RUNTIME_OPERATIONS_METRIC="kubelet_runtime_operations" - export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="kubelet_runtime_operations_errors" + # these metrics are avialble only on k8s versions <1.18 and will get deprecated from 1.18 + export KUBELET_RUNTIME_OPERATIONS_METRIC="kubelet_runtime_operations" + export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="kubelet_runtime_operations_errors" fi echo "set caps for ruby process to read container env from proc" @@ -564,7 +655,7 @@ echo "export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="$KUBELET_RUNTIME_OPERATIO source ~/.bashrc -echo $NODE_NAME > /var/opt/microsoft/docker-cimprov/state/containerhostname +echo $NODE_NAME >/var/opt/microsoft/docker-cimprov/state/containerhostname #check if file was written successfully. 
cat /var/opt/microsoft/docker-cimprov/state/containerhostname @@ -577,16 +668,20 @@ dpkg -l | grep docker-cimprov | awk '{print $2 " " $3}' DOCKER_CIMPROV_VERSION=$(dpkg -l | grep docker-cimprov | awk '{print $3}') echo "DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION -echo "export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" >> ~/.bashrc +echo "export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" >>~/.bashrc +if [ "${CONTROLLER_TYPE}" == "ReplicaSet" ]; then + echo "*** set applicable replicaset config ***" + setReplicaSetSpecificConfig +fi #skip imds lookup since not used either legacy or aad msi auth path export SKIP_IMDS_LOOKUP_FOR_LEGACY_AUTH="true" -echo "export SKIP_IMDS_LOOKUP_FOR_LEGACY_AUTH=$SKIP_IMDS_LOOKUP_FOR_LEGACY_AUTH" >> ~/.bashrc +echo "export SKIP_IMDS_LOOKUP_FOR_LEGACY_AUTH=$SKIP_IMDS_LOOKUP_FOR_LEGACY_AUTH" >>~/.bashrc # this used by mdsd to determine cloud specific LA endpoints export OMS_TLD=$domain -echo "export OMS_TLD=$OMS_TLD" >> ~/.bashrc +echo "export OMS_TLD=$OMS_TLD" >>~/.bashrc cat /etc/mdsd.d/envmdsd | while read line; do - echo $line >> ~/.bashrc + echo $line >>~/.bashrc done source /etc/mdsd.d/envmdsd MDSD_AAD_MSI_AUTH_ARGS="" @@ -650,25 +745,25 @@ if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then echo "not starting mdsd (no metrics to scrape since MUTE_PROM_SIDECAR is true)" fi else - echo "starting mdsd mode in main container..." - # add -T 0xFFFF for full traces - mdsd ${MDSD_AAD_MSI_AUTH_ARGS} -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos 2>> /dev/null & + echo "starting mdsd in main container..." + # add -T 0xFFFF for full traces + mdsd ${MDSD_AAD_MSI_AUTH_ARGS} -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos 2>>/dev/null & fi # Set up a cron job for logrotation if [ ! -f /etc/cron.d/ci-agent ]; then - echo "setting up cronjob for ci agent log rotation" - echo "*/5 * * * * root /usr/sbin/logrotate -s /var/lib/logrotate/ci-agent-status /etc/logrotate.d/ci-agent >/dev/null 2>&1" > /etc/cron.d/ci-agent + echo "setting up cronjob for ci agent log rotation" + echo "*/5 * * * * root /usr/sbin/logrotate -s /var/lib/logrotate/ci-agent-status /etc/logrotate.d/ci-agent >/dev/null 2>&1" >/etc/cron.d/ci-agent fi # no dependency on fluentd for prometheus side car container if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then if [ ! -e "/etc/config/kube.conf" ]; then - echo "*** starting fluentd v1 in daemonset" - fluentd -c /etc/fluent/container.conf -o /var/opt/microsoft/docker-cimprov/log/fluentd.log --log-rotate-age 5 --log-rotate-size 20971520 & + echo "*** starting fluentd v1 in daemonset" + fluentd -c /etc/fluent/container.conf -o /var/opt/microsoft/docker-cimprov/log/fluentd.log --log-rotate-age 5 --log-rotate-size 20971520 & else - echo "*** starting fluentd v1 in replicaset" - fluentd -c /etc/fluent/kube.conf -o /var/opt/microsoft/docker-cimprov/log/fluentd.log --log-rotate-age 5 --log-rotate-size 20971520 & + echo "*** starting fluentd v1 in replicaset" + fluentd -c /etc/fluent/kube.conf -o /var/opt/microsoft/docker-cimprov/log/fluentd.log --log-rotate-age 5 --log-rotate-size 20971520 & fi fi @@ -699,13 +794,13 @@ if [ ! -e "/etc/config/kube.conf" ]; then fi else if [ -e "/opt/telegraf-test-rs.conf" ]; then - echo "****************Start Telegraf in Test Mode**************************" - /opt/telegraf --config /opt/telegraf-test-rs.conf --input-filter file -test - if [ $? 
-eq 0 ]; then - mv "/opt/telegraf-test-rs.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" - echo "Moving test conf file to telegraf replicaset conf since test run succeeded" - fi - echo "****************End Telegraf Run in Test Mode**************************" + echo "****************Start Telegraf in Test Mode**************************" + /opt/telegraf --config /opt/telegraf-test-rs.conf --input-filter file -test + if [ $? -eq 0 ]; then + mv "/opt/telegraf-test-rs.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" + echo "Moving test conf file to telegraf replicaset conf since test run succeeded" + fi + echo "****************End Telegraf Run in Test Mode**************************" fi fi @@ -753,15 +848,15 @@ else fi export TELEMETRY_AKS_RESOURCE_ID=$telemetry_aks_resource_id -echo "export TELEMETRY_AKS_RESOURCE_ID=$telemetry_aks_resource_id" >> ~/.bashrc +echo "export TELEMETRY_AKS_RESOURCE_ID=$telemetry_aks_resource_id" >>~/.bashrc export TELEMETRY_AKS_REGION=$telemetry_aks_region -echo "export TELEMETRY_AKS_REGION=$telemetry_aks_region" >> ~/.bashrc +echo "export TELEMETRY_AKS_REGION=$telemetry_aks_region" >>~/.bashrc export TELEMETRY_CLUSTER_NAME=$telemetry_cluster_name -echo "export TELEMETRY_CLUSTER_NAME=$telemetry_cluster_name" >> ~/.bashrc +echo "export TELEMETRY_CLUSTER_NAME=$telemetry_cluster_name" >>~/.bashrc export TELEMETRY_ACS_RESOURCE_NAME=$telemetry_acs_resource_name -echo "export TELEMETRY_ACS_RESOURCE_NAME=$telemetry_acs_resource_name" >> ~/.bashrc +echo "export TELEMETRY_ACS_RESOURCE_NAME=$telemetry_acs_resource_name" >>~/.bashrc export TELEMETRY_CLUSTER_TYPE=$telemetry_cluster_type -echo "export TELEMETRY_CLUSTER_TYPE=$telemetry_cluster_type" >> ~/.bashrc +echo "export TELEMETRY_CLUSTER_TYPE=$telemetry_cluster_type" >>~/.bashrc #if [ ! -e "/etc/config/kube.conf" ]; then # nodename=$(cat /hostfs/etc/hostname) @@ -773,15 +868,15 @@ echo "replacing nodename in telegraf config" sed -i -e "s/placeholder_hostname/$nodename/g" $telegrafConfFile export HOST_MOUNT_PREFIX=/hostfs -echo "export HOST_MOUNT_PREFIX=/hostfs" >> ~/.bashrc +echo "export HOST_MOUNT_PREFIX=/hostfs" >>~/.bashrc export HOST_PROC=/hostfs/proc -echo "export HOST_PROC=/hostfs/proc" >> ~/.bashrc +echo "export HOST_PROC=/hostfs/proc" >>~/.bashrc export HOST_SYS=/hostfs/sys -echo "export HOST_SYS=/hostfs/sys" >> ~/.bashrc +echo "export HOST_SYS=/hostfs/sys" >>~/.bashrc export HOST_ETC=/hostfs/etc -echo "export HOST_ETC=/hostfs/etc" >> ~/.bashrc +echo "export HOST_ETC=/hostfs/etc" >>~/.bashrc export HOST_VAR=/hostfs/var -echo "export HOST_VAR=/hostfs/var" >> ~/.bashrc +echo "export HOST_VAR=/hostfs/var" >>~/.bashrc if [ ! -e "/etc/config/kube.conf" ]; then if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then @@ -830,9 +925,10 @@ else fi shutdown() { - pkill -f mdsd - } + pkill -f mdsd +} trap "shutdown" SIGTERM -sleep inf & wait +sleep inf & +wait diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 4e021e1b8..d2d7a0c87 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -403,6 +403,8 @@ spec: # this used for e2e test and setting this just emits some additional log statements which used for the e2e tests - name: ISTEST value: "true" + - name: EMIT_CACHE_TELEMETRY + value: "false" #Uncomment below two lines for ACS clusters and set the cluster names manually. 
Also comment out the above two lines for ACS clusters #- name: ACS_RESOURCE_NAME # value: "my_acs_cluster_name" @@ -661,6 +663,13 @@ spec: cpu: 150m memory: 250Mi env: + - name: NUM_OF_FLUENTD_WORKERS + valueFrom: + resourceFieldRef: + containerName: omsagent + resource: limits.cpu + - name: EMIT_CACHE_TELEMETRY + value: "false" # enable only debug or test purpose and disable for prod - name: AKS_RESOURCE_ID value: "VALUE_AKS_RESOURCE_ID_VALUE" - name: AKS_REGION diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 8925248d7..ffd76bfbd 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -11,6 +11,8 @@ class KubernetesApiClient require_relative "oms_common" require_relative "constants" + require_relative "WatchStream" + require_relative "kubernetes_container_inventory" @@ApiVersion = "v1" @@ApiVersionApps = "v1" @@ -35,8 +37,6 @@ class KubernetesApiClient @Log = Logger.new(@LogPath, 2, 10 * 1048576) #keep last 2 files, max log file size = 10M @@TokenFileName = "/var/run/secrets/kubernetes.io/serviceaccount/token" @@TokenStr = nil - @@NodeMetrics = Hash.new - @@WinNodeArray = [] @@telemetryTimeTracker = DateTime.now.to_time.to_i @@resourceLimitsTelemetryHash = {} @@ -75,6 +75,39 @@ def getKubeResourceInfo(resource, api_group: nil) return response end + def getKubeResourceInfoV2(resource, api_group: nil) + headers = {} + response = nil + responseCode = nil + @Log.info "Getting Kube resource: #{resource}" + begin + resourceUri = getResourceUri(resource, api_group) + if !resourceUri.nil? + uri = URI.parse(resourceUri) + if !File.exist?(@@CaFile) + raise "#{@@CaFile} doesnt exist" + else + Net::HTTP.start(uri.host, uri.port, :use_ssl => true, :ca_file => @@CaFile, :verify_mode => OpenSSL::SSL::VERIFY_PEER, :open_timeout => 20, :read_timeout => 40) do |http| + kubeApiRequest = Net::HTTP::Get.new(uri.request_uri) + kubeApiRequest["Authorization"] = "Bearer " + getTokenStr + @Log.info "KubernetesAPIClient::getKubeResourceInfoV2 : Making request to #{uri.request_uri} @ #{Time.now.utc.iso8601}" + response = http.request(kubeApiRequest) + responseCode = response.code + @Log.info "KubernetesAPIClient::getKubeResourceInfoV2 : Got response of #{response.code} for #{uri.request_uri} @ #{Time.now.utc.iso8601}" + end + end + end + rescue => error + @Log.warn("kubernetes api request failed: #{error} for #{resource} @ #{Time.now.utc.iso8601}") + end + if (!response.nil?) + if (!response.body.nil? && response.body.empty?) + @Log.warn("KubernetesAPIClient::getKubeResourceInfoV2 : Got empty response from Kube API for #{resource} @ #{Time.now.utc.iso8601}") + end + end + return responseCode, response + end + def getTokenStr return @@TokenStr if !@@TokenStr.nil? begin @@ -88,7 +121,7 @@ def getTokenStr end end - def getClusterRegion(env=ENV) + def getClusterRegion(env = ENV) if env["AKS_REGION"] return env["AKS_REGION"] else @@ -97,7 +130,7 @@ def getClusterRegion(env=ENV) end end - def getResourceUri(resource, api_group, env=ENV) + def getResourceUri(resource, api_group, env = ENV) begin if env["KUBERNETES_SERVICE_HOST"] && env["KUBERNETES_PORT_443_TCP_PORT"] if api_group.nil? @@ -114,7 +147,7 @@ def getResourceUri(resource, api_group, env=ENV) end end - def getClusterName(env=ENV) + def getClusterName(env = ENV) return @@ClusterName if !@@ClusterName.nil? 
@@ClusterName = "None" begin @@ -148,7 +181,7 @@ def getClusterName(env=ENV) return @@ClusterName end - def getClusterId(env=ENV) + def getClusterId(env = ENV) return @@ClusterId if !@@ClusterId.nil? #By default initialize ClusterId to ClusterName. # In ACS/On-prem, we need to figure out how we can generate ClusterId @@ -292,8 +325,6 @@ def getWindowsNodes resourceUri = getNodesResourceUri("nodes?labelSelector=kubernetes.io%2Fos%3Dwindows") nodeInventory = JSON.parse(getKubeResourceInfo(resourceUri).body) @Log.info "KubernetesAPIClient::getWindowsNodes : Got nodes from kube api" - # Resetting the windows node cache - @@WinNodeArray.clear if (!nodeInventory.empty?) nodeInventory["items"].each do |item| # check for windows operating system in node metadata @@ -303,11 +334,6 @@ def getWindowsNodes if !nodeStatus.nil? && !nodeStatus["nodeInfo"].nil? && !nodeStatus["nodeInfo"]["operatingSystem"].nil? operatingSystem = nodeStatus["nodeInfo"]["operatingSystem"] if (operatingSystem.is_a?(String) && operatingSystem.casecmp("windows") == 0) - # Adding windows nodes to winNodeArray so that it can be used in kubepodinventory to send ContainerInventory data - # to get images and image tags for containers in windows nodes - if !nodeMetadata.nil? && !nodeMetadata["name"].nil? - @@WinNodeArray.push(nodeMetadata["name"]) - end nodeStatusAddresses = nodeStatus["addresses"] if !nodeStatusAddresses.nil? nodeStatusAddresses.each do |address| @@ -327,7 +353,33 @@ def getWindowsNodes end def getWindowsNodesArray - return @@WinNodeArray + winNodeArray = [] + begin + # get only windows nodes + resourceUri = getNodesResourceUri("nodes?labelSelector=kubernetes.io%2Fos%3Dwindows") + nodeInventory = JSON.parse(getKubeResourceInfo(resourceUri).body) + @Log.info "KubernetesAPIClient::getWindowsNodes : Got nodes from kube api" + if (!nodeInventory.empty?) + nodeInventory["items"].each do |item| + # check for windows operating system in node metadata + nodeStatus = item["status"] + nodeMetadata = item["metadata"] + if !nodeStatus.nil? && !nodeStatus["nodeInfo"].nil? && !nodeStatus["nodeInfo"]["operatingSystem"].nil? + operatingSystem = nodeStatus["nodeInfo"]["operatingSystem"] + if (operatingSystem.is_a?(String) && operatingSystem.casecmp("windows") == 0) + # Adding windows nodes to winNodeArray so that it can be used in kubepodinventory to send ContainerInventory data + # to get images and image tags for containers in windows nodes + if !nodeMetadata.nil? && !nodeMetadata["name"].nil? 
+ winNodeArray.push(nodeMetadata["name"]) + end + end + end + end + end + rescue => error + @Log.warn("KubernetesApiClient::getWindowsNodesArray:failed with an error: #{error}") + end + return winNodeArray end def getContainerIDs(namespace) @@ -409,7 +461,7 @@ def getPodUid(podNameSpace, podMetadata) return podUid end - def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) + def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToCollect, metricNametoReturn, nodeAllocatableRecord, metricTime = Time.now.utc.iso8601) metricItems = [] begin clusterId = getClusterId @@ -456,19 +508,16 @@ def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToColle metricCollection = {} metricCollection["CounterName"] = metricNametoReturn metricCollection["Value"] = metricValue - + metricProps["json_Collections"] = [] - metricCollections = [] - metricCollections.push(metricCollection) + metricCollections = [] + metricCollections.push(metricCollection) metricProps["json_Collections"] = metricCollections.to_json - metricItems.push(metricProps) + metricItems.push(metricProps) #No container level limit for the given metric, so default to node level limit else - nodeMetricsHashKey = clusterId + "/" + nodeName + "_" + "allocatable" + "_" + metricNameToCollect - if (metricCategory == "limits" && @@NodeMetrics.has_key?(nodeMetricsHashKey)) - metricValue = @@NodeMetrics[nodeMetricsHashKey] - #@Log.info("Limits not set for container #{clusterId + "/" + podUid + "/" + containerName} using node level limits: #{nodeMetricsHashKey}=#{metricValue} ") - + if (metricCategory == "limits" && !nodeAllocatableRecord.nil? && !nodeAllocatableRecord.empty? && nodeAllocatableRecord.has_key?(metricNameToCollect)) + metricValue = nodeAllocatableRecord[metricNameToCollect] metricProps = {} metricProps["Timestamp"] = metricTime metricProps["Host"] = nodeName @@ -481,10 +530,10 @@ def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToColle metricCollection["CounterName"] = metricNametoReturn metricCollection["Value"] = metricValue metricProps["json_Collections"] = [] - metricCollections = [] - metricCollections.push(metricCollection) + metricCollections = [] + metricCollections.push(metricCollection) metricProps["json_Collections"] = metricCollections.to_json - metricItems.push(metricProps) + metricItems.push(metricProps) end end end @@ -496,7 +545,7 @@ def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToColle return metricItems end #getContainerResourceRequestAndLimits - def getContainerResourceRequestsAndLimitsAsInsightsMetrics(pod, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) + def getContainerResourceRequestsAndLimitsAsInsightsMetrics(pod, metricCategory, metricNameToCollect, metricNametoReturn, nodeAllocatableRecord, metricTime = Time.now.utc.iso8601) metricItems = [] begin clusterId = getClusterId @@ -541,8 +590,9 @@ def getContainerResourceRequestsAndLimitsAsInsightsMetrics(pod, metricCategory, else #No container level limit for the given metric, so default to node level limit for non-gpu metrics if (metricNameToCollect.downcase != "nvidia.com/gpu") && (metricNameToCollect.downcase != "amd.com/gpu") - nodeMetricsHashKey = clusterId + "/" + nodeName + "_" + "allocatable" + "_" + metricNameToCollect - metricValue = @@NodeMetrics[nodeMetricsHashKey] + if !nodeAllocatableRecord.nil? && !nodeAllocatableRecord.empty? 
&& nodeAllocatableRecord.has_key?(metricNameToCollect) + metricValue = nodeAllocatableRecord[metricNameToCollect] + end end end if (!metricValue.nil?) @@ -615,15 +665,10 @@ def parseNodeLimitsFromNodeItem(node, metricCategory, metricNameToCollect, metri metricCollection["CounterName"] = metricNametoReturn metricCollection["Value"] = metricValue metricCollections = [] - metricCollections.push(metricCollection) - + metricCollections.push(metricCollection) + metricItem["json_Collections"] = [] metricItem["json_Collections"] = metricCollections.to_json - - #push node level metrics to a inmem hash so that we can use it looking up at container level. - #Currently if container level cpu & memory limits are not defined we default to node level limits - @@NodeMetrics[clusterId + "/" + node["metadata"]["name"] + "_" + metricCategory + "_" + metricNameToCollect] = metricValue - #@Log.info ("Node metric hash: #{@@NodeMetrics}") end rescue => error @Log.warn("parseNodeLimitsFromNodeItem failed: #{error} for metric #{metricCategory} #{metricNameToCollect}") @@ -657,13 +702,6 @@ def parseNodeLimitsAsInsightsMetrics(node, metricCategory, metricNameToCollect, metricTags[Constants::INSIGHTSMETRICS_TAGS_GPU_VENDOR] = metricNameToCollect metricItem["Tags"] = metricTags - - #push node level metrics (except gpu ones) to a inmem hash so that we can use it looking up at container level. - #Currently if container level cpu & memory limits are not defined we default to node level limits - if (metricNameToCollect.downcase != "nvidia.com/gpu") && (metricNameToCollect.downcase != "amd.com/gpu") - @@NodeMetrics[clusterId + "/" + node["metadata"]["name"] + "_" + metricCategory + "_" + metricNameToCollect] = metricValue - #@Log.info ("Node metric hash: #{@@NodeMetrics}") - end end rescue => error @Log.warn("parseNodeLimitsAsInsightsMetrics failed: #{error} for metric #{metricCategory} #{metricNameToCollect}") @@ -754,6 +792,31 @@ def getMetricNumericValue(metricName, metricVal) return metricValue end # getMetricNumericValue + def getResourcesAndContinuationTokenV2(uri, api_group: nil) + continuationToken = nil + resourceInventory = nil + responseCode = nil + begin + @Log.info "KubernetesApiClient::getResourcesAndContinuationTokenV2 : Getting resources from Kube API using url: #{uri} @ #{Time.now.utc.iso8601}" + responseCode, resourceInfo = getKubeResourceInfoV2(uri, api_group: api_group) + @Log.info "KubernetesApiClient::getResourcesAndContinuationTokenV2 : Done getting resources from Kube API using url: #{uri} @ #{Time.now.utc.iso8601}" + if !responseCode.nil? && responseCode == "200" && !resourceInfo.nil? + @Log.info "KubernetesApiClient::getResourcesAndContinuationTokenV2:Start:Parsing data for #{uri} using yajl @ #{Time.now.utc.iso8601}" + resourceInventory = Yajl::Parser.parse(StringIO.new(resourceInfo.body)) + @Log.info "KubernetesApiClient::getResourcesAndContinuationTokenV2:End:Parsing data for #{uri} using yajl @ #{Time.now.utc.iso8601}" + resourceInfo = nil + end + if (!resourceInventory.nil? && !resourceInventory["metadata"].nil?) 
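getResourcesAndContinuationTokenV2 surfaces the HTTP status code alongside the parsed body, so a caller can tell a failed LIST apart from an empty one and only keep paging on "200". A minimal caller-side sketch, assuming a hypothetical `client` object that exposes this method and an invented page size:

    uri = "nodes?limit=250"   # invented resource URI
    token, inventory, code = client.getResourcesAndContinuationTokenV2(uri)
    items = (code == "200" && !inventory.nil? && !inventory["items"].nil?) ? inventory["items"] : []
    while code == "200" && !token.to_s.empty?
      token, page, code = client.getResourcesAndContinuationTokenV2("#{uri}&continue=#{token}")
      items.concat(page["items"]) if code == "200" && !page.nil? && !page["items"].nil?
    end
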
+ continuationToken = resourceInventory["metadata"]["continue"] + end + rescue => errorStr + @Log.warn "KubernetesApiClient::getResourcesAndContinuationTokenV2:Failed in get resources for #{uri} and continuation token: #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + resourceInventory = nil + end + return continuationToken, resourceInventory, responseCode + end #getResourcesAndContinuationTokenV2 + def getResourcesAndContinuationToken(uri, api_group: nil) continuationToken = nil resourceInventory = nil @@ -778,7 +841,7 @@ def getResourcesAndContinuationToken(uri, api_group: nil) return continuationToken, resourceInventory end #getResourcesAndContinuationToken - def getKubeAPIServerUrl(env=ENV) + def getKubeAPIServerUrl(env = ENV) apiServerUrl = nil begin if env["KUBERNETES_SERVICE_HOST"] && env["KUBERNETES_PORT_443_TCP_PORT"] @@ -818,5 +881,518 @@ def getKubeServicesInventoryRecords(serviceList, batchTime = Time.utc.iso8601) end return kubeServiceRecords end + + # Accepts the following options: + # :namespace (string) - the namespace of the entity. + # :name (string) - the name of the entity to watch. + # :label_selector (string) - a selector to restrict the list of returned objects by labels. + # :field_selector (string) - a selector to restrict the list of returned objects by fields. + # :resource_version (string) - shows changes that occur after passed version of a resource. + # :allow_watch_bookmarks (bool) - flag to indicate whether to use bookmark or not. + def watch(resource_name, options = {}) + begin + if !File.exist?(@@CaFile) + raise "#{@@CaFile} doesnt exist" + end + http_options = { + use_ssl: true, + open_timeout: 60, + read_timeout: 240, # https://github.com/kubernetes-client/java/issues/1370 https://github.com/kubernetes-client/java/issues/1578 + ca_file: @@CaFile, + verify_mode: OpenSSL::SSL::VERIFY_PEER, + } + http_headers = { + Authorization: "Bearer " + getTokenStr, + } + ns = "" + if !options[:namespace].to_s.empty? + ns = "namespaces/#{namespace}/" + end + path = "watch/#{ns}#{resource_name}" + path += "/#{options[:name]}" if options[:name] + api_endpoint = "https://#{ENV["KUBERNETES_SERVICE_HOST"]}:#{ENV["KUBERNETES_PORT_443_TCP_PORT"]}/api/" + @@ApiVersion + "/" + "#{path}" + uri = URI.parse(api_endpoint) + params = {} + WATCH_ARGUMENTS.each { |k, v| params[k] = options[v] if options[v] } + uri.query = URI.encode_www_form(params) if params.any? + watcher = WatchStream.new( + uri, + http_options, + http_headers, + @Log + ) + return watcher unless block_given? + begin + watcher.each(&block) + ensure + watcher.finish if watcher + end + rescue => errorStr + @Log.warn "KubernetesApiClient::watch:Failed with an error: #{errorStr}" + end + end + + def getOptimizedItem(resource, resourceItem, isWindowsItem = false) + case resource + when "pods" + return getPodOptimizedItem(resourceItem, isWindowsItem) + when "pods-perf" + return getPodPerfOptimizedItem(resourceItem) + when "nodes" + return getNodeOptimizedItem(resourceItem) + when "services" + return getServiceOptimizedItem(resourceItem) + when "deployments" + return getDeploymentOptimizedItem(resourceItem) + when "horizontalpodautoscalers" + return getHpaOptimizedItem(resourceItem) + else + return resourceItem + end + end + + def getServiceOptimizedItem(resourceItem) + item = {} + begin + item["metadata"] = {} + if !resourceItem["metadata"].nil? 
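getOptimizedItem trims whatever resource item it is handed down to the handful of fields the downstream plugins actually read, which is what keeps the new item caches small. For a service that means name, namespace, selector, clusterIP and type; everything else from the API object is dropped. A small before/after sketch with invented values:

    raw = { "metadata" => { "name" => "kube-dns", "namespace" => "kube-system", "resourceVersion" => "4711" },
            "spec"     => { "selector" => { "k8s-app" => "kube-dns" }, "clusterIP" => "10.0.0.10",
                            "type" => "ClusterIP", "ports" => [{ "port" => 53 }] } }
    # after trimming, roughly:
    # { "metadata" => { "name" => "kube-dns", "namespace" => "kube-system" },
    #   "spec"     => { "selector" => { "k8s-app" => "kube-dns" }, "clusterIP" => "10.0.0.10", "type" => "ClusterIP" } }
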
+ item["metadata"]["name"] = resourceItem["metadata"]["name"] + item["metadata"]["namespace"] = resourceItem["metadata"]["namespace"] + end + item["spec"] = {} + if !resourceItem["spec"].nil? + item["spec"]["selector"] = [] + if !resourceItem["spec"]["selector"].nil? + item["spec"]["selector"] = resourceItem["spec"]["selector"] + end + item["spec"]["clusterIP"] = "" + if !resourceItem["spec"]["clusterIP"].nil? + item["spec"]["clusterIP"] = resourceItem["spec"]["clusterIP"] + end + item["spec"]["type"] = "" + if !resourceItem["spec"]["type"].nil? + item["spec"]["type"] = resourceItem["spec"]["type"] + end + end + rescue => errorStr + @Log.warn "KubernetesApiClient::getServiceOptimizedItem:Failed with an error : #{errorStr}" + end + return item + end + + def isWindowsNodeItem(nodeResourceItem) + isWindowsNodeItem = false + begin + nodeStatus = nodeResourceItem["status"] + if !nodeStatus.nil? && !nodeStatus["nodeInfo"].nil? && !nodeStatus["nodeInfo"]["operatingSystem"].nil? + operatingSystem = nodeStatus["nodeInfo"]["operatingSystem"] + if (operatingSystem.is_a?(String) && operatingSystem.casecmp("windows") == 0) + isWindowsNodeItem = true + end + end + rescue => errorStr + $Log.warn "KubernetesApiClient::::isWindowsNodeItem: failed with an error: #{errorStr} @ #{Time.now.utc.iso8601}" + end + return isWindowsNodeItem + end + + def getPodPerfOptimizedItem(resourceItem) + item = {} + begin + item["metadata"] = {} + if !resourceItem["metadata"].nil? + if !resourceItem["metadata"]["annotations"].nil? + item["metadata"]["annotations"] = {} + item["metadata"]["annotations"]["kubernetes.io/config.hash"] = resourceItem["metadata"]["annotations"]["kubernetes.io/config.hash"] + end + + if !resourceItem["metadata"]["ownerReferences"].nil? && resourceItem["metadata"]["ownerReferences"].length > 0 + item["metadata"]["ownerReferences"] = [] + ownerReference = {} + ownerReference["name"] = resourceItem["metadata"]["ownerReferences"][0]["name"] + ownerReference["kind"] = resourceItem["metadata"]["ownerReferences"][0]["kind"] + item["metadata"]["ownerReferences"].push(ownerReference) + end + item["metadata"]["name"] = resourceItem["metadata"]["name"] + item["metadata"]["namespace"] = resourceItem["metadata"]["namespace"] + item["metadata"]["uid"] = resourceItem["metadata"]["uid"] + end + + item["spec"] = {} + if !resourceItem["spec"].nil? + item["spec"]["containers"] = [] + if !resourceItem["spec"]["containers"].nil? + resourceItem["spec"]["containers"].each do |container| + currentContainer = {} + currentContainer["name"] = container["name"] + currentContainer["resources"] = container["resources"] + item["spec"]["containers"].push(currentContainer) + end + end + item["spec"]["initContainers"] = [] + if !resourceItem["spec"]["initContainers"].nil? + resourceItem["spec"]["initContainers"].each do |container| + currentContainer = {} + currentContainer["name"] = container["name"] + currentContainer["resources"] = container["resources"] + item["spec"]["initContainers"].push(currentContainer) + end + end + item["spec"]["nodeName"] = "" + if !resourceItem["spec"]["nodeName"].nil? + item["spec"]["nodeName"] = resourceItem["spec"]["nodeName"] + end + end + rescue => errorStr + @Log.warn "KubernetesApiClient::getPodPerfOptimizedItem:Failed with an error : #{errorStr}" + end + return item + end + + def getPodOptimizedItem(resourceItem, isWindowsPodItem) + item = {} + begin + item["metadata"] = {} + if !resourceItem["metadata"].nil? + if !resourceItem["metadata"]["annotations"].nil? 
+ item["metadata"]["annotations"] = {} + item["metadata"]["annotations"]["kubernetes.io/config.hash"] = resourceItem["metadata"]["annotations"]["kubernetes.io/config.hash"] + end + if !resourceItem["metadata"]["labels"].nil? + item["metadata"]["labels"] = resourceItem["metadata"]["labels"] + end + if !resourceItem["metadata"]["ownerReferences"].nil? && resourceItem["metadata"]["ownerReferences"].length > 0 + item["metadata"]["ownerReferences"] = [] + ownerReference = {} + ownerReference["name"] = resourceItem["metadata"]["ownerReferences"][0]["name"] + ownerReference["kind"] = resourceItem["metadata"]["ownerReferences"][0]["kind"] + item["metadata"]["ownerReferences"].push(ownerReference) + end + item["metadata"]["name"] = resourceItem["metadata"]["name"] + item["metadata"]["namespace"] = resourceItem["metadata"]["namespace"] + item["metadata"]["uid"] = resourceItem["metadata"]["uid"] + item["metadata"]["creationTimestamp"] = resourceItem["metadata"]["creationTimestamp"] + if !resourceItem["metadata"]["deletionTimestamp"].nil? + item["metadata"]["deletionTimestamp"] = resourceItem["metadata"]["deletionTimestamp"] + end + end + + item["spec"] = {} + if !resourceItem["spec"].nil? + item["spec"]["containers"] = [] + item["spec"]["initContainers"] = [] + isDisableClusterCollectEnvVar = false + clusterCollectEnvironmentVar = ENV["AZMON_CLUSTER_COLLECT_ENV_VAR"] + if !clusterCollectEnvironmentVar.nil? && !clusterCollectEnvironmentVar.empty? && clusterCollectEnvironmentVar.casecmp("false") == 0 + isDisableClusterCollectEnvVar = true + end + + # container spec required only for windows container inventory records + if isWindowsPodItem + if !resourceItem["spec"]["containers"].nil? + resourceItem["spec"]["containers"].each do |container| + currentContainer = {} + currentContainer["name"] = container["name"] + currentContainer["resources"] = container["resources"] + # fields required for windows containers records + if isWindowsPodItem + currentContainer["image"] = container["image"] + currentContainer["ports"] = container["ports"] + currentContainer["command"] = container["command"] + currentContainer["env"] = "" + if !isDisableClusterCollectEnvVar + currentContainer["env"] = KubernetesContainerInventory.obtainContainerEnvironmentVarsFromPodsResponse(resourceItem, container) + end + end + item["spec"]["containers"].push(currentContainer) + end + end + if !resourceItem["spec"]["initContainers"].nil? + resourceItem["spec"]["initContainers"].each do |container| + currentContainer = {} + currentContainer["name"] = container["name"] + currentContainer["resources"] = container["resources"] + # fields required for windows containers records + if isWindowsPodItem + currentContainer["image"] = container["image"] + currentContainer["ports"] = container["ports"] + currentContainer["command"] = container["command"] + currentContainer["env"] = "" + if !isDisableClusterCollectEnvVar + currentContainer["env"] = KubernetesContainerInventory.obtainContainerEnvironmentVarsFromPodsResponse(resourceItem, container) + end + end + item["spec"]["initContainers"].push(currentContainer) + end + end + end + + item["spec"]["nodeName"] = "" + if !resourceItem["spec"]["nodeName"].nil? + item["spec"]["nodeName"] = resourceItem["spec"]["nodeName"] + end + end + item["status"] = {} + + if !resourceItem["status"].nil? + if !resourceItem["status"]["startTime"].nil? + item["status"]["startTime"] = resourceItem["status"]["startTime"] + end + if !resourceItem["status"]["reason"].nil? 
+ item["status"]["reason"] = resourceItem["status"]["reason"] + end + if !resourceItem["status"]["podIP"].nil? + item["status"]["podIP"] = resourceItem["status"]["podIP"] + end + if !resourceItem["status"]["phase"].nil? + item["status"]["phase"] = resourceItem["status"]["phase"] + end + if !resourceItem["status"]["conditions"].nil? + item["status"]["conditions"] = [] + resourceItem["status"]["conditions"].each do |condition| + currentCondition = {} + currentCondition["type"] = condition["type"] + currentCondition["status"] = condition["status"] + item["status"]["conditions"].push(currentCondition) + end + end + item["status"]["initContainerStatuses"] = [] + if !resourceItem["status"]["initContainerStatuses"].nil? + resourceItem["status"]["initContainerStatuses"].each do |containerStatus| + currentContainerStatus = {} + currentContainerStatus["containerID"] = containerStatus["containerID"] + currentContainerStatus["name"] = containerStatus["name"] + currentContainerStatus["restartCount"] = containerStatus["restartCount"] + currentContainerStatus["state"] = containerStatus["state"] + currentContainerStatus["lastState"] = containerStatus["lastState"] + if isWindowsPodItem + currentContainerStatus["imageID"] = containerStatus["imageID"] + end + item["status"]["initContainerStatuses"].push(currentContainerStatus) + end + end + item["status"]["containerStatuses"] = [] + if !resourceItem["status"]["containerStatuses"].nil? + resourceItem["status"]["containerStatuses"].each do |containerStatus| + currentContainerStatus = {} + currentContainerStatus["containerID"] = containerStatus["containerID"] + currentContainerStatus["name"] = containerStatus["name"] + currentContainerStatus["restartCount"] = containerStatus["restartCount"] + currentContainerStatus["state"] = containerStatus["state"] + currentContainerStatus["lastState"] = containerStatus["lastState"] + if isWindowsPodItem + currentContainerStatus["imageID"] = containerStatus["imageID"] + end + item["status"]["containerStatuses"].push(currentContainerStatus) + end + end + # this metadata used to identify the pod scheduled onto windows node + # so that pod inventory can make decision to extract containerinventory records or not + if isWindowsPodItem + item["isWindows"] = "true" + end + end + rescue => errorStr + @Log.warn "KubernetesApiClient::getPodOptimizedItem:Failed with an error : #{errorStr}" + end + return item + end + + def getNodeAllocatableValues(nodeResourceItem) + nodeAllocatable = {} + begin + if !nodeResourceItem["status"].nil? && + !nodeResourceItem["status"]["allocatable"].nil? && + !nodeResourceItem["status"]["allocatable"].empty? + nodeAllocatable["cpu"] = nodeResourceItem["status"]["allocatable"]["cpu"] + nodeAllocatable["memory"] = nodeResourceItem["status"]["allocatable"]["memory"] + end + rescue => errorStr + @Log.warn "KubernetesApiClient::getNodeAllocatableValues:Failed with an error : #{errorStr}" + end + return nodeAllocatable + end + + def getNodeOptimizedItem(resourceItem) + item = {} + begin + item["metadata"] = {} + if !resourceItem["metadata"].nil? + item["metadata"]["name"] = resourceItem["metadata"]["name"] + item["metadata"]["creationTimestamp"] = resourceItem["metadata"]["creationTimestamp"] + if !resourceItem["metadata"]["labels"].nil? + item["metadata"]["labels"] = resourceItem["metadata"]["labels"] + end + end + item["spec"] = {} + if !resourceItem["spec"].nil? + if !resourceItem["spec"]["providerID"].nil? && !resourceItem["spec"]["providerID"].empty? 
+ provider = resourceItem["spec"]["providerID"].split(":")[0] + if !provider.nil? && !provider.empty? + item["spec"]["providerID"] = provider + end + end + end + item["status"] = {} + if !resourceItem["status"].nil? + item["status"]["conditions"] = [] + if !resourceItem["status"]["conditions"].nil? + resourceItem["status"]["conditions"].each do |condition| + currentCondition = {} + currentCondition["type"] = condition["type"] + currentCondition["status"] = condition["status"] + currentCondition["lastTransitionTime"] = condition["lastTransitionTime"] + item["status"]["conditions"].push(currentCondition) + end + end + + nodeInfo = {} + if !resourceItem["status"]["nodeInfo"].nil? && !resourceItem["status"]["nodeInfo"].empty? + nodeInfo["kubeletVersion"] = resourceItem["status"]["nodeInfo"]["kubeletVersion"] + nodeInfo["kubeProxyVersion"] = resourceItem["status"]["nodeInfo"]["kubeProxyVersion"] + nodeInfo["osImage"] = resourceItem["status"]["nodeInfo"]["osImage"] + nodeInfo["containerRuntimeVersion"] = resourceItem["status"]["nodeInfo"]["containerRuntimeVersion"] + nodeInfo["operatingSystem"] = resourceItem["status"]["nodeInfo"]["operatingSystem"] + nodeInfo["kernelVersion"] = resourceItem["status"]["nodeInfo"]["kernelVersion"] + end + item["status"]["nodeInfo"] = nodeInfo + + nodeAllocatable = {} + if !resourceItem["status"]["allocatable"].nil? && !resourceItem["status"]["allocatable"].empty? + nodeAllocatable["cpu"] = resourceItem["status"]["allocatable"]["cpu"] + nodeAllocatable["memory"] = resourceItem["status"]["allocatable"]["memory"] + if !resourceItem["status"]["allocatable"]["nvidia.com/gpu"].nil? + nodeAllocatable["nvidia.com/gpu"] = resourceItem["status"]["allocatable"]["nvidia.com/gpu"] + end + if !resourceItem["status"]["allocatable"]["amd.com/gpu"].nil? + nodeAllocatable["amd.com/gpu"] = resourceItem["status"]["allocatable"]["amd.com/gpu"] + end + end + item["status"]["allocatable"] = nodeAllocatable + + nodeCapacity = {} + if !resourceItem["status"]["capacity"].nil? && !resourceItem["status"]["capacity"].empty? + nodeCapacity["cpu"] = resourceItem["status"]["capacity"]["cpu"] + nodeCapacity["memory"] = resourceItem["status"]["capacity"]["memory"] + if !resourceItem["status"]["capacity"]["nvidia.com/gpu"].nil? + nodeCapacity["nvidia.com/gpu"] = resourceItem["status"]["capacity"]["nvidia.com/gpu"] + end + if !resourceItem["status"]["capacity"]["amd.com/gpu"].nil? + nodeCapacity["amd.com/gpu"] = resourceItem["status"]["capacity"]["amd.com/gpu"] + end + end + item["status"]["capacity"] = nodeCapacity + end + rescue => errorStr + @Log.warn "KubernetesApiClient::getNodeOptimizedItem:Failed with an error : #{errorStr}" + end + return item + end + + def getDeploymentOptimizedItem(resourceItem) + item = {} + begin + item["metadata"] = {} + if !resourceItem["metadata"].nil? + item["metadata"]["name"] = resourceItem["metadata"]["name"] + item["metadata"]["creationTimestamp"] = resourceItem["metadata"]["creationTimestamp"] + end + item["spec"] = {} + if !resourceItem["spec"].nil? + item["spec"]["strategy"] = {} + if !resourceItem["spec"]["strategy"].nil? && !resourceItem["spec"]["strategy"].empty? && !resourceItem["spec"]["strategy"]["type"].nil? + item["spec"]["strategy"]["type"] = resourceItem["spec"]["strategy"]["type"] + end + if !resourceItem["spec"]["replicas"].nil? + item["spec"]["replicas"] = resourceItem["spec"]["replicas"] + end + end + item["status"] = {} + if !resourceItem["status"].nil? + if !resourceItem["status"]["readyReplicas"].nil? 
+ item["status"]["readyReplicas"] = resourceItem["status"]["readyReplicas"] + end + if !resourceItem["status"]["updatedReplicas"].nil? + item["status"]["updatedReplicas"] = resourceItem["status"]["updatedReplicas"] + end + if !resourceItem["status"]["availableReplicas"].nil? + item["status"]["availableReplicas"] = resourceItem["status"]["availableReplicas"] + end + end + rescue => errorStr + @Log.warn "KubernetesApiClient::getDeploymentOptimizedItem:Failed with an error : #{errorStr}" + end + return item + end + + def getHpaOptimizedItem(resourceItem) + item = {} + begin + item["metadata"] = {} + if !resourceItem["metadata"].nil? + item["metadata"]["name"] = resourceItem["metadata"]["name"] + item["metadata"]["namespace"] = resourceItem["metadata"]["namespace"] + item["metadata"]["creationTimestamp"] = resourceItem["metadata"]["creationTimestamp"] + end + item["spec"] = {} + if !resourceItem["spec"].nil? + if !resourceItem["spec"]["minReplicas"].nil? + item["spec"]["minReplicas"] = resourceItem["spec"]["minReplicas"] + end + if !resourceItem["spec"]["maxReplicas"].nil? + item["spec"]["maxReplicas"] = resourceItem["spec"]["maxReplicas"] + end + item["spec"]["scaleTargetRef"] = {} + if !resourceItem["spec"]["scaleTargetRef"].nil? && !resourceItem["spec"]["scaleTargetRef"]["kind"].nil? + item["spec"]["scaleTargetRef"]["kind"] = resourceItem["spec"]["scaleTargetRef"]["kind"] + end + if !resourceItem["spec"]["scaleTargetRef"].nil? && !resourceItem["spec"]["scaleTargetRef"]["name"].nil? + item["spec"]["scaleTargetRef"]["name"] = resourceItem["spec"]["scaleTargetRef"]["name"] + end + end + item["status"] = {} + if !resourceItem["status"].nil? + if !resourceItem["status"]["currentReplicas"].nil? + item["status"]["currentReplicas"] = resourceItem["status"]["currentReplicas"] + end + if !resourceItem["status"]["desiredReplicas"].nil? + item["status"]["desiredReplicas"] = resourceItem["status"]["desiredReplicas"] + end + if !resourceItem["status"]["lastScaleTime"].nil? + item["status"]["lastScaleTime"] = resourceItem["status"]["lastScaleTime"] + end + end + rescue => errorStr + @Log.warn "KubernetesApiClient::getHpaOptimizedItem:Failed with an error : #{errorStr}" + end + return item + end + + def getPodReadyCondition(podStatusConditions) + podReadyCondition = false + begin + if !podStatusConditions.nil? && !podStatusConditions.empty? + podStatusConditions.each do |condition| + if condition["type"] == "Ready" + if condition["status"].downcase == "true" + podReadyCondition = true + end + break #Exit the for loop since we found the ready condition + end + end + end + rescue => err + @Log.warn "in_kube_podinventory::getPodReadyCondition failed with an error: #{err}" + end + return podReadyCondition + end + + def isEmitCacheTelemetry + isEmitCacheTelemtryEnabled = false + if !ENV["EMIT_CACHE_TELEMETRY"].nil? && !ENV["EMIT_CACHE_TELEMETRY"].empty? 
&& ENV["EMIT_CACHE_TELEMETRY"].downcase == "true" + isEmitCacheTelemtryEnabled = true + end + return isEmitCacheTelemtryEnabled + end end end diff --git a/source/plugins/ruby/WatchStream.rb b/source/plugins/ruby/WatchStream.rb new file mode 100644 index 000000000..6cc850450 --- /dev/null +++ b/source/plugins/ruby/WatchStream.rb @@ -0,0 +1,70 @@ +#!/usr/local/bin/ruby +# frozen_string_literal: true + +require "net/http" +require "net/https" +require "yajl/json_gem" +require "logger" +require "time" + +WATCH_ARGUMENTS = { + "labelSelector" => :label_selector, + "fieldSelector" => :field_selector, + "resourceVersion" => :resource_version, + "allowWatchBookmarks" => :allow_watch_bookmarks, + "timeoutSeconds" => :timeout_seconds, +}.freeze + +# HTTP Stream used to watch changes on entities +class WatchStream + def initialize(uri, http_options, http_headers, logger) + @uri = uri + @http_client = nil + @http_options = http_options + @http_headers = http_headers + @logger = logger + @path = "" + @logger.info "WatchStream::initialize @ #{Time.now.utc.iso8601}" + end + + def each + @finished = false + buffer = +"" + @logger.info "WatchStream::each:Opening TCP session @ #{Time.now.utc.iso8601}" + @http_client = Net::HTTP.start(@uri.host, @uri.port, @http_options) + if @http_client.nil? + raise "WatchStream::each:Failed to create HTTPClient object @ #{Time.now.utc.iso8601}" + end + @path = @uri.path + if @path.nil? || @path.empty? + raise "WatchStream::each:URI path should not be empty or nil @ #{Time.now.utc.iso8601}" + end + if !@uri.query.nil? && !@uri.query.empty? + @path += "?" + @uri.query + end + @logger.info "WatchStream::each:Making GET API call for Watch with path: #{@path} @ #{Time.now.utc.iso8601}" + @http_client.request_get(@path, @http_headers) do |response| + if !response.nil? && response.code.to_i > 300 + raise "WatchStream::each:Watch connection of the path: #{@path} failed with an http status code: #{response.code} @ #{Time.now.utc.iso8601}" + end + response.read_body do |chunk| + buffer << chunk + while (line = buffer.slice!(/.+\n/)) + yield(Yajl::Parser.parse(StringIO.new(line.chomp))) + end + end + end + rescue => e + raise e + end + + def finish + begin + @finished = true + @logger.info "WatchStream::finish:Closing HTTP session of the path:#{@path} @ #{Time.now.utc.iso8601}" + @http_client.finish if !@http_client.nil? && @http_client.started? 
+ rescue => error + @logger.warn "WatchStream::finish:Closing of HTTP session of the path: #{@path} failed with an error: #{error} @ #{Time.now.utc.iso8601}" + end + end +end diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index 542f342a6..5f57b465a 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -136,6 +136,12 @@ class Constants #To evaluate switching to Windows AMA 64KB impacts any existing customers MAX_RECORD_OR_FIELD_SIZE_FOR_TELEMETRY = 65536 + # FileName for MDM POD Inventory state + MDM_POD_INVENTORY_STATE_FILE = "/var/opt/microsoft/docker-cimprov/state/MDMPodInventoryState.json" + # FileName for NodeAllocatable Records state + NODE_ALLOCATABLE_RECORDS_STATE_FILE = "/var/opt/microsoft/docker-cimprov/state/NodeAllocatableRecords.json" + # Emit Stream size for Pod MDM metric + POD_MDM_EMIT_STREAM_BATCH_SIZE = 5000 # each record is 200 bytes, 5k records ~2MB # only used in windows in AAD MSI auth mode IMDS_TOKEN_PATH_FOR_WINDOWS = "c:/etc/imds-access-token/token" end diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index 5a52a089b..a3cbb5a85 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -7,11 +7,12 @@ module Fluent::Plugin class Kube_nodeInventory_Input < Input Fluent::Plugin.register_input("kube_nodes", self) - def initialize(kubernetesApiClient = nil, + def initialize(is_unit_test_mode = nil, kubernetesApiClient = nil, applicationInsightsUtility = nil, extensionUtils = nil, env = nil, - telemetry_flush_interval = nil) + telemetry_flush_interval = nil, + node_items_test_cache = nil) super() require "yaml" @@ -30,6 +31,8 @@ def initialize(kubernetesApiClient = nil, @extensionUtils = extensionUtils == nil ? ExtensionUtils : extensionUtils @env = env == nil ? ENV : env @TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = telemetry_flush_interval == nil ? Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES : telemetry_flush_interval + @is_unit_test_mode = is_unit_test_mode == nil ? 
false : true + @node_items_test_cache = node_items_test_cache # these defines were previously at class scope Moving them into the constructor so that they can be set by unit tests @@configMapMountPath = "/etc/config/settings/log-data-collection-settings" @@ -63,6 +66,9 @@ def initialize(kubernetesApiClient = nil, require_relative "constants" @NodeCache = NodeStatsCache.new() + @watchNodesThread = nil + @nodeItemsCache = {} + @nodeItemsCacheSizeKB = 0 end config_param :run_interval, :time, :default => 60 @@ -96,6 +102,8 @@ def start @finished = false @condition = ConditionVariable.new @mutex = Mutex.new + @nodeCacheMutex = Mutex.new + @watchNodesThread = Thread.new(&method(:watch_nodes)) @thread = Thread.new(&method(:run_periodic)) @@nodeTelemetryTimeTracker = DateTime.now.to_time.to_i @@nodeInventoryLatencyTelemetryTimeTracker = DateTime.now.to_time.to_i @@ -109,6 +117,7 @@ def shutdown @condition.signal } @thread.join + @watchNodesThread.join super # This super must be at the end of shutdown method end end @@ -147,43 +156,30 @@ def enumerate # Initializing continuation token to nil continuationToken = nil - $log.info("in_kube_nodes::enumerate : Getting nodes from Kube API @ #{Time.now.utc.iso8601}") - # KubernetesApiClient.getNodesResourceUri is a pure function, so call it from the actual module instead of from the mock - resourceUri = KubernetesApiClient.getNodesResourceUri("nodes?limit=#{@NODES_CHUNK_SIZE}") - continuationToken, nodeInventory = @kubernetesApiClient.getResourcesAndContinuationToken(resourceUri) - $log.info("in_kube_nodes::enumerate : Done getting nodes from Kube API @ #{Time.now.utc.iso8601}") + nodeInventory = {} + @nodeItemsCacheSizeKB = 0 + nodeCount = 0 + nodeInventory["items"] = getNodeItemsFromCache() nodesAPIChunkEndTime = (Time.now.to_f * 1000).to_i @nodesAPIE2ELatencyMs = (nodesAPIChunkEndTime - nodesAPIChunkStartTime) if (!nodeInventory.nil? && !nodeInventory.empty? && nodeInventory.key?("items") && !nodeInventory["items"].nil? && !nodeInventory["items"].empty?) - nodeCount += nodeInventory["items"].length - $log.info("in_kube_nodes::enumerate : number of node items :#{nodeInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") + nodeCount = nodeInventory["items"].length + $log.info("in_kube_nodes::enumerate : number of node items :#{nodeCount} from Kube API @ #{Time.now.utc.iso8601}") parse_and_emit_records(nodeInventory, batchTime) else $log.warn "in_kube_nodes::enumerate:Received empty nodeInventory" end - - #If we receive a continuation token, make calls, process and flush data until we have processed all data - while (!continuationToken.nil? && !continuationToken.empty?) - nodesAPIChunkStartTime = (Time.now.to_f * 1000).to_i - continuationToken, nodeInventory = @kubernetesApiClient.getResourcesAndContinuationToken(resourceUri + "&continue=#{continuationToken}") - nodesAPIChunkEndTime = (Time.now.to_f * 1000).to_i - @nodesAPIE2ELatencyMs = @nodesAPIE2ELatencyMs + (nodesAPIChunkEndTime - nodesAPIChunkStartTime) - if (!nodeInventory.nil? && !nodeInventory.empty? && nodeInventory.key?("items") && !nodeInventory["items"].nil? && !nodeInventory["items"].empty?) 
- nodeCount += nodeInventory["items"].length - $log.info("in_kube_nodes::enumerate : number of node items :#{nodeInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") - parse_and_emit_records(nodeInventory, batchTime) - else - $log.warn "in_kube_nodes::enumerate:Received empty nodeInventory" - end - end - @nodeInventoryE2EProcessingLatencyMs = ((Time.now.to_f * 1000).to_i - nodeInventoryStartTime) timeDifference = (DateTime.now.to_time.to_i - @@nodeInventoryLatencyTelemetryTimeTracker).abs timeDifferenceInMinutes = timeDifference / 60 if (timeDifferenceInMinutes >= @TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) @applicationInsightsUtility.sendMetricTelemetry("NodeInventoryE2EProcessingLatencyMs", @nodeInventoryE2EProcessingLatencyMs, {}) @applicationInsightsUtility.sendMetricTelemetry("NodesAPIE2ELatencyMs", @nodesAPIE2ELatencyMs, {}) - @applicationInsightsUtility.sendMetricTelemetry("NodeCount", nodeCount, {}) + telemetryProperties = {} + if KubernetesApiClient.isEmitCacheTelemetry() + telemetryProperties["NODE_ITEMS_CACHE_SIZE_KB"] = @nodeItemsCacheSizeKB + end + ApplicationInsightsUtility.sendMetricTelemetry("NodeCount", nodeCount, telemetryProperties) @@nodeInventoryLatencyTelemetryTimeTracker = DateTime.now.to_time.to_i end # Setting this to nil so that we dont hold memory until GC kicks in @@ -205,10 +201,19 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) insightsMetricsEventStream = Fluent::MultiEventStream.new kubePerfEventStream = Fluent::MultiEventStream.new @@istestvar = @env["ISTEST"] + nodeAllocatableRecords = {} #get node inventory nodeInventory["items"].each do |item| # node inventory nodeInventoryRecord = getNodeInventoryRecord(item, batchTime) + # node allocatble records for the kube perf plugin + nodeName = item["metadata"]["name"] + if !nodeName.nil? && !nodeName.empty? + nodeAllocatable = KubernetesApiClient.getNodeAllocatableValues(item) + if !nodeAllocatable.nil? && !nodeAllocatable.empty? + nodeAllocatableRecords[nodeName] = nodeAllocatable + end + end eventStream.add(emitTime, nodeInventoryRecord) if nodeInventoryRecord if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && eventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE $log.info("in_kube_node::parse_and_emit_records: number of node inventory records emitted #{eventStream.count} @ #{Time.now.utc.iso8601}") @@ -428,6 +433,17 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) $log.info("kubeNodeInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") end end + if !nodeAllocatableRecords.nil? && !nodeAllocatableRecords.empty? + nodeAllocatableRecordsJson = nodeAllocatableRecords.to_json + if !nodeAllocatableRecordsJson.empty? 
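The per-node allocatable values collected in this loop replace the old @@NodeMetrics class-level hash: instead of an in-process cache, the snapshot is serialized to the shared state file (Constants::NODE_ALLOCATABLE_RECORDS_STATE_FILE) so the perf path can fall back to node-level limits when a container does not declare its own. A sketch of the payload shape, keyed by node name, with invented resource values:

    require "json"
    node_allocatable_records = {
      "aks-nodepool1-24816391-vmss000000" => { "cpu" => "1900m", "memory" => "4568Mi" }
    }
    node_allocatable_records.to_json   # this JSON string is what gets written to the state file
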
+ @log.info "Writing node allocatable records to state file with size(bytes): #{nodeAllocatableRecordsJson.length}" + @log.info "in_kube_nodes::parse_and_emit_records:Start:writeNodeAllocatableRecords @ #{Time.now.utc.iso8601}" + writeNodeAllocatableRecords(nodeAllocatableRecordsJson) + @log.info "in_kube_nodes::parse_and_emit_records:End:writeNodeAllocatableRecords @ #{Time.now.utc.iso8601}" + end + nodeAllocatableRecordsJson = nil + nodeAllocatableRecords = nil + end rescue => errorStr $log.warn "Failed to retrieve node inventory: #{errorStr}" $log.debug_backtrace(errorStr.backtrace) @@ -577,6 +593,211 @@ def getNodeTelemetryProps(item) end return properties end + + def watch_nodes + if !@is_unit_test_mode + $log.info("in_kube_nodes::watch_nodes:Start @ #{Time.now.utc.iso8601}") + nodesResourceVersion = nil + loop do + begin + if nodesResourceVersion.nil? + # clear cache before filling the cache with list + @nodeCacheMutex.synchronize { + @nodeItemsCache.clear() + } + continuationToken = nil + resourceUri = KubernetesApiClient.getNodesResourceUri("nodes?limit=#{@NODES_CHUNK_SIZE}") + $log.info("in_kube_nodes::watch_nodes:Getting nodes from Kube API: #{resourceUri} @ #{Time.now.utc.iso8601}") + continuationToken, nodeInventory, responseCode = KubernetesApiClient.getResourcesAndContinuationTokenV2(resourceUri) + if responseCode.nil? || responseCode != "200" + $log.warn("in_kube_nodes::watch_nodes:Getting nodes from Kube API: #{resourceUri} failed with statuscode: #{responseCode} @ #{Time.now.utc.iso8601}") + else + $log.info("in_kube_nodes::watch_nodes:Done getting nodes from Kube API: #{resourceUri} @ #{Time.now.utc.iso8601}") + if (!nodeInventory.nil? && !nodeInventory.empty?) + nodesResourceVersion = nodeInventory["metadata"]["resourceVersion"] + if (nodeInventory.key?("items") && !nodeInventory["items"].nil? && !nodeInventory["items"].empty?) + $log.info("in_kube_nodes::watch_nodes: number of node items :#{nodeInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") + nodeInventory["items"].each do |item| + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + nodeItem = KubernetesApiClient.getOptimizedItem("nodes", item) + if !nodeItem.nil? && !nodeItem.empty? + @nodeCacheMutex.synchronize { + @nodeItemsCache[key] = nodeItem + } + else + $log.warn "in_kube_nodes::watch_nodes:Received nodeItem nil or empty @ #{Time.now.utc.iso8601}" + end + else + $log.warn "in_kube_nodes::watch_nodes:Received node uid either nil or empty @ #{Time.now.utc.iso8601}" + end + end + end + else + $log.warn "in_kube_nodes::watch_nodes:Received empty nodeInventory @ #{Time.now.utc.iso8601}" + end + while (!continuationToken.nil? && !continuationToken.empty?) + continuationToken, nodeInventory, responseCode = KubernetesApiClient.getResourcesAndContinuationTokenV2(resourceUri + "&continue=#{continuationToken}") + if responseCode.nil? || responseCode != "200" + $log.warn("in_kube_nodes::watch_nodes:Getting nodes from Kube API: #{resourceUri}&continue=#{continuationToken} failed with statuscode: #{responseCode} @ #{Time.now.utc.iso8601}") + nodesResourceVersion = nil # break, if any of the pagination call failed so that full cache can be rebuild with LIST again + break + else + if (!nodeInventory.nil? && !nodeInventory.empty?) + nodesResourceVersion = nodeInventory["metadata"]["resourceVersion"] + if (nodeInventory.key?("items") && !nodeInventory["items"].nil? && !nodeInventory["items"].empty?) 
+ $log.info("in_kube_nodes::watch_nodes : number of node items :#{nodeInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") + nodeInventory["items"].each do |item| + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + nodeItem = KubernetesApiClient.getOptimizedItem("nodes", item) + if !nodeItem.nil? && !nodeItem.empty? + @nodeCacheMutex.synchronize { + @nodeItemsCache[key] = nodeItem + } + else + $log.warn "in_kube_nodes::watch_nodes:Received nodeItem nil or empty @ #{Time.now.utc.iso8601}" + end + else + $log.warn "in_kube_nodes::watch_nodes:Received node uid either nil or empty @ #{Time.now.utc.iso8601}" + end + end + end + else + $log.warn "in_kube_nodes::watch_nodes:Received empty nodeInventory @ #{Time.now.utc.iso8601}" + end + end + end + end + end + if nodesResourceVersion.nil? || nodesResourceVersion.empty? || nodesResourceVersion == "0" + # https://github.com/kubernetes/kubernetes/issues/74022 + $log.warn("in_kube_nodes::watch_nodes:received nodesResourceVersion either nil or empty or 0 @ #{Time.now.utc.iso8601}") + nodesResourceVersion = nil # for the LIST to happen again + sleep(30) # do not overwhelm the api-server if api-server broken + else + begin + $log.info("in_kube_nodes::watch_nodes:Establishing Watch connection for nodes with resourceversion: #{nodesResourceVersion} @ #{Time.now.utc.iso8601}") + watcher = KubernetesApiClient.watch("nodes", resource_version: nodesResourceVersion, allow_watch_bookmarks: true) + if watcher.nil? + $log.warn("in_kube_nodes::watch_nodes:watch API returned nil watcher for watch connection with resource version: #{nodesResourceVersion} @ #{Time.now.utc.iso8601}") + else + watcher.each do |notice| + case notice["type"] + when "ADDED", "MODIFIED", "DELETED", "BOOKMARK" + item = notice["object"] + # extract latest resource version to use for watch reconnect + if !item.nil? && !item.empty? && + !item["metadata"].nil? && !item["metadata"].empty? && + !item["metadata"]["resourceVersion"].nil? && !item["metadata"]["resourceVersion"].empty? + nodesResourceVersion = item["metadata"]["resourceVersion"] + # $log.info("in_kube_nodes::watch_nodes: received event type: #{notice["type"]} with resource version: #{nodesResourceVersion} @ #{Time.now.utc.iso8601}") + else + $log.info("in_kube_nodes::watch_nodes: received event type with no resourceVersion hence stopping watcher to reconnect @ #{Time.now.utc.iso8601}") + nodesResourceVersion = nil + # We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + break + end + if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + nodeItem = KubernetesApiClient.getOptimizedItem("nodes", item) + if !nodeItem.nil? && !nodeItem.empty? + @nodeCacheMutex.synchronize { + @nodeItemsCache[key] = nodeItem + } + else + $log.warn "in_kube_nodes::watch_nodes:Received nodeItem nil or empty @ #{Time.now.utc.iso8601}" + end + else + $log.warn "in_kube_nodes::watch_nodes:Received node uid either nil or empty @ #{Time.now.utc.iso8601}" + end + elsif notice["type"] == "DELETED" + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? 
+ @nodeCacheMutex.synchronize { + @nodeItemsCache.delete(key) + } + end + end + when "ERROR" + nodesResourceVersion = nil + $log.warn("in_kube_nodes::watch_nodes:ERROR event with :#{notice["object"]} @ #{Time.now.utc.iso8601}") + break + else + nodesResourceVersion = nil + $log.warn("in_kube_nodes::watch_nodes:Unsupported event type #{notice["type"]} @ #{Time.now.utc.iso8601}") + break + end + end + end + rescue Net::ReadTimeout => errorStr + ## This expected if there is no activity on the cluster for more than readtimeout value used in the connection + # $log.warn("in_kube_nodes::watch_nodes:failed with an error: #{errorStr} @ #{Time.now.utc.iso8601}") + rescue => errorStr + $log.warn("in_kube_nodes::watch_nodes:failed with an error: #{errorStr} @ #{Time.now.utc.iso8601}") + nodesResourceVersion = nil + sleep(5) # do not overwhelm the api-server if api-server broken + ensure + watcher.finish if watcher + end + end + rescue => errorStr + $log.warn("in_kube_nodes::watch_nodes:failed with an error: #{errorStr} @ #{Time.now.utc.iso8601}") + nodesResourceVersion = nil + end + end + $log.info("in_kube_nodes::watch_nodes:End @ #{Time.now.utc.iso8601}") + end + end + + def writeNodeAllocatableRecords(nodeAllocatbleRecordsJson) + maxRetryCount = 5 + initialRetryDelaySecs = 0.5 + retryAttemptCount = 1 + begin + f = File.open(Constants::NODE_ALLOCATABLE_RECORDS_STATE_FILE, "w") + if !f.nil? + isAcquiredLock = f.flock(File::LOCK_EX | File::LOCK_NB) + raise "in_kube_nodes::writeNodeAllocatableRecords:Failed to acquire file lock" if !isAcquiredLock + startTime = (Time.now.to_f * 1000).to_i + f.write(nodeAllocatbleRecordsJson) + f.flush + timetakenMs = ((Time.now.to_f * 1000).to_i - startTime) + $log.info "in_kube_nodes::writeNodeAllocatableRecords:Successfull and with time taken(ms): #{timetakenMs}" + else + raise "in_kube_nodes::writeNodeAllocatableRecords:Failed to open file for write" + end + rescue => err + if retryAttemptCount < maxRetryCount + f.flock(File::LOCK_UN) if !f.nil? + f.close if !f.nil? + retryAttemptCount = retryAttemptCount + 1 + sleep (initialRetryDelaySecs * retryAttemptCount) + retry + end + $log.warn "in_kube_nodes::writeNodeAllocatableRecords failed with an error: #{err} after retries: #{maxRetryCount} @ #{Time.now.utc.iso8601}" + ApplicationInsightsUtility.sendExceptionTelemetry(err) + ensure + f.flock(File::LOCK_UN) if !f.nil? + f.close if !f.nil? 
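writeNodeAllocatableRecords takes a non-blocking exclusive flock and retries with a growing delay, so a reader that also takes the lock never observes a half-written file. The consumer is not part of this diff; under that assumption, a plausible reader could look like:

    require "json"
    records = File.open(Constants::NODE_ALLOCATABLE_RECORDS_STATE_FILE, "r") do |f|
      f.flock(File::LOCK_SH)   # shared lock: waits for the writer to finish, then reads
      JSON.parse(f.read)
    end
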
+ end + end + + def getNodeItemsFromCache() + nodeItems = {} + if @is_unit_test_mode + nodeItems = @node_items_test_cache + else + @nodeCacheMutex.synchronize { + nodeItems = @nodeItemsCache.values.clone + if KubernetesApiClient.isEmitCacheTelemetry() + @nodeItemsCacheSizeKB = @nodeItemsCache.to_s.length / 1024 + end + } + end + return nodeItems + end end # Kube_Node_Input class NodeStatsCache diff --git a/source/plugins/ruby/in_kube_nodes_test.rb b/source/plugins/ruby/in_kube_nodes_test.rb index 8f4984c6c..7d55ea32d 100644 --- a/source/plugins/ruby/in_kube_nodes_test.rb +++ b/source/plugins/ruby/in_kube_nodes_test.rb @@ -1,10 +1,10 @@ -require 'minitest/autorun' +require "minitest/autorun" -require 'fluent/test' -require 'fluent/test/driver/input' -require 'fluent/test/helpers' +require "fluent/test" +require "fluent/test/driver/input" +require "fluent/test/helpers" -require_relative 'in_kube_nodes.rb' +require_relative "in_kube_nodes.rb" class InKubeNodesTests < Minitest::Test include Fluent::Test::Helpers @@ -13,20 +13,22 @@ def setup Fluent::Test.setup end - def create_driver(conf = {}, kubernetesApiClient=nil, applicationInsightsUtility=nil, extensionUtils=nil, env=nil, telemetry_flush_interval=nil) - Fluent::Test::Driver::Input.new(Fluent::Plugin::Kube_nodeInventory_Input.new(kubernetesApiClient=kubernetesApiClient, - applicationInsightsUtility=applicationInsightsUtility, - extensionUtils=extensionUtils, - env=env)).configure(conf) + def create_driver(conf = {}, is_unit_test_mode = true, kubernetesApiClient = nil, applicationInsightsUtility = nil, extensionUtils = nil, env = nil, telemetry_flush_interval = nil, node_items_test_cache) + Fluent::Test::Driver::Input.new(Fluent::Plugin::Kube_nodeInventory_Input.new(is_unit_test_mode, kubernetesApiClient = kubernetesApiClient, + applicationInsightsUtility = applicationInsightsUtility, + extensionUtils = extensionUtils, + env = env, + telemetry_flush_interval, + node_items_test_cache)).configure(conf) end # Collection time of scrapped data will always be different. Overwrite it in any records returned by in_kube_ndes.rb def overwrite_collection_time(data) if data.key?("CollectionTime") - data["CollectionTime"] = "~CollectionTime~" + data["CollectionTime"] = "~CollectionTime~" end if data.key?("Timestamp") - data["Timestamp"] = "~Timestamp~" + data["Timestamp"] = "~Timestamp~" end return data end @@ -45,41 +47,46 @@ def test_basic_single_node # isAADMSIAuthMode() is called multiple times and we don't really care how many time it is called. 
This is the same as mocking # but it doesn't track how many times isAADMSIAuthMode is called def extensionUtils.isAADMSIAuthMode - false + false end nodes_api_response = eval(File.open("test/unit-tests/canned-api-responses/kube-nodes.txt").read) - kubeApiClient.expect(:getResourcesAndContinuationToken, [nil, nodes_api_response], ["nodes?limit=200"]) + node_items_test_cache = nodes_api_response["items"] + kubeApiClient.expect(:getClusterName, "/cluster-name") kubeApiClient.expect(:getClusterId, "/cluster-id") + def appInsightsUtil.sendExceptionTelemetry(exception) + if exception.to_s != "undefined method `[]' for nil:NilClass" + raise "an unexpected exception has occured" + end + end config = "run_interval 999999999" # only run once - d = create_driver(config, kubernetesApiClient=kubeApiClient, applicationInsightsUtility=appInsightsUtil, extensionUtils=extensionUtils, env=env) + d = create_driver(config, true, kubernetesApiClient = kubeApiClient, applicationInsightsUtility = appInsightsUtil, extensionUtils = extensionUtils, env = env, node_items_test_cache) d.instance.start d.instance.enumerate d.run(timeout: 99999) # Input plugins decide when to run, so we have to give it enough time to run - - expected_responses = { ["oneagent.containerInsights.KUBE_NODE_INVENTORY_BLOB", overwrite_collection_time({"CollectionTime"=>"2021-08-17T20:24:18Z", "Computer"=>"aks-nodepool1-24816391-vmss000000", "ClusterName"=>"/cluster-name", "ClusterId"=>"/cluster-id", "CreationTimeStamp"=>"2021-07-21T23:40:14Z", "Labels"=>[{"agentpool"=>"nodepool1", "beta.kubernetes.io/arch"=>"amd64", "beta.kubernetes.io/instance-type"=>"Standard_DS2_v2", "beta.kubernetes.io/os"=>"linux", "failure-domain.beta.kubernetes.io/region"=>"westus2", "failure-domain.beta.kubernetes.io/zone"=>"0", "kubernetes.azure.com/cluster"=>"MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode"=>"system", "kubernetes.azure.com/node-image-version"=>"AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku"=>"Ubuntu", "kubernetes.azure.com/role"=>"agent", "kubernetes.io/arch"=>"amd64", "kubernetes.io/hostname"=>"aks-nodepool1-24816391-vmss000000", "kubernetes.io/os"=>"linux", "kubernetes.io/role"=>"agent", "node-role.kubernetes.io/agent"=>"", "node.kubernetes.io/instance-type"=>"Standard_DS2_v2", "storageprofile"=>"managed", "storagetier"=>"Premium_LRS", "topology.kubernetes.io/region"=>"westus2", "topology.kubernetes.io/zone"=>"0"}], "Status"=>"Ready", "KubernetesProviderID"=>"azure", "LastTransitionTimeReady"=>"2021-07-21T23:40:24Z", "KubeletVersion"=>"v1.19.11", "KubeProxyVersion"=>"v1.19.11"})] => true, - ["mdm.kubenodeinventory", overwrite_collection_time({"CollectionTime"=>"2021-08-17T20:24:18Z", "Computer"=>"aks-nodepool1-24816391-vmss000000", "ClusterName"=>"/cluster-name", "ClusterId"=>"/cluster-id", "CreationTimeStamp"=>"2021-07-21T23:40:14Z", "Labels"=>[{"agentpool"=>"nodepool1", "beta.kubernetes.io/arch"=>"amd64", "beta.kubernetes.io/instance-type"=>"Standard_DS2_v2", "beta.kubernetes.io/os"=>"linux", "failure-domain.beta.kubernetes.io/region"=>"westus2", "failure-domain.beta.kubernetes.io/zone"=>"0", "kubernetes.azure.com/cluster"=>"MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode"=>"system", "kubernetes.azure.com/node-image-version"=>"AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku"=>"Ubuntu", "kubernetes.azure.com/role"=>"agent", "kubernetes.io/arch"=>"amd64", "kubernetes.io/hostname"=>"aks-nodepool1-24816391-vmss000000", "kubernetes.io/os"=>"linux", 
"kubernetes.io/role"=>"agent", "node-role.kubernetes.io/agent"=>"", "node.kubernetes.io/instance-type"=>"Standard_DS2_v2", "storageprofile"=>"managed", "storagetier"=>"Premium_LRS", "topology.kubernetes.io/region"=>"westus2", "topology.kubernetes.io/zone"=>"0"}], "Status"=>"Ready", "KubernetesProviderID"=>"azure", "LastTransitionTimeReady"=>"2021-07-21T23:40:24Z", "KubeletVersion"=>"v1.19.11", "KubeProxyVersion"=>"v1.19.11"})] => true, - ["oneagent.containerInsights.CONTAINER_NODE_INVENTORY_BLOB", overwrite_collection_time({"CollectionTime"=>"2021-08-17T20:24:18Z", "Computer"=>"aks-nodepool1-24816391-vmss000000", "OperatingSystem"=>"Ubuntu 18.04.5 LTS", "DockerVersion"=>"containerd://1.4.4+azure"})] => true, - ["oneagent.containerInsights.LINUX_PERF_BLOB", overwrite_collection_time({"Timestamp"=>"2021-08-17T20:24:18Z", "Host"=>"aks-nodepool1-24816391-vmss000000", "Computer"=>"aks-nodepool1-24816391-vmss000000", "ObjectName"=>"K8SNode", "InstanceName"=>"None/aks-nodepool1-24816391-vmss000000", "json_Collections"=>"[{\"CounterName\":\"cpuAllocatableNanoCores\",\"Value\":1900000000.0}]"})] => true, - ["oneagent.containerInsights.LINUX_PERF_BLOB", overwrite_collection_time({"Timestamp"=>"2021-08-17T20:24:18Z", "Host"=>"aks-nodepool1-24816391-vmss000000", "Computer"=>"aks-nodepool1-24816391-vmss000000", "ObjectName"=>"K8SNode", "InstanceName"=>"None/aks-nodepool1-24816391-vmss000000", "json_Collections"=>"[{\"CounterName\":\"memoryAllocatableBytes\",\"Value\":4787511296.0}]"})] => true, - ["oneagent.containerInsights.LINUX_PERF_BLOB", overwrite_collection_time({"Timestamp"=>"2021-08-17T20:24:18Z", "Host"=>"aks-nodepool1-24816391-vmss000000", "Computer"=>"aks-nodepool1-24816391-vmss000000", "ObjectName"=>"K8SNode", "InstanceName"=>"None/aks-nodepool1-24816391-vmss000000", "json_Collections"=>"[{\"CounterName\":\"cpuCapacityNanoCores\",\"Value\":2000000000.0}]"})] => true, - ["oneagent.containerInsights.LINUX_PERF_BLOB", overwrite_collection_time({"Timestamp"=>"2021-08-17T20:24:18Z", "Host"=>"aks-nodepool1-24816391-vmss000000", "Computer"=>"aks-nodepool1-24816391-vmss000000", "ObjectName"=>"K8SNode", "InstanceName"=>"None/aks-nodepool1-24816391-vmss000000", "json_Collections"=>"[{\"CounterName\":\"memoryCapacityBytes\",\"Value\":7291510784.0}]"})] => true} + expected_responses = { ["oneagent.containerInsights.KUBE_NODE_INVENTORY_BLOB", overwrite_collection_time({ "CollectionTime" => "2021-08-17T20:24:18Z", "Computer" => "aks-nodepool1-24816391-vmss000000", "ClusterName" => "/cluster-name", "ClusterId" => "/cluster-id", "CreationTimeStamp" => "2021-07-21T23:40:14Z", "Labels" => [{ "agentpool" => "nodepool1", "beta.kubernetes.io/arch" => "amd64", "beta.kubernetes.io/instance-type" => "Standard_DS2_v2", "beta.kubernetes.io/os" => "linux", "failure-domain.beta.kubernetes.io/region" => "westus2", "failure-domain.beta.kubernetes.io/zone" => "0", "kubernetes.azure.com/cluster" => "MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode" => "system", "kubernetes.azure.com/node-image-version" => "AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku" => "Ubuntu", "kubernetes.azure.com/role" => "agent", "kubernetes.io/arch" => "amd64", "kubernetes.io/hostname" => "aks-nodepool1-24816391-vmss000000", "kubernetes.io/os" => "linux", "kubernetes.io/role" => "agent", "node-role.kubernetes.io/agent" => "", "node.kubernetes.io/instance-type" => "Standard_DS2_v2", "storageprofile" => "managed", "storagetier" => "Premium_LRS", "topology.kubernetes.io/region" => "westus2", 
"topology.kubernetes.io/zone" => "0" }], "Status" => "Ready", "KubernetesProviderID" => "azure", "LastTransitionTimeReady" => "2021-07-21T23:40:24Z", "KubeletVersion" => "v1.19.11", "KubeProxyVersion" => "v1.19.11" })] => true, + ["mdm.kubenodeinventory", overwrite_collection_time({ "CollectionTime" => "2021-08-17T20:24:18Z", "Computer" => "aks-nodepool1-24816391-vmss000000", "ClusterName" => "/cluster-name", "ClusterId" => "/cluster-id", "CreationTimeStamp" => "2021-07-21T23:40:14Z", "Labels" => [{ "agentpool" => "nodepool1", "beta.kubernetes.io/arch" => "amd64", "beta.kubernetes.io/instance-type" => "Standard_DS2_v2", "beta.kubernetes.io/os" => "linux", "failure-domain.beta.kubernetes.io/region" => "westus2", "failure-domain.beta.kubernetes.io/zone" => "0", "kubernetes.azure.com/cluster" => "MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode" => "system", "kubernetes.azure.com/node-image-version" => "AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku" => "Ubuntu", "kubernetes.azure.com/role" => "agent", "kubernetes.io/arch" => "amd64", "kubernetes.io/hostname" => "aks-nodepool1-24816391-vmss000000", "kubernetes.io/os" => "linux", "kubernetes.io/role" => "agent", "node-role.kubernetes.io/agent" => "", "node.kubernetes.io/instance-type" => "Standard_DS2_v2", "storageprofile" => "managed", "storagetier" => "Premium_LRS", "topology.kubernetes.io/region" => "westus2", "topology.kubernetes.io/zone" => "0" }], "Status" => "Ready", "KubernetesProviderID" => "azure", "LastTransitionTimeReady" => "2021-07-21T23:40:24Z", "KubeletVersion" => "v1.19.11", "KubeProxyVersion" => "v1.19.11" })] => true, + ["oneagent.containerInsights.CONTAINER_NODE_INVENTORY_BLOB", overwrite_collection_time({ "CollectionTime" => "2021-08-17T20:24:18Z", "Computer" => "aks-nodepool1-24816391-vmss000000", "OperatingSystem" => "Ubuntu 18.04.5 LTS", "DockerVersion" => "containerd://1.4.4+azure" })] => true, + ["oneagent.containerInsights.LINUX_PERF_BLOB", overwrite_collection_time({ "Timestamp" => "2021-08-17T20:24:18Z", "Host" => "aks-nodepool1-24816391-vmss000000", "Computer" => "aks-nodepool1-24816391-vmss000000", "ObjectName" => "K8SNode", "InstanceName" => "None/aks-nodepool1-24816391-vmss000000", "json_Collections" => "[{\"CounterName\":\"cpuAllocatableNanoCores\",\"Value\":1900000000.0}]" })] => true, + ["oneagent.containerInsights.LINUX_PERF_BLOB", overwrite_collection_time({ "Timestamp" => "2021-08-17T20:24:18Z", "Host" => "aks-nodepool1-24816391-vmss000000", "Computer" => "aks-nodepool1-24816391-vmss000000", "ObjectName" => "K8SNode", "InstanceName" => "None/aks-nodepool1-24816391-vmss000000", "json_Collections" => "[{\"CounterName\":\"memoryAllocatableBytes\",\"Value\":4787511296.0}]" })] => true, + ["oneagent.containerInsights.LINUX_PERF_BLOB", overwrite_collection_time({ "Timestamp" => "2021-08-17T20:24:18Z", "Host" => "aks-nodepool1-24816391-vmss000000", "Computer" => "aks-nodepool1-24816391-vmss000000", "ObjectName" => "K8SNode", "InstanceName" => "None/aks-nodepool1-24816391-vmss000000", "json_Collections" => "[{\"CounterName\":\"cpuCapacityNanoCores\",\"Value\":2000000000.0}]" })] => true, + ["oneagent.containerInsights.LINUX_PERF_BLOB", overwrite_collection_time({ "Timestamp" => "2021-08-17T20:24:18Z", "Host" => "aks-nodepool1-24816391-vmss000000", "Computer" => "aks-nodepool1-24816391-vmss000000", "ObjectName" => "K8SNode", "InstanceName" => "None/aks-nodepool1-24816391-vmss000000", "json_Collections" => "[{\"CounterName\":\"memoryCapacityBytes\",\"Value\":7291510784.0}]" })] 
=> true } d.events.each do |tag, time, record| - cleaned_record = overwrite_collection_time record - if expected_responses.key?([tag, cleaned_record]) - expected_responses[[tag, cleaned_record]] = true - else - assert(false, "got unexpected record") - end + cleaned_record = overwrite_collection_time record + if expected_responses.key?([tag, cleaned_record]) + expected_responses[[tag, cleaned_record]] = true + else + assert(false, "got unexpected record: #{cleaned_record}") + end end expected_responses.each do |key, val| - assert(val, "expected record not emitted: #{key}") + assert(val, "expected record not emitted: #{key}") end # make sure all mocked methods were called the expected number of times @@ -104,7 +111,7 @@ def test_malformed_node_spec # isAADMSIAuthMode() is called multiple times and we don't really care how many time it is called. This is the same as mocking # but it doesn't track how many times isAADMSIAuthMode is called def extensionUtils.isAADMSIAuthMode - false + false end # Set up the KubernetesApiClient Mock. Note: most of the functions in KubernetesApiClient are pure (access no @@ -112,16 +119,17 @@ def extensionUtils.isAADMSIAuthMode # more brittle). Instead, in_kube_nodes bypasses the mock and directly calls these functions in KubernetesApiClient. # Ideally the pure functions in KubernetesApiClient would be refactored into their own file to reduce confusion. nodes_api_response = eval(File.open("test/unit-tests/canned-api-responses/kube-nodes-malformed.txt").read) - kubeApiClient.expect(:getResourcesAndContinuationToken, [nil, nodes_api_response], ["nodes?limit=200"]) + node_items_test_cache = nodes_api_response["items"] + kubeApiClient.expect(:getClusterName, "/cluster-name") kubeApiClient.expect(:getClusterName, "/cluster-name") kubeApiClient.expect(:getClusterId, "/cluster-id") kubeApiClient.expect(:getClusterId, "/cluster-id") def appInsightsUtil.sendExceptionTelemetry(exception) - if exception.to_s != "undefined method `[]' for nil:NilClass" - raise "an unexpected exception has occured" - end + if exception.to_s != "undefined method `[]' for nil:NilClass" + raise "an unexpected exception has occured" + end end # This test doesn't care if metric telemetry is sent properly. Looking for an unnecessary value would make it needlessly rigid @@ -130,38 +138,38 @@ def appInsightsUtil.sendMetricTelemetry(a, b, c) config = "run_interval 999999999" # only run once - d = create_driver(config, kubernetesApiClient=kubeApiClient, applicationInsightsUtility=appInsightsUtil, extensionUtils=extensionUtils, env=env, telemetry_flush_interval=0) + d = create_driver(config, true, kubernetesApiClient = kubeApiClient, applicationInsightsUtility = appInsightsUtil, extensionUtils = extensionUtils, env = env, telemetry_flush_interval = 0, node_items_test_cache) d.instance.start d.instance.enumerate d.run(timeout: 99999) #TODO: is this necessary? 
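Both tests lean on Minitest::Mock's expect/verify contract; a minimal sketch of that contract in isolation, reusing the two accessor names mocked above (everything else here is illustrative):

    require "minitest/mock"

    kube_api_client = Minitest::Mock.new
    # Each expect registers one anticipated call and the value it should return.
    kube_api_client.expect(:getClusterName, "/cluster-name")
    kube_api_client.expect(:getClusterId, "/cluster-id")

    # Code under test would make exactly these calls:
    puts kube_api_client.getClusterName   # => "/cluster-name"
    puts kube_api_client.getClusterId     # => "/cluster-id"

    # verify raises MockExpectationError if any registered expectation was never satisfied.
    kube_api_client.verify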
expected_responses = { - ["oneagent.containerInsights.KUBE_NODE_INVENTORY_BLOB", {"CollectionTime"=>"~CollectionTime~", "Computer"=>"correct-node", "ClusterName"=>"/cluster-name", "ClusterId"=>"/cluster-id", "CreationTimeStamp"=>"2021-07-21T23:40:14Z", "Labels"=>[{"agentpool"=>"nodepool1", "beta.kubernetes.io/arch"=>"amd64", "beta.kubernetes.io/instance-type"=>"Standard_DS2_v2", "beta.kubernetes.io/os"=>"linux", "failure-domain.beta.kubernetes.io/region"=>"westus2", "failure-domain.beta.kubernetes.io/zone"=>"0", "kubernetes.azure.com/cluster"=>"MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode"=>"system", "kubernetes.azure.com/node-image-version"=>"AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku"=>"Ubuntu", "kubernetes.azure.com/role"=>"agent", "kubernetes.io/arch"=>"amd64", "kubernetes.io/hostname"=>"correct-node", "kubernetes.io/os"=>"linux", "kubernetes.io/role"=>"agent", "node-role.kubernetes.io/agent"=>"", "node.kubernetes.io/instance-type"=>"Standard_DS2_v2", "storageprofile"=>"managed", "storagetier"=>"Premium_LRS", "topology.kubernetes.io/region"=>"westus2", "topology.kubernetes.io/zone"=>"0"}], "Status"=>"Ready", "KubernetesProviderID"=>"azure", "LastTransitionTimeReady"=>"2021-07-21T23:40:24Z", "KubeletVersion"=>"v1.19.11", "KubeProxyVersion"=>"v1.19.11"}] => false, - ["mdm.kubenodeinventory", {"CollectionTime"=>"~CollectionTime~", "Computer"=>"correct-node", "ClusterName"=>"/cluster-name", "ClusterId"=>"/cluster-id", "CreationTimeStamp"=>"2021-07-21T23:40:14Z", "Labels"=>[{"agentpool"=>"nodepool1", "beta.kubernetes.io/arch"=>"amd64", "beta.kubernetes.io/instance-type"=>"Standard_DS2_v2", "beta.kubernetes.io/os"=>"linux", "failure-domain.beta.kubernetes.io/region"=>"westus2", "failure-domain.beta.kubernetes.io/zone"=>"0", "kubernetes.azure.com/cluster"=>"MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode"=>"system", "kubernetes.azure.com/node-image-version"=>"AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku"=>"Ubuntu", "kubernetes.azure.com/role"=>"agent", "kubernetes.io/arch"=>"amd64", "kubernetes.io/hostname"=>"correct-node", "kubernetes.io/os"=>"linux", "kubernetes.io/role"=>"agent", "node-role.kubernetes.io/agent"=>"", "node.kubernetes.io/instance-type"=>"Standard_DS2_v2", "storageprofile"=>"managed", "storagetier"=>"Premium_LRS", "topology.kubernetes.io/region"=>"westus2", "topology.kubernetes.io/zone"=>"0"}], "Status"=>"Ready", "KubernetesProviderID"=>"azure", "LastTransitionTimeReady"=>"2021-07-21T23:40:24Z", "KubeletVersion"=>"v1.19.11", "KubeProxyVersion"=>"v1.19.11"}] => false, - ["oneagent.containerInsights.CONTAINER_NODE_INVENTORY_BLOB", {"CollectionTime"=>"~CollectionTime~", "Computer"=>"correct-node", "OperatingSystem"=>"Ubuntu 18.04.5 LTS", "DockerVersion"=>"containerd://1.4.4+azure"}] => false, - ["oneagent.containerInsights.LINUX_PERF_BLOB", {"Timestamp"=>"~Timestamp~", "Host"=>"correct-node", "Computer"=>"correct-node", "ObjectName"=>"K8SNode", "InstanceName"=>"None/correct-node", "json_Collections"=>"[{\"CounterName\":\"cpuAllocatableNanoCores\",\"Value\":1000000.0}]"}] => false, - ["oneagent.containerInsights.LINUX_PERF_BLOB", {"Timestamp"=>"~Timestamp~", "Host"=>"correct-node", "Computer"=>"correct-node", "ObjectName"=>"K8SNode", "InstanceName"=>"None/correct-node", "json_Collections"=>"[{\"CounterName\":\"memoryAllocatableBytes\",\"Value\":444.0}]"}] => false, - ["oneagent.containerInsights.LINUX_PERF_BLOB", {"Timestamp"=>"~Timestamp~", "Host"=>"correct-node", "Computer"=>"correct-node", 
"ObjectName"=>"K8SNode", "InstanceName"=>"None/correct-node", "json_Collections"=>"[{\"CounterName\":\"cpuCapacityNanoCores\",\"Value\":2000000.0}]"}] => false, - ["oneagent.containerInsights.LINUX_PERF_BLOB", {"Timestamp"=>"~Timestamp~", "Host"=>"correct-node", "Computer"=>"correct-node", "ObjectName"=>"K8SNode", "InstanceName"=>"None/correct-node", "json_Collections"=>"[{\"CounterName\":\"memoryCapacityBytes\",\"Value\":555.0}]"}] => false, - - # these records are for the malformed node (it doesn't have limits or requests set so there are no PERF records) - ["oneagent.containerInsights.KUBE_NODE_INVENTORY_BLOB", {"CollectionTime"=>"~CollectionTime~", "Computer"=>"malformed-node", "ClusterName"=>"/cluster-name", "ClusterId"=>"/cluster-id", "CreationTimeStamp"=>"2021-07-21T23:40:14Z", "Labels"=>[{"agentpool"=>"nodepool1", "beta.kubernetes.io/arch"=>"amd64", "beta.kubernetes.io/instance-type"=>"Standard_DS2_v2", "beta.kubernetes.io/os"=>"linux", "failure-domain.beta.kubernetes.io/region"=>"westus2", "failure-domain.beta.kubernetes.io/zone"=>"0", "kubernetes.azure.com/cluster"=>"MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode"=>"system", "kubernetes.azure.com/node-image-version"=>"AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku"=>"Ubuntu", "kubernetes.azure.com/role"=>"agent", "kubernetes.io/arch"=>"amd64", "kubernetes.io/hostname"=>"malformed-node", "kubernetes.io/os"=>"linux", "kubernetes.io/role"=>"agent", "node-role.kubernetes.io/agent"=>"", "node.kubernetes.io/instance-type"=>"Standard_DS2_v2", "storageprofile"=>"managed", "storagetier"=>"Premium_LRS", "topology.kubernetes.io/region"=>"westus2", "topology.kubernetes.io/zone"=>"0"}], "Status"=>"Ready", "KubernetesProviderID"=>"azure", "LastTransitionTimeReady"=>"2021-07-21T23:40:24Z", "KubeletVersion"=>"v1.19.11", "KubeProxyVersion"=>"v1.19.11"}] => false, - ["mdm.kubenodeinventory", {"CollectionTime"=>"~CollectionTime~", "Computer"=>"malformed-node", "ClusterName"=>"/cluster-name", "ClusterId"=>"/cluster-id", "CreationTimeStamp"=>"2021-07-21T23:40:14Z", "Labels"=>[{"agentpool"=>"nodepool1", "beta.kubernetes.io/arch"=>"amd64", "beta.kubernetes.io/instance-type"=>"Standard_DS2_v2", "beta.kubernetes.io/os"=>"linux", "failure-domain.beta.kubernetes.io/region"=>"westus2", "failure-domain.beta.kubernetes.io/zone"=>"0", "kubernetes.azure.com/cluster"=>"MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode"=>"system", "kubernetes.azure.com/node-image-version"=>"AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku"=>"Ubuntu", "kubernetes.azure.com/role"=>"agent", "kubernetes.io/arch"=>"amd64", "kubernetes.io/hostname"=>"malformed-node", "kubernetes.io/os"=>"linux", "kubernetes.io/role"=>"agent", "node-role.kubernetes.io/agent"=>"", "node.kubernetes.io/instance-type"=>"Standard_DS2_v2", "storageprofile"=>"managed", "storagetier"=>"Premium_LRS", "topology.kubernetes.io/region"=>"westus2", "topology.kubernetes.io/zone"=>"0"}], "Status"=>"Ready", "KubernetesProviderID"=>"azure", "LastTransitionTimeReady"=>"2021-07-21T23:40:24Z", "KubeletVersion"=>"v1.19.11", "KubeProxyVersion"=>"v1.19.11"}] => false, - ["oneagent.containerInsights.CONTAINER_NODE_INVENTORY_BLOB", {"CollectionTime"=>"~CollectionTime~", "Computer"=>"malformed-node", "OperatingSystem"=>"Ubuntu 18.04.5 LTS", "DockerVersion"=>"containerd://1.4.4+azure"}] => false + ["oneagent.containerInsights.KUBE_NODE_INVENTORY_BLOB", { "CollectionTime" => "~CollectionTime~", "Computer" => "correct-node", "ClusterName" => "/cluster-name", 
"ClusterId" => "/cluster-id", "CreationTimeStamp" => "2021-07-21T23:40:14Z", "Labels" => [{ "agentpool" => "nodepool1", "beta.kubernetes.io/arch" => "amd64", "beta.kubernetes.io/instance-type" => "Standard_DS2_v2", "beta.kubernetes.io/os" => "linux", "failure-domain.beta.kubernetes.io/region" => "westus2", "failure-domain.beta.kubernetes.io/zone" => "0", "kubernetes.azure.com/cluster" => "MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode" => "system", "kubernetes.azure.com/node-image-version" => "AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku" => "Ubuntu", "kubernetes.azure.com/role" => "agent", "kubernetes.io/arch" => "amd64", "kubernetes.io/hostname" => "correct-node", "kubernetes.io/os" => "linux", "kubernetes.io/role" => "agent", "node-role.kubernetes.io/agent" => "", "node.kubernetes.io/instance-type" => "Standard_DS2_v2", "storageprofile" => "managed", "storagetier" => "Premium_LRS", "topology.kubernetes.io/region" => "westus2", "topology.kubernetes.io/zone" => "0" }], "Status" => "Ready", "KubernetesProviderID" => "azure", "LastTransitionTimeReady" => "2021-07-21T23:40:24Z", "KubeletVersion" => "v1.19.11", "KubeProxyVersion" => "v1.19.11" }] => false, + ["mdm.kubenodeinventory", { "CollectionTime" => "~CollectionTime~", "Computer" => "correct-node", "ClusterName" => "/cluster-name", "ClusterId" => "/cluster-id", "CreationTimeStamp" => "2021-07-21T23:40:14Z", "Labels" => [{ "agentpool" => "nodepool1", "beta.kubernetes.io/arch" => "amd64", "beta.kubernetes.io/instance-type" => "Standard_DS2_v2", "beta.kubernetes.io/os" => "linux", "failure-domain.beta.kubernetes.io/region" => "westus2", "failure-domain.beta.kubernetes.io/zone" => "0", "kubernetes.azure.com/cluster" => "MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode" => "system", "kubernetes.azure.com/node-image-version" => "AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku" => "Ubuntu", "kubernetes.azure.com/role" => "agent", "kubernetes.io/arch" => "amd64", "kubernetes.io/hostname" => "correct-node", "kubernetes.io/os" => "linux", "kubernetes.io/role" => "agent", "node-role.kubernetes.io/agent" => "", "node.kubernetes.io/instance-type" => "Standard_DS2_v2", "storageprofile" => "managed", "storagetier" => "Premium_LRS", "topology.kubernetes.io/region" => "westus2", "topology.kubernetes.io/zone" => "0" }], "Status" => "Ready", "KubernetesProviderID" => "azure", "LastTransitionTimeReady" => "2021-07-21T23:40:24Z", "KubeletVersion" => "v1.19.11", "KubeProxyVersion" => "v1.19.11" }] => false, + ["oneagent.containerInsights.CONTAINER_NODE_INVENTORY_BLOB", { "CollectionTime" => "~CollectionTime~", "Computer" => "correct-node", "OperatingSystem" => "Ubuntu 18.04.5 LTS", "DockerVersion" => "containerd://1.4.4+azure" }] => false, + ["oneagent.containerInsights.LINUX_PERF_BLOB", { "Timestamp" => "~Timestamp~", "Host" => "correct-node", "Computer" => "correct-node", "ObjectName" => "K8SNode", "InstanceName" => "None/correct-node", "json_Collections" => "[{\"CounterName\":\"cpuAllocatableNanoCores\",\"Value\":1000000.0}]" }] => false, + ["oneagent.containerInsights.LINUX_PERF_BLOB", { "Timestamp" => "~Timestamp~", "Host" => "correct-node", "Computer" => "correct-node", "ObjectName" => "K8SNode", "InstanceName" => "None/correct-node", "json_Collections" => "[{\"CounterName\":\"memoryAllocatableBytes\",\"Value\":444.0}]" }] => false, + ["oneagent.containerInsights.LINUX_PERF_BLOB", { "Timestamp" => "~Timestamp~", "Host" => "correct-node", "Computer" => "correct-node", 
"ObjectName" => "K8SNode", "InstanceName" => "None/correct-node", "json_Collections" => "[{\"CounterName\":\"cpuCapacityNanoCores\",\"Value\":2000000.0}]" }] => false, + ["oneagent.containerInsights.LINUX_PERF_BLOB", { "Timestamp" => "~Timestamp~", "Host" => "correct-node", "Computer" => "correct-node", "ObjectName" => "K8SNode", "InstanceName" => "None/correct-node", "json_Collections" => "[{\"CounterName\":\"memoryCapacityBytes\",\"Value\":555.0}]" }] => false, + + # these records are for the malformed node (it doesn't have limits or requests set so there are no PERF records) + ["oneagent.containerInsights.KUBE_NODE_INVENTORY_BLOB", { "CollectionTime" => "~CollectionTime~", "Computer" => "malformed-node", "ClusterName" => "/cluster-name", "ClusterId" => "/cluster-id", "CreationTimeStamp" => "2021-07-21T23:40:14Z", "Labels" => [{ "agentpool" => "nodepool1", "beta.kubernetes.io/arch" => "amd64", "beta.kubernetes.io/instance-type" => "Standard_DS2_v2", "beta.kubernetes.io/os" => "linux", "failure-domain.beta.kubernetes.io/region" => "westus2", "failure-domain.beta.kubernetes.io/zone" => "0", "kubernetes.azure.com/cluster" => "MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode" => "system", "kubernetes.azure.com/node-image-version" => "AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku" => "Ubuntu", "kubernetes.azure.com/role" => "agent", "kubernetes.io/arch" => "amd64", "kubernetes.io/hostname" => "malformed-node", "kubernetes.io/os" => "linux", "kubernetes.io/role" => "agent", "node-role.kubernetes.io/agent" => "", "node.kubernetes.io/instance-type" => "Standard_DS2_v2", "storageprofile" => "managed", "storagetier" => "Premium_LRS", "topology.kubernetes.io/region" => "westus2", "topology.kubernetes.io/zone" => "0" }], "Status" => "Ready", "KubernetesProviderID" => "azure", "LastTransitionTimeReady" => "2021-07-21T23:40:24Z", "KubeletVersion" => "v1.19.11", "KubeProxyVersion" => "v1.19.11" }] => false, + ["mdm.kubenodeinventory", { "CollectionTime" => "~CollectionTime~", "Computer" => "malformed-node", "ClusterName" => "/cluster-name", "ClusterId" => "/cluster-id", "CreationTimeStamp" => "2021-07-21T23:40:14Z", "Labels" => [{ "agentpool" => "nodepool1", "beta.kubernetes.io/arch" => "amd64", "beta.kubernetes.io/instance-type" => "Standard_DS2_v2", "beta.kubernetes.io/os" => "linux", "failure-domain.beta.kubernetes.io/region" => "westus2", "failure-domain.beta.kubernetes.io/zone" => "0", "kubernetes.azure.com/cluster" => "MC_davidaks16_davidaks16_westus2", "kubernetes.azure.com/mode" => "system", "kubernetes.azure.com/node-image-version" => "AKSUbuntu-1804gen2containerd-2021.07.03", "kubernetes.azure.com/os-sku" => "Ubuntu", "kubernetes.azure.com/role" => "agent", "kubernetes.io/arch" => "amd64", "kubernetes.io/hostname" => "malformed-node", "kubernetes.io/os" => "linux", "kubernetes.io/role" => "agent", "node-role.kubernetes.io/agent" => "", "node.kubernetes.io/instance-type" => "Standard_DS2_v2", "storageprofile" => "managed", "storagetier" => "Premium_LRS", "topology.kubernetes.io/region" => "westus2", "topology.kubernetes.io/zone" => "0" }], "Status" => "Ready", "KubernetesProviderID" => "azure", "LastTransitionTimeReady" => "2021-07-21T23:40:24Z", "KubeletVersion" => "v1.19.11", "KubeProxyVersion" => "v1.19.11" }] => false, + ["oneagent.containerInsights.CONTAINER_NODE_INVENTORY_BLOB", { "CollectionTime" => "~CollectionTime~", "Computer" => "malformed-node", "OperatingSystem" => "Ubuntu 18.04.5 LTS", "DockerVersion" => "containerd://1.4.4+azure" }] => 
false, } d.events.each do |tag, time, record| - cleaned_record = overwrite_collection_time record - if expected_responses.key?([tag, cleaned_record]) - expected_responses[[tag, cleaned_record]] = true - end - # don't do anything if an unexpected record was emitted. Since the node spec is malformed, there will be some partial data. - # we care more that the non-malformed data is still emitted + cleaned_record = overwrite_collection_time record + if expected_responses.key?([tag, cleaned_record]) + expected_responses[[tag, cleaned_record]] = true + end + # don't do anything if an unexpected record was emitted. Since the node spec is malformed, there will be some partial data. + # we care more that the non-malformed data is still emitted end expected_responses.each do |key, val| - assert(val, "expected record not emitted: #{key}") + assert(val, "expected record not emitted: #{key}") end kubeApiClient.verify diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb new file mode 100644 index 000000000..ad8fdbf21 --- /dev/null +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -0,0 +1,433 @@ +#!/usr/local/bin/ruby +# frozen_string_literal: true + +require "fluent/plugin/input" + +module Fluent::Plugin + class Kube_PerfInventory_Input < Input + Fluent::Plugin.register_input("kube_perfinventory", self) + + def initialize + super + require "yaml" + require "yajl/json_gem" + require "yajl" + require "set" + require "time" + require "net/http" + + require_relative "KubernetesApiClient" + require_relative "ApplicationInsightsUtility" + require_relative "oms_common" + require_relative "omslog" + require_relative "constants" + require_relative "extension_utils" + + # refer tomlparser-agent-config for updating defaults + # this configurable via configmap + @PODS_CHUNK_SIZE = 0 + @PODS_EMIT_STREAM_BATCH_SIZE = 0 + + @watchPodsThread = nil + @podItemsCache = {} + + @kubeperfTag = "oneagent.containerInsights.LINUX_PERF_BLOB" + @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" + end + + config_param :run_interval, :time, :default => 60 + config_param :tag, :string, :default => "oneagent.containerInsights.LINUX_PERF_BLOB" + + def configure(conf) + super + end + + def start + if @run_interval + super + if !ENV["PODS_CHUNK_SIZE"].nil? && !ENV["PODS_CHUNK_SIZE"].empty? && ENV["PODS_CHUNK_SIZE"].to_i > 0 + @PODS_CHUNK_SIZE = ENV["PODS_CHUNK_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kube_perfinventory::start: setting to default value since got PODS_CHUNK_SIZE nil or empty") + @PODS_CHUNK_SIZE = 1000 + end + $log.info("in_kube_perfinventory::start: PODS_CHUNK_SIZE @ #{@PODS_CHUNK_SIZE}") + + if !ENV["PODS_EMIT_STREAM_BATCH_SIZE"].nil? && !ENV["PODS_EMIT_STREAM_BATCH_SIZE"].empty? 
&& ENV["PODS_EMIT_STREAM_BATCH_SIZE"].to_i > 0 + @PODS_EMIT_STREAM_BATCH_SIZE = ENV["PODS_EMIT_STREAM_BATCH_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kube_perfinventory::start: setting to default value since got PODS_EMIT_STREAM_BATCH_SIZE nil or empty") + @PODS_EMIT_STREAM_BATCH_SIZE = 200 + end + $log.info("in_kube_perfinventory::start: PODS_EMIT_STREAM_BATCH_SIZE @ #{@PODS_EMIT_STREAM_BATCH_SIZE}") + + @finished = false + @condition = ConditionVariable.new + @mutex = Mutex.new + @podCacheMutex = Mutex.new + @thread = Thread.new(&method(:run_periodic)) + @watchPodsThread = Thread.new(&method(:watch_pods)) + end + end + + def shutdown + if @run_interval + @mutex.synchronize { + @finished = true + @condition.signal + } + @thread.join + @watchPodsThread.join + super # This super must be at the end of shutdown method + end + end + + def enumerate(podList = nil) + begin + podInventory = podList + @podCount = 0 + currentTime = Time.now + batchTime = currentTime.utc.iso8601 + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_kube_perfinventory::enumerate: AAD AUTH MSI MODE") + if @kubeperfTag.nil? || !@kubeperfTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @kubeperfTag = ExtensionUtils.getOutputStreamId(Constants::PERF_DATA_TYPE) + end + if @insightsMetricsTag.nil? || !@insightsMetricsTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @insightsMetricsTag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) + end + $log.info("in_kube_perfinventory::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_perfinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") + end + + nodeAllocatableRecords = getNodeAllocatableRecords() + # Initializing continuation token to nil + continuationToken = nil + podItemsCacheSizeKB = 0 + podInventory = {} + @podCacheMutex.synchronize { + podInventory["items"] = @podItemsCache.values.clone + } + if (!podInventory.nil? && !podInventory.empty? && podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) + $log.info("in_kube_perfinventory::enumerate : number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") + parse_and_emit_records(podInventory, nodeAllocatableRecords, continuationToken, batchTime) + else + $log.warn "in_kube_perfinventory::enumerate:Received empty podInventory" + end + # Setting these to nil so that we dont hold memory until GC kicks in + podInventory = nil + nodeAllocatableRecords = nil + rescue => errorStr + $log.warn "in_kube_perfinventory::enumerate:Failed in enumerate: #{errorStr}" + $log.debug_backtrace(errorStr.backtrace) + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + end + + def parse_and_emit_records(podInventory, nodeAllocatableRecords, continuationToken, batchTime = Time.utc.iso8601) + currentTime = Time.now + emitTime = Fluent::Engine.now + kubePerfEventStream = Fluent::MultiEventStream.new + insightsMetricsEventStream = Fluent::MultiEventStream.new + @@istestvar = ENV["ISTEST"] + + begin #begin block start + podInventory["items"].each do |item| #podInventory block start + nodeName = "" + if !item["spec"]["nodeName"].nil? + nodeName = item["spec"]["nodeName"] + end + + nodeAllocatableRecord = {} + if !nodeName.empty? && !nodeAllocatableRecords.nil? && !nodeAllocatableRecords.empty? 
&& nodeAllocatableRecords.has_key?(nodeName) + nodeAllocatableRecord = nodeAllocatableRecords[nodeName] + end + #container perf records + containerMetricDataItems = [] + containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "requests", "cpu", "cpuRequestNanoCores", nodeAllocatableRecord, batchTime)) + containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "requests", "memory", "memoryRequestBytes", nodeAllocatableRecord, batchTime)) + containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "limits", "cpu", "cpuLimitNanoCores", nodeAllocatableRecord, batchTime)) + containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "limits", "memory", "memoryLimitBytes", nodeAllocatableRecord, batchTime)) + + containerMetricDataItems.each do |record| + kubePerfEventStream.add(emitTime, record) if record + end + + if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && kubePerfEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_perfinventory::parse_and_emit_records: number of container perf records emitted #{kubePerfEventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(@kubeperfTag, kubePerfEventStream) if kubePerfEventStream + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeContainerPerfEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end + kubePerfEventStream = Fluent::MultiEventStream.new + end + + # container GPU records + containerGPUInsightsMetricsDataItems = [] + containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "requests", "nvidia.com/gpu", "containerGpuRequests", nodeAllocatableRecord, batchTime)) + containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "limits", "nvidia.com/gpu", "containerGpuLimits", nodeAllocatableRecord, batchTime)) + containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "requests", "amd.com/gpu", "containerGpuRequests", nodeAllocatableRecord, batchTime)) + containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "limits", "amd.com/gpu", "containerGpuLimits", nodeAllocatableRecord, batchTime)) + containerGPUInsightsMetricsDataItems.each do |insightsMetricsRecord| + insightsMetricsEventStream.add(emitTime, insightsMetricsRecord) if insightsMetricsRecord + end + + if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && insightsMetricsEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_perfinventory::parse_and_emit_records: number of GPU insights metrics records emitted #{insightsMetricsEventStream.count} @ #{Time.now.utc.iso8601}") + if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) + $log.info("kubePodInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end + router.emit_stream(@insightsMetricsTag, insightsMetricsEventStream) if insightsMetricsEventStream + insightsMetricsEventStream = Fluent::MultiEventStream.new + end + end #podInventory block end + + if kubePerfEventStream.count > 0 + $log.info("in_kube_perfinventory::parse_and_emit_records: number of perf records emitted #{kubePerfEventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(@kubeperfTag, kubePerfEventStream) if kubePerfEventStream + kubePerfEventStream = nil + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeContainerPerfEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end + end + + if insightsMetricsEventStream.count > 0 + $log.info("in_kube_perfinventory::parse_and_emit_records: number of insights metrics records emitted #{insightsMetricsEventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(@insightsMetricsTag, insightsMetricsEventStream) if insightsMetricsEventStream + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubePodInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end + insightsMetricsEventStream = nil + end + rescue => errorStr + $log.warn "Failed in parse_and_emit_record kube perf inventory: #{errorStr}" + $log.debug_backtrace(errorStr.backtrace) + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end #begin block end + end + + def run_periodic + @mutex.lock + done = @finished + @nextTimeToRun = Time.now + @waitTimeout = @run_interval + until done + @nextTimeToRun = @nextTimeToRun + @run_interval + @now = Time.now + if @nextTimeToRun <= @now + @waitTimeout = 1 + @nextTimeToRun = @now + else + @waitTimeout = @nextTimeToRun - @now + end + @condition.wait(@mutex, @waitTimeout) + done = @finished + @mutex.unlock + if !done + begin + $log.info("in_kube_perfinventory::run_periodic.enumerate.start #{Time.now.utc.iso8601}") + enumerate + $log.info("in_kube_perfinventory::run_periodic.enumerate.end #{Time.now.utc.iso8601}") + rescue => errorStr + $log.warn "in_kube_perfinventory::run_periodic: enumerate Failed to retrieve perf inventory: #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + end + @mutex.lock + end + @mutex.unlock + end + + def watch_pods + $log.info("in_kube_perfinventory::watch_pods:Start @ #{Time.now.utc.iso8601}") + podsResourceVersion = nil + loop do + begin + if podsResourceVersion.nil? + # clear cache before filling the cache with list + @podCacheMutex.synchronize { + @podItemsCache.clear() + } + continuationToken = nil + resourceUri = "pods?limit=#{@PODS_CHUNK_SIZE}" + $log.info("in_kube_perfinventory::watch_pods:Getting pods from Kube API: #{resourceUri} @ #{Time.now.utc.iso8601}") + continuationToken, podInventory, responseCode = KubernetesApiClient.getResourcesAndContinuationTokenV2(resourceUri) + if responseCode.nil? || responseCode != "200" + $log.warn("in_kube_perfinventory::watch_pods:Getting pods from Kube API: #{resourceUri} failed with statuscode: #{responseCode} @ #{Time.now.utc.iso8601}") + else + $log.info("in_kube_perfinventory::watch_pods:Done getting pods from Kube API:#{resourceUri} @ #{Time.now.utc.iso8601}") + if (!podInventory.nil? && !podInventory.empty?) + podsResourceVersion = podInventory["metadata"]["resourceVersion"] + if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) 
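This watch_pods cache-seeding path LISTs pods in chunks and follows continuation tokens until the cache is complete before switching to a WATCH. A minimal sketch of that pagination loop, with a canned two-page response standing in for the Kubernetes API (all names here are hypothetical, not the plugin's helpers):

    # Canned pages: continue token => [next_token, inventory, response_code]
    PAGES = {
      nil    => ["tok1", { "items" => [{ "metadata" => { "uid" => "pod-a" } }] }, "200"],
      "tok1" => [nil,    { "items" => [{ "metadata" => { "uid" => "pod-b" } }] }, "200"],
    }

    def list_pods(continue_token)
      PAGES[continue_token]
    end

    cache = {}
    continue_token = nil
    loop do
      continue_token, inventory, code = list_pods(continue_token)
      break if code != "200" || inventory.nil?
      (inventory["items"] || []).each do |item|
        uid = item.dig("metadata", "uid")
        cache[uid] = item if uid   # keyed by pod uid, analogous to the pod items cache
      end
      break if continue_token.nil? || continue_token.empty?
    end

    puts cache.keys.inspect  # => ["pod-a", "pod-b"]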
+ $log.info("in_kube_perfinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") + podInventory["items"].each do |item| + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) + if !podItem.nil? && !podItem.empty? + @podCacheMutex.synchronize { + @podItemsCache[key] = podItem + } + else + $log.warn "in_kube_perfinventory::watch_pods:Received podItem either empty or nil @ #{Time.now.utc.iso8601}" + end + else + $log.warn "in_kube_perfinventory::watch_pods:Received poduid either nil or empty @ #{Time.now.utc.iso8601}" + end + end + end + else + $log.warn "in_kube_perfinventory::watch_pods:Received empty podInventory" + end + while (!continuationToken.nil? && !continuationToken.empty?) + resourceUri = "pods?limit=#{@PODS_CHUNK_SIZE}&continue=#{continuationToken}" + continuationToken, podInventory, responseCode = KubernetesApiClient.getResourcesAndContinuationTokenV2(resourceUri) + if responseCode.nil? || responseCode != "200" + $log.warn("in_kube_perfinventory::watch_pods:Getting pods from Kube API: #{resourceUri} failed with statuscode: #{responseCode} @ #{Time.now.utc.iso8601}") + podsResourceVersion = nil + break # break, if any of the pagination call failed so that full cache will rebuild with LIST again + else + if (!podInventory.nil? && !podInventory.empty?) + podsResourceVersion = podInventory["metadata"]["resourceVersion"] + if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) + $log.info("in_kube_perfinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") + podInventory["items"].each do |item| + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) + if !podItem.nil? && !podItem.empty? + @podCacheMutex.synchronize { + @podItemsCache[key] = podItem + } + else + $log.warn "in_kube_perfinventory::watch_pods:Received podItem is empty or nil @ #{Time.now.utc.iso8601}" + end + else + $log.warn "in_kube_perfinventory::watch_pods:Received poduid either nil or empty @ #{Time.now.utc.iso8601}" + end + end + end + else + $log.warn "in_kube_perfinventory::watch_pods:Received empty podInventory @ #{Time.now.utc.iso8601}" + end + end + end + end + end + if podsResourceVersion.nil? || podsResourceVersion.empty? || podsResourceVersion == "0" + # https://github.com/kubernetes/kubernetes/issues/74022 + $log.warn("in_kube_perfinventory::watch_pods:received podsResourceVersion: #{podsResourceVersion} either nil or empty or 0 @ #{Time.now.utc.iso8601}") + podsResourceVersion = nil # for the LIST to happen again + sleep(30) # do not overwhelm the api-server if api-server broken + else + begin + $log.info("in_kube_perfinventory::watch_pods:Establishing Watch connection for pods with resourceversion: #{podsResourceVersion} @ #{Time.now.utc.iso8601}") + watcher = KubernetesApiClient.watch("pods", resource_version: podsResourceVersion, allow_watch_bookmarks: true) + if watcher.nil? + $log.warn("in_kube_perfinventory::watch_pods:watch API returned nil watcher for watch connection with resource version: #{podsResourceVersion} @ #{Time.now.utc.iso8601}") + else + watcher.each do |notice| + case notice["type"] + when "ADDED", "MODIFIED", "DELETED", "BOOKMARK" + item = notice["object"] + # extract latest resource version to use for watch reconnect + if !item.nil? && !item.empty? 
&& + !item["metadata"].nil? && !item["metadata"].empty? && + !item["metadata"]["resourceVersion"].nil? && !item["metadata"]["resourceVersion"].empty? + podsResourceVersion = item["metadata"]["resourceVersion"] + # $log.info("in_kube_perfinventory::watch_pods:received event type: #{notice["type"]} with resource version: #{podsResourceVersion} @ #{Time.now.utc.iso8601}") + else + $log.warn("in_kube_perfinventory::watch_pods:received event type with no resourceVersion hence stopping watcher to reconnect @ #{Time.now.utc.iso8601}") + podsResourceVersion = nil + # We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + break + end + if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) + if !podItem.nil? && !podItem.empty? + @podCacheMutex.synchronize { + @podItemsCache[key] = podItem + } + else + $log.warn "in_kube_perfinventory::watch_pods:Received podItem is empty or nil @ #{Time.now.utc.iso8601}" + end + else + $log.warn "in_kube_perfinventory::watch_pods:Received poduid either nil or empty @ #{Time.now.utc.iso8601}" + end + elsif notice["type"] == "DELETED" + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + @podCacheMutex.synchronize { + @podItemsCache.delete(key) + } + end + end + when "ERROR" + podsResourceVersion = nil + $log.warn("in_kube_perfinventory::watch_pods:ERROR event with :#{notice["object"]} @ #{Time.now.utc.iso8601}") + break + else + podsResourceVersion = nil + $log.warn("in_kube_perfinventory::watch_pods:Unsupported event type #{notice["type"]} @ #{Time.now.utc.iso8601}") + end + end + $log.warn("in_kube_perfinventory::watch_pods:Watch connection got disconnected for pods @ #{Time.now.utc.iso8601}") + end + rescue Net::ReadTimeout => errorStr + ## This expected if there is no activity more than readtimeout value used in the connection + # $log.warn("in_kube_perfinventory::watch_pods:Watch failed with an error: #{errorStr} @ #{Time.now.utc.iso8601}") + rescue => errorStr + $log.warn("in_kube_perfinventory::watch_pods:Watch failed with an error: #{errorStr} @ #{Time.now.utc.iso8601}") + podsResourceVersion = nil + sleep(5) # do not overwhelm the api-server if api-server broken + ensure + watcher.finish if watcher + end + end + rescue => errorStr + $log.warn("in_kube_perfinventory::watch_pods:failed with an error: #{errorStr} @ #{Time.now.utc.iso8601}") + podsResourceVersion = nil + end + end + $log.info("in_kube_perfinventory::watch_pods:End @ #{Time.now.utc.iso8601}") + end + + def getNodeAllocatableRecords() + maxRetryCount = 5 + initialRetryDelaySecs = 0.5 + retryAttemptCount = 1 + nodeAllocatableRecords = {} + begin + f = File.open(Constants::NODE_ALLOCATABLE_RECORDS_STATE_FILE, "r") + if !f.nil? 
+ isAcquiredLock = f.flock(File::LOCK_EX | File::LOCK_NB) + raise "in_kube_perfinventory:getNodeAllocatableRecords:Failed to acquire file lock" if !isAcquiredLock + startTime = (Time.now.to_f * 1000).to_i + nodeAllocatableRecords = Yajl::Parser.parse(f) + timetakenMs = ((Time.now.to_f * 1000).to_i - startTime) + $log.info "in_kube_perfinventory:getNodeAllocatableRecords:Number of Node Allocatable records: #{nodeAllocatableRecords.length} with time taken(ms) for read: #{timetakenMs} @ #{Time.now.utc.iso8601}" + else + raise "in_kube_perfinventory:getNodeAllocatableRecords:Failed to open file for read" + end + rescue => err + if retryAttemptCount < maxRetryCount + f.flock(File::LOCK_UN) if !f.nil? + f.close if !f.nil? + sleep (initialRetryDelaySecs * retryAttemptCount) + retryAttemptCount = retryAttemptCount + 1 + retry + end + $log.warn "in_kube_perfinventory:getNodeAllocatableRecords failed with an error: #{err} after retries: #{maxRetryCount} @ #{Time.now.utc.iso8601}" + ApplicationInsightsUtility.sendExceptionTelemetry(err) + ensure + f.flock(File::LOCK_UN) if !f.nil? + f.close if !f.nil? + end + return nodeAllocatableRecords + end + end # Kube_Pod_Input +end # module diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index f979ef7c5..bdbc465ec 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -4,12 +4,9 @@ require "fluent/plugin/input" module Fluent::Plugin - require_relative "podinventory_to_mdm" - class Kube_PodInventory_Input < Input Fluent::Plugin.register_input("kube_podinventory", self) - @@MDMKubePodInventoryTag = "mdm.kubepodinventory" @@hostName = (OMS::Common.get_hostname) def initialize @@ -19,6 +16,8 @@ def initialize require "yajl" require "set" require "time" + require "net/http" + require "fileutils" require_relative "kubernetes_container_inventory" require_relative "KubernetesApiClient" @@ -27,11 +26,13 @@ def initialize require_relative "omslog" require_relative "constants" require_relative "extension_utils" + require_relative "CustomMetricsUtils" # refer tomlparser-agent-config for updating defaults # this configurable via configmap @PODS_CHUNK_SIZE = 0 @PODS_EMIT_STREAM_BATCH_SIZE = 0 + @NODES_CHUNK_SIZE = 0 @podCount = 0 @containerCount = 0 @@ -47,11 +48,18 @@ def initialize @controllerData = {} @podInventoryE2EProcessingLatencyMs = 0 @podsAPIE2ELatencyMs = 0 + @watchPodsThread = nil + @podItemsCache = {} + + @watchServicesThread = nil + @serviceItemsCache = {} + + @watchWinNodesThread = nil + @windowsNodeNameListCache = [] + @windowsContainerRecordsCacheSizeBytes = 0 - @kubeperfTag = "oneagent.containerInsights.LINUX_PERF_BLOB" @kubeservicesTag = "oneagent.containerInsights.KUBE_SERVICES_BLOB" @containerInventoryTag = "oneagent.containerInsights.CONTAINER_INVENTORY_BLOB" - @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" end config_param :run_interval, :time, :default => 60 @@ -59,7 +67,6 @@ def initialize def configure(conf) super - @inventoryToMdmConvertor = Inventory2MdmConvertor.new() end def start @@ -82,10 +89,26 @@ def start @PODS_EMIT_STREAM_BATCH_SIZE = 200 end $log.info("in_kube_podinventory::start: PODS_EMIT_STREAM_BATCH_SIZE @ #{@PODS_EMIT_STREAM_BATCH_SIZE}") + + if !ENV["NODES_CHUNK_SIZE"].nil? && !ENV["NODES_CHUNK_SIZE"].empty? 
&& ENV["NODES_CHUNK_SIZE"].to_i > 0 + @NODES_CHUNK_SIZE = ENV["NODES_CHUNK_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kube_podinventory::start: setting to default value since got NODES_CHUNK_SIZE nil or empty") + @NODES_CHUNK_SIZE = 250 + end + $log.info("in_kube_podinventory::start : NODES_CHUNK_SIZE @ #{@NODES_CHUNK_SIZE}") + @finished = false @condition = ConditionVariable.new @mutex = Mutex.new + @podCacheMutex = Mutex.new + @serviceCacheMutex = Mutex.new + @windowsNodeNameCacheMutex = Mutex.new @thread = Thread.new(&method(:run_periodic)) + @watchWinNodesThread = Thread.new(&method(:watch_windows_nodes)) + @watchPodsThread = Thread.new(&method(:watch_pods)) + @watchServicesThread = Thread.new(&method(:watch_services)) @@podTelemetryTimeTracker = DateTime.now.to_time.to_i end end @@ -97,6 +120,9 @@ def shutdown @condition.signal } @thread.join + @watchPodsThread.join + @watchServicesThread.join + @watchWinNodesThread.join super # This super must be at the end of shutdown method end end @@ -110,6 +136,7 @@ def enumerate(podList = nil) @serviceCount = 0 @controllerSet = Set.new [] @winContainerCount = 0 + @windowsContainerRecordsCacheSizeBytes = 0 @winContainerInventoryTotalSizeBytes = 0 @winContainerCountWithInventoryRecordSize64KBOrMore = 0 @winContainerCountWithEnvVarSize64KBOrMore = 0 @@ -121,6 +148,7 @@ def enumerate(podList = nil) batchTime = currentTime.utc.iso8601 serviceRecords = [] @podInventoryE2EProcessingLatencyMs = 0 + @mdmPodRecordItems = [] podInventoryStartTime = (Time.now.to_f * 1000).to_i if ExtensionUtils.isAADMSIAuthMode() $log.info("in_kube_podinventory::enumerate: AAD AUTH MSI MODE") @@ -146,32 +174,31 @@ def enumerate(podList = nil) $log.info("in_kube_podinventory::enumerate: using kubepodinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") end - # Get services first so that we dont need to make a call for very chunk - $log.info("in_kube_podinventory::enumerate : Getting services from Kube API @ #{Time.now.utc.iso8601}") - serviceInfo = KubernetesApiClient.getKubeResourceInfo("services") - # serviceList = JSON.parse(KubernetesApiClient.getKubeResourceInfo("services").body) - $log.info("in_kube_podinventory::enumerate : Done getting services from Kube API @ #{Time.now.utc.iso8601}") - - if !serviceInfo.nil? 
- $log.info("in_kube_podinventory::enumerate:Start:Parsing services data using yajl @ #{Time.now.utc.iso8601}") - serviceList = Yajl::Parser.parse(StringIO.new(serviceInfo.body)) - $log.info("in_kube_podinventory::enumerate:End:Parsing services data using yajl @ #{Time.now.utc.iso8601}") - serviceInfo = nil - # service inventory records much smaller and fixed size compared to serviceList - serviceRecords = KubernetesApiClient.getKubeServicesInventoryRecords(serviceList, batchTime) - # updating for telemetry - @serviceCount += serviceRecords.length - serviceList = nil - end + serviceInventory = {} + serviceItemsCacheSizeKB = 0 + @serviceCacheMutex.synchronize { + serviceInventory["items"] = @serviceItemsCache.values.clone + if KubernetesApiClient.isEmitCacheTelemetry() + serviceItemsCacheSizeKB = @serviceItemsCache.to_s.length / 1024 + end + } + serviceRecords = KubernetesApiClient.getKubeServicesInventoryRecords(serviceInventory, batchTime) + # updating for telemetry + @serviceCount = serviceRecords.length + $log.info("in_kube_podinventory::enumerate : number of service items :#{@serviceCount} from Kube API @ #{Time.now.utc.iso8601}") - # to track e2e processing latency @podsAPIE2ELatencyMs = 0 podsAPIChunkStartTime = (Time.now.to_f * 1000).to_i # Initializing continuation token to nil continuationToken = nil - $log.info("in_kube_podinventory::enumerate : Getting pods from Kube API @ #{Time.now.utc.iso8601}") - continuationToken, podInventory = KubernetesApiClient.getResourcesAndContinuationToken("pods?limit=#{@PODS_CHUNK_SIZE}") - $log.info("in_kube_podinventory::enumerate : Done getting pods from Kube API @ #{Time.now.utc.iso8601}") + podItemsCacheSizeKB = 0 + podInventory = {} + @podCacheMutex.synchronize { + podInventory["items"] = @podItemsCache.values.clone + if KubernetesApiClient.isEmitCacheTelemetry() + podItemsCacheSizeKB = @podItemsCache.to_s.length / 1024 + end + } podsAPIChunkEndTime = (Time.now.to_f * 1000).to_i @podsAPIE2ELatencyMs = (podsAPIChunkEndTime - podsAPIChunkStartTime) if (!podInventory.nil? && !podInventory.empty? && podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) @@ -180,25 +207,11 @@ def enumerate(podList = nil) else $log.warn "in_kube_podinventory::enumerate:Received empty podInventory" end - - #If we receive a continuation token, make calls, process and flush data until we have processed all data - while (!continuationToken.nil? && !continuationToken.empty?) - podsAPIChunkStartTime = (Time.now.to_f * 1000).to_i - continuationToken, podInventory = KubernetesApiClient.getResourcesAndContinuationToken("pods?limit=#{@PODS_CHUNK_SIZE}&continue=#{continuationToken}") - podsAPIChunkEndTime = (Time.now.to_f * 1000).to_i - @podsAPIE2ELatencyMs = @podsAPIE2ELatencyMs + (podsAPIChunkEndTime - podsAPIChunkStartTime) - if (!podInventory.nil? && !podInventory.empty? && podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) 
- $log.info("in_kube_podinventory::enumerate : number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") - parse_and_emit_records(podInventory, serviceRecords, continuationToken, batchTime) - else - $log.warn "in_kube_podinventory::enumerate:Received empty podInventory" - end - end - @podInventoryE2EProcessingLatencyMs = ((Time.now.to_f * 1000).to_i - podInventoryStartTime) # Setting these to nil so that we dont hold memory until GC kicks in podInventory = nil serviceRecords = nil + @mdmPodRecordItems = nil # Adding telemetry to send pod telemetry every 5 minutes timeDifference = (DateTime.now.to_time.to_i - @@podTelemetryTimeTracker).abs @@ -213,6 +226,11 @@ def enumerate(podList = nil) telemetryProperties["Computer"] = @@hostName telemetryProperties["PODS_CHUNK_SIZE"] = @PODS_CHUNK_SIZE telemetryProperties["PODS_EMIT_STREAM_BATCH_SIZE"] = @PODS_EMIT_STREAM_BATCH_SIZE + if KubernetesApiClient.isEmitCacheTelemetry() + telemetryProperties["POD_ITEMS_CACHE_SIZE_KB"] = podItemsCacheSizeKB + telemetryProperties["SERVICE_ITEMS_CACHE_SIZE_KB"] = serviceItemsCacheSizeKB + telemetryProperties["WINDOWS_CONTAINER_RECORDS_CACHE_SIZE_KB"] = @windowsContainerRecordsCacheSizeBytes / 1024 + end ApplicationInsightsUtility.sendCustomEvent("KubePodInventoryHeartBeatEvent", telemetryProperties) ApplicationInsightsUtility.sendMetricTelemetry("PodCount", @podCount, {}) ApplicationInsightsUtility.sendMetricTelemetry("ContainerCount", @containerCount, {}) @@ -221,7 +239,7 @@ def enumerate(podList = nil) ApplicationInsightsUtility.sendMetricTelemetry("ControllerCount", @controllerSet.length, telemetryProperties) if @winContainerCount > 0 telemetryProperties["ClusterWideWindowsContainersCount"] = @winContainerCount - telemetryProperties["WindowsNodeCount"] = @windowsNodeCount + telemetryProperties["WindowsNodeCount"] = @windowsNodeNameListCache.length telemetryProperties["ClusterWideWindowsContainerInventoryTotalSizeKB"] = @winContainerInventoryTotalSizeBytes / 1024 telemetryProperties["WindowsContainerCountWithInventoryRecordSize64KBorMore"] = @winContainerCountWithInventoryRecordSize64KBOrMore if @winContainerCountWithEnvVarSize64KBOrMore > 0 @@ -257,8 +275,6 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc @@istestvar = ENV["ISTEST"] begin #begin block start - # Getting windows nodes from kubeapi - winNodes = KubernetesApiClient.getWindowsNodesArray podInventory["items"].each do |item| #podInventory block start # pod inventory records podInventoryRecords = getPodInventoryRecords(item, serviceRecords, batchTime) @@ -266,40 +282,39 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc podInventoryRecords.each do |record| if !record.nil? eventStream.add(emitTime, record) if record - @inventoryToMdmConvertor.process_pod_inventory_record(record) end end # Setting this flag to true so that we can send ContainerInventory records for containers # on windows nodes and parse environment variables for these containers - if winNodes.length > 0 - nodeName = "" - if !item["spec"]["nodeName"].nil? - nodeName = item["spec"]["nodeName"] + nodeName = "" + if !item["spec"]["nodeName"].nil? + nodeName = item["spec"]["nodeName"] + end + if (!item["isWindows"].nil? && !item["isWindows"].empty? 
&& item["isWindows"].downcase == "true") + clusterCollectEnvironmentVar = ENV["AZMON_CLUSTER_COLLECT_ENV_VAR"] + #Generate ContainerInventory records for windows nodes so that we can get image and image tag in property panel + containerInventoryRecords = KubernetesContainerInventory.getContainerInventoryRecords(item, batchTime, clusterCollectEnvironmentVar, true) + if KubernetesApiClient.isEmitCacheTelemetry() + @windowsContainerRecordsCacheSizeBytes += containerInventoryRecords.to_s.length end - @windowsNodeCount = winNodes.length - if (!nodeName.empty? && (winNodes.include? nodeName)) - clusterCollectEnvironmentVar = ENV["AZMON_CLUSTER_COLLECT_ENV_VAR"] - #Generate ContainerInventory records for windows nodes so that we can get image and image tag in property panel - containerInventoryRecords = KubernetesContainerInventory.getContainerInventoryRecords(item, batchTime, clusterCollectEnvironmentVar, true) - # Send container inventory records for containers on windows nodes - @winContainerCount += containerInventoryRecords.length - containerInventoryRecords.each do |cirecord| - if !cirecord.nil? - containerInventoryStream.add(emitTime, cirecord) if cirecord - ciRecordSize = cirecord.to_s.length - @winContainerInventoryTotalSizeBytes += ciRecordSize - if ciRecordSize >= Constants::MAX_RECORD_OR_FIELD_SIZE_FOR_TELEMETRY - @winContainerCountWithInventoryRecordSize64KBOrMore += 1 - end - if !cirecord["EnvironmentVar"].nil? && !cirecord["EnvironmentVar"].empty? && cirecord["EnvironmentVar"].length >= Constants::MAX_RECORD_OR_FIELD_SIZE_FOR_TELEMETRY - @winContainerCountWithEnvVarSize64KBOrMore += 1 - end - if !cirecord["Ports"].nil? && !cirecord["Ports"].empty? && cirecord["Ports"].length >= Constants::MAX_RECORD_OR_FIELD_SIZE_FOR_TELEMETRY - @winContainerCountWithPortsSize64KBOrMore += 1 - end - if !cirecord["Command"].nil? && !cirecord["Command"].empty? && cirecord["Command"].length >= Constants::MAX_RECORD_OR_FIELD_SIZE_FOR_TELEMETRY - @winContainerCountWithCommandSize64KBOrMore += 1 - end + # Send container inventory records for containers on windows nodes + @winContainerCount += containerInventoryRecords.length + containerInventoryRecords.each do |cirecord| + if !cirecord.nil? + containerInventoryStream.add(emitTime, cirecord) if cirecord + ciRecordSize = cirecord.to_s.length + @winContainerInventoryTotalSizeBytes += ciRecordSize + if ciRecordSize >= Constants::MAX_RECORD_OR_FIELD_SIZE_FOR_TELEMETRY + @winContainerCountWithInventoryRecordSize64KBOrMore += 1 + end + if !cirecord["EnvironmentVar"].nil? && !cirecord["EnvironmentVar"].empty? && cirecord["EnvironmentVar"].length >= Constants::MAX_RECORD_OR_FIELD_SIZE_FOR_TELEMETRY + @winContainerCountWithEnvVarSize64KBOrMore += 1 + end + if !cirecord["Ports"].nil? && !cirecord["Ports"].empty? && cirecord["Ports"].length >= Constants::MAX_RECORD_OR_FIELD_SIZE_FOR_TELEMETRY + @winContainerCountWithPortsSize64KBOrMore += 1 + end + if !cirecord["Command"].nil? && !cirecord["Command"].empty? 
&& cirecord["Command"].length >= Constants::MAX_RECORD_OR_FIELD_SIZE_FOR_TELEMETRY + @winContainerCountWithCommandSize64KBOrMore += 1 end end end @@ -313,45 +328,6 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc router.emit_stream(@tag, eventStream) if eventStream eventStream = Fluent::MultiEventStream.new end - - #container perf records - containerMetricDataItems = [] - containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "requests", "cpu", "cpuRequestNanoCores", batchTime)) - containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "requests", "memory", "memoryRequestBytes", batchTime)) - containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "limits", "cpu", "cpuLimitNanoCores", batchTime)) - containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "limits", "memory", "memoryLimitBytes", batchTime)) - - containerMetricDataItems.each do |record| - kubePerfEventStream.add(emitTime, record) if record - end - - if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && kubePerfEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE - $log.info("in_kube_podinventory::parse_and_emit_records: number of container perf records emitted #{kubePerfEventStream.count} @ #{Time.now.utc.iso8601}") - router.emit_stream(@kubeperfTag, kubePerfEventStream) if kubePerfEventStream - if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) - $log.info("kubeContainerPerfEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") - end - kubePerfEventStream = Fluent::MultiEventStream.new - end - - # container GPU records - containerGPUInsightsMetricsDataItems = [] - containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "requests", "nvidia.com/gpu", "containerGpuRequests", batchTime)) - containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "limits", "nvidia.com/gpu", "containerGpuLimits", batchTime)) - containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "requests", "amd.com/gpu", "containerGpuRequests", batchTime)) - containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "limits", "amd.com/gpu", "containerGpuLimits", batchTime)) - containerGPUInsightsMetricsDataItems.each do |insightsMetricsRecord| - insightsMetricsEventStream.add(emitTime, insightsMetricsRecord) if insightsMetricsRecord - end - - if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && insightsMetricsEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE - $log.info("in_kube_podinventory::parse_and_emit_records: number of GPU insights metrics records emitted #{insightsMetricsEventStream.count} @ #{Time.now.utc.iso8601}") - if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) - $log.info("kubePodInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") - end - router.emit_stream(@insightsMetricsTag, insightsMetricsEventStream) if insightsMetricsEventStream - insightsMetricsEventStream = Fluent::MultiEventStream.new - end end #podInventory block end if eventStream.count > 0 @@ -372,33 +348,26 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc containerInventoryStream = nil end - if kubePerfEventStream.count > 0 - $log.info("in_kube_podinventory::parse_and_emit_records: number of perf records emitted #{kubePerfEventStream.count} @ #{Time.now.utc.iso8601}") - router.emit_stream(@kubeperfTag, kubePerfEventStream) if kubePerfEventStream - kubePerfEventStream = nil - if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) - $log.info("kubeContainerPerfEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") - end - end - - if insightsMetricsEventStream.count > 0 - $log.info("in_kube_podinventory::parse_and_emit_records: number of insights metrics records emitted #{insightsMetricsEventStream.count} @ #{Time.now.utc.iso8601}") - router.emit_stream(@insightsMetricsTag, insightsMetricsEventStream) if insightsMetricsEventStream - if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) - $log.info("kubePodInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") + if continuationToken.nil? #no more chunks in this batch to be sent, write all mdm pod inventory records to send + if CustomMetricsUtils.check_custom_metrics_availability + begin + if !@mdmPodRecordItems.nil? && @mdmPodRecordItems.length > 0 + mdmPodRecords = { + "collectionTime": batchTime, + "items": @mdmPodRecordItems, + } + mdmPodRecordsJson = mdmPodRecords.to_json + @log.info "Writing pod inventory mdm records to mdm podinventory state file with size(bytes): #{mdmPodRecordsJson.length}" + @log.info "in_kube_podinventory::parse_and_emit_records:Start:writeMDMRecords @ #{Time.now.utc.iso8601}" + writeMDMRecords(mdmPodRecordsJson) + mdmPodRecords = nil + mdmPodRecordsJson = nil + @log.info "in_kube_podinventory::parse_and_emit_records:End:writeMDMRecords @ #{Time.now.utc.iso8601}" + end + rescue => err + @log.warn "in_kube_podinventory::parse_and_emit_records: failed to write MDMRecords with an error: #{err} @ #{Time.now.utc.iso8601}" + end end - insightsMetricsEventStream = nil - end - - if continuationToken.nil? #no more chunks in this batch to be sent, get all mdm pod inventory records to send - @log.info "Sending pod inventory mdm records to out_mdm" - pod_inventory_mdm_records = @inventoryToMdmConvertor.get_pod_inventory_mdm_records(batchTime) - @log.info "pod_inventory_mdm_records.size #{pod_inventory_mdm_records.size}" - mdm_pod_inventory_es = Fluent::MultiEventStream.new - pod_inventory_mdm_records.each { |pod_inventory_mdm_record| - mdm_pod_inventory_es.add(batchTime, pod_inventory_mdm_record) if pod_inventory_mdm_record - } if pod_inventory_mdm_records - router.emit_stream(@@MDMKubePodInventoryTag, mdm_pod_inventory_es) if mdm_pod_inventory_es end if continuationToken.nil? 
# sending kube services inventory records @@ -477,6 +446,7 @@ def getPodInventoryRecords(item, serviceRecords, batchTime = Time.utc.iso8601) record = {} begin + mdmPodRecord = {} record["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated record["Name"] = item["metadata"]["name"] podNameSpace = item["metadata"]["namespace"] @@ -552,7 +522,14 @@ def getPodInventoryRecords(item, serviceRecords, batchTime = Time.utc.iso8601) record["PodRestartCount"] = 0 #Invoke the helper method to compute ready/not ready mdm metric - @inventoryToMdmConvertor.process_record_for_pods_ready_metric(record["ControllerName"], record["Namespace"], item["status"]["conditions"]) + mdmPodRecord["PodUid"] = podUid + mdmPodRecord["Computer"] = nodeName + mdmPodRecord["ControllerName"] = record["ControllerName"] + mdmPodRecord["Namespace"] = record["Namespace"] + mdmPodRecord["PodStatus"] = record["PodStatus"] + mdmPodRecord["PodReadyCondition"] = KubernetesApiClient.getPodReadyCondition(item["status"]["conditions"]) + mdmPodRecord["ControllerKind"] = record["ControllerKind"] + mdmPodRecord["containerRecords"] = [] podContainers = [] if item["status"].key?("containerStatuses") && !item["status"]["containerStatuses"].empty? @@ -589,6 +566,8 @@ def getPodInventoryRecords(item, serviceRecords, batchTime = Time.utc.iso8601) record["ContainerRestartCount"] = containerRestartCount containerStatus = container["state"] + + mdmContainerRecord = {} record["ContainerStatusReason"] = "" # state is of the following form , so just picking up the first key name # "state": { @@ -613,7 +592,7 @@ def getPodInventoryRecords(item, serviceRecords, batchTime = Time.utc.iso8601) end # Process the record to see if job was completed 6 hours ago. If so, send metric to mdm if !record["ControllerKind"].nil? && record["ControllerKind"].downcase == Constants::CONTROLLER_KIND_JOB - @inventoryToMdmConvertor.process_record_for_terminated_job_metric(record["ControllerName"], record["Namespace"], containerStatus) + mdmContainerRecord["state"] = containerStatus end end @@ -641,7 +620,7 @@ def getPodInventoryRecords(item, serviceRecords, batchTime = Time.utc.iso8601) #Populate mdm metric for OOMKilled container count if lastStateReason is OOMKilled if lastStateReason.downcase == Constants::REASON_OOM_KILLED - @inventoryToMdmConvertor.process_record_for_oom_killed_metric(record["ControllerName"], record["Namespace"], lastFinishedTime) + mdmContainerRecord["lastState"] = container["lastState"] end lastStateReason = nil else @@ -653,7 +632,8 @@ def getPodInventoryRecords(item, serviceRecords, batchTime = Time.utc.iso8601) #Populate mdm metric for container restart count if greater than 0 if (!containerRestartCount.nil? && (containerRestartCount.is_a? Integer) && containerRestartCount > 0) - @inventoryToMdmConvertor.process_record_for_container_restarts_metric(record["ControllerName"], record["Namespace"], lastFinishedTime) + mdmContainerRecord["restartCount"] = containerRestartCount + mdmContainerRecord["lastState"] = container["lastState"] end rescue => errorStr $log.warn "Failed in parse_and_emit_record pod inventory while processing ContainerLastStatus: #{errorStr}" @@ -662,6 +642,10 @@ def getPodInventoryRecords(item, serviceRecords, batchTime = Time.utc.iso8601) record["ContainerLastStatus"] = Hash.new end + if !mdmContainerRecord.empty? 
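The pods-ready computation is decoupled in the same way: getPodInventoryRecords now stores a precomputed PodReadyCondition (from KubernetesApiClient.getPodReadyCondition) on the MDM record instead of handing the raw status conditions to the convertor. The client helper itself is not shown in this patch; as an illustration only, the condition can be derived from status.conditions the same way the inline logic removed from podinventory_to_mdm.rb did:

    # Illustrative stand-in for KubernetesApiClient.getPodReadyCondition; the real
    # implementation is not part of this patch.
    def pod_ready_condition(status_conditions)
      ready = false
      return ready if status_conditions.nil? || status_conditions.empty?
      status_conditions.each do |condition|
        if condition["type"] == "Ready"
          ready = true if condition["status"].to_s.downcase == "true"
          break   # the Ready condition was found, no need to scan further
        end
      end
      ready
    end

    puts pod_ready_condition([{ "type" => "Ready", "status" => "True" }])   # => true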
+ mdmPodRecord["containerRecords"].push(mdmContainerRecord.dup) + end + podRestartCount += containerRestartCount records.push(record.dup) end @@ -669,6 +653,8 @@ def getPodInventoryRecords(item, serviceRecords, batchTime = Time.utc.iso8601) records.push(record) end #container status block end + @mdmPodRecordItems.push(mdmPodRecord.dup) + records.each do |record| if !record.nil? record["PodRestartCount"] = podRestartCount @@ -715,5 +701,499 @@ def getServiceNameFromLabels(namespace, labels, serviceRecords) end return serviceName end + + def watch_pods + $log.info("in_kube_podinventory::watch_pods:Start @ #{Time.now.utc.iso8601}") + podsResourceVersion = nil + # invoke getWindowsNodes to handle scenario where windowsNodeNameCache not populated yet on containerstart + winNodes = KubernetesApiClient.getWindowsNodesArray() + if winNodes.length > 0 + @windowsNodeNameCacheMutex.synchronize { + @windowsNodeNameListCache = winNodes.dup + } + end + loop do + begin + if podsResourceVersion.nil? + # clear cache before filling the cache with list + @podCacheMutex.synchronize { + @podItemsCache.clear() + } + currentWindowsNodeNameList = [] + @windowsNodeNameCacheMutex.synchronize { + currentWindowsNodeNameList = @windowsNodeNameListCache.dup + } + continuationToken = nil + resourceUri = "pods?limit=#{@PODS_CHUNK_SIZE}" + $log.info("in_kube_podinventory::watch_pods:Getting pods from Kube API: #{resourceUri} @ #{Time.now.utc.iso8601}") + continuationToken, podInventory, responseCode = KubernetesApiClient.getResourcesAndContinuationTokenV2(resourceUri) + if responseCode.nil? || responseCode != "200" + $log.warn("in_kube_podinventory::watch_pods: getting pods from Kube API: #{resourceUri} failed with statuscode: #{responseCode} @ #{Time.now.utc.iso8601}") + else + $log.info("in_kube_podinventory::watch_pods:Done getting pods from Kube API: #{resourceUri} @ #{Time.now.utc.iso8601}") + if (!podInventory.nil? && !podInventory.empty?) + podsResourceVersion = podInventory["metadata"]["resourceVersion"] + if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) + $log.info("in_kube_podinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") + podInventory["items"].each do |item| + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? item["spec"]["nodeName"] : "" + isWindowsPodItem = false + if !nodeName.empty? && + !currentWindowsNodeNameList.nil? && + !currentWindowsNodeNameList.empty? && + currentWindowsNodeNameList.include?(nodeName) + isWindowsPodItem = true + end + podItem = KubernetesApiClient.getOptimizedItem("pods", item, isWindowsPodItem) + if !podItem.nil? && !podItem.empty? + @podCacheMutex.synchronize { + @podItemsCache[key] = podItem + } + else + $log.warn "in_kube_podinventory::watch_pods:Received podItem either empty or nil @ #{Time.now.utc.iso8601}" + end + else + $log.warn "in_kube_podinventory::watch_pods:Received poduid either nil or empty @ #{Time.now.utc.iso8601}" + end + end + end + else + $log.warn "in_kube_podinventory::watch_pods:Received empty podInventory" + end + while (!continuationToken.nil? && !continuationToken.empty?) + resourceUri = "pods?limit=#{@PODS_CHUNK_SIZE}&continue=#{continuationToken}" + continuationToken, podInventory, responseCode = KubernetesApiClient.getResourcesAndContinuationTokenV2(resourceUri) + if responseCode.nil? 
|| responseCode != "200" + $log.warn("in_kube_podinventory::watch_pods: getting pods from Kube API: #{resourceUri} failed with statuscode: #{responseCode} @ #{Time.now.utc.iso8601}") + podsResourceVersion = nil + break # break, if any of the pagination call failed so that full cache will rebuild with LIST again + else + if (!podInventory.nil? && !podInventory.empty?) + podsResourceVersion = podInventory["metadata"]["resourceVersion"] + if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) + $log.info("in_kube_podinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") + podInventory["items"].each do |item| + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? item["spec"]["nodeName"] : "" + isWindowsPodItem = false + if !nodeName.empty? && + !currentWindowsNodeNameList.nil? && + !currentWindowsNodeNameList.empty? && + currentWindowsNodeNameList.include?(nodeName) + isWindowsPodItem = true + end + podItem = KubernetesApiClient.getOptimizedItem("pods", item, isWindowsPodItem) + if !podItem.nil? && !podItem.empty? + @podCacheMutex.synchronize { + @podItemsCache[key] = podItem + } + else + $log.warn "in_kube_podinventory::watch_pods:Received podItem is empty or nil @ #{Time.now.utc.iso8601}" + end + else + $log.warn "in_kube_podinventory::watch_pods:Received poduid either nil or empty @ #{Time.now.utc.iso8601}" + end + end + end + else + $log.warn "in_kube_podinventory::watch_pods:Received empty podInventory @ #{Time.now.utc.iso8601}" + end + end + end + end + end + if podsResourceVersion.nil? || podsResourceVersion.empty? || podsResourceVersion == "0" + # https://github.com/kubernetes/kubernetes/issues/74022 + $log.warn("in_kube_podinventory::watch_pods:received podsResourceVersion either nil or empty or 0 @ #{Time.now.utc.iso8601}") + podsResourceVersion = nil # for the LIST to happen again + sleep(30) # do not overwhelm the api-server if api-server down + else + begin + $log.info("in_kube_podinventory::watch_pods:Establishing Watch connection for pods with resourceversion: #{podsResourceVersion} @ #{Time.now.utc.iso8601}") + watcher = KubernetesApiClient.watch("pods", resource_version: podsResourceVersion, allow_watch_bookmarks: true) + if watcher.nil? + $log.warn("in_kube_podinventory::watch_pods:watch API returned nil watcher for watch connection with resource version: #{podsResourceVersion} @ #{Time.now.utc.iso8601}") + else + watcher.each do |notice| + case notice["type"] + when "ADDED", "MODIFIED", "DELETED", "BOOKMARK" + item = notice["object"] + # extract latest resource version to use for watch reconnect + if !item.nil? && !item.empty? && + !item["metadata"].nil? && !item["metadata"].empty? && + !item["metadata"]["resourceVersion"].nil? && !item["metadata"]["resourceVersion"].empty? + podsResourceVersion = item["metadata"]["resourceVersion"] + # $log.info("in_kube_podinventory::watch_pods:received event type: #{notice["type"]} with resource version: #{podsResourceVersion} @ #{Time.now.utc.iso8601}") + else + $log.warn("in_kube_podinventory::watch_pods:received event type with no resourceVersion hence stopping watcher to reconnect @ #{Time.now.utc.iso8601}") + podsResourceVersion = nil + # We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! 
+ break + end + if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + currentWindowsNodeNameList = [] + @windowsNodeNameCacheMutex.synchronize { + currentWindowsNodeNameList = @windowsNodeNameListCache.dup + } + isWindowsPodItem = false + nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? item["spec"]["nodeName"] : "" + if !nodeName.empty? && + !currentWindowsNodeNameList.nil? && + !currentWindowsNodeNameList.empty? && + currentWindowsNodeNameList.include?(nodeName) + isWindowsPodItem = true + end + podItem = KubernetesApiClient.getOptimizedItem("pods", item, isWindowsPodItem) + if !podItem.nil? && !podItem.empty? + @podCacheMutex.synchronize { + @podItemsCache[key] = podItem + } + else + $log.warn "in_kube_podinventory::watch_pods:Received podItem is empty or nil @ #{Time.now.utc.iso8601}" + end + else + $log.warn "in_kube_podinventory::watch_pods:Received poduid either nil or empty @ #{Time.now.utc.iso8601}" + end + elsif notice["type"] == "DELETED" + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + @podCacheMutex.synchronize { + @podItemsCache.delete(key) + } + end + end + when "ERROR" + podsResourceVersion = nil + $log.warn("in_kube_podinventory::watch_pods:ERROR event with :#{notice["object"]} @ #{Time.now.utc.iso8601}") + break + else + $log.warn("in_kube_podinventory::watch_pods:Unsupported event type #{notice["type"]} @ #{Time.now.utc.iso8601}") + # enforce LIST again otherwise cause inconsistency by skipping a potential RV with valid data! + podsResourceVersion = nil + break + end + end + $log.warn("in_kube_podinventory::watch_pods:Watch connection got disconnected for pods @ #{Time.now.utc.iso8601}") + end + rescue Net::ReadTimeout => errorStr + ## This expected if there is no activity on the cluster for more than readtimeout value used in the connection + # $log.warn("in_kube_podinventory::watch_pods:Watch failed with an error: #{errorStr} @ #{Time.now.utc.iso8601}") + rescue => errorStr + $log.warn("in_kube_podinventory::watch_pods:Watch failed with an error: #{errorStr} @ #{Time.now.utc.iso8601}") + podsResourceVersion = nil + sleep(5) # do not overwhelm the api-server if api-server down + ensure + watcher.finish if watcher + end + end + rescue => errorStr + $log.warn("in_kube_podinventory::watch_pods:failed with an error: #{errorStr} @ #{Time.now.utc.iso8601}") + podsResourceVersion = nil + end + end + $log.info("in_kube_podinventory::watch_pods:End @ #{Time.now.utc.iso8601}") + end + + def watch_services + $log.info("in_kube_podinventory::watch_services:Start @ #{Time.now.utc.iso8601}") + servicesResourceVersion = nil + loop do + begin + if servicesResourceVersion.nil? + # clear cache before filling the cache with list + @serviceCacheMutex.synchronize { + @serviceItemsCache.clear() + } + $log.info("in_kube_podinventory::watch_services:Getting services from Kube API @ #{Time.now.utc.iso8601}") + responseCode, serviceInfo = KubernetesApiClient.getKubeResourceInfoV2("services") + if responseCode.nil? || responseCode != "200" + $log.info("in_kube_podinventory::watch_services:Getting services from Kube API failed with statuscode: #{responseCode} @ #{Time.now.utc.iso8601}") + else + $log.info("in_kube_podinventory::watch_services: Done getting services from Kube API @ #{Time.now.utc.iso8601}") + if !serviceInfo.nil? 
+ $log.info("in_kube_podinventory::watch_services:Start:Parsing services data using yajl @ #{Time.now.utc.iso8601}") + serviceInventory = Yajl::Parser.parse(StringIO.new(serviceInfo.body)) + $log.info("in_kube_podinventory::watch_services:End:Parsing services data using yajl @ #{Time.now.utc.iso8601}") + serviceInfo = nil + if (!serviceInventory.nil? && !serviceInventory.empty?) + servicesResourceVersion = serviceInventory["metadata"]["resourceVersion"] + if (serviceInventory.key?("items") && !serviceInventory["items"].nil? && !serviceInventory["items"].empty?) + $log.info("in_kube_podinventory::watch_services:number of service items #{serviceInventory["items"].length} @ #{Time.now.utc.iso8601}") + serviceInventory["items"].each do |item| + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + serviceItem = KubernetesApiClient.getOptimizedItem("services", item) + if !serviceItem.nil? && !serviceItem.empty? + @serviceCacheMutex.synchronize { + @serviceItemsCache[key] = serviceItem + } + else + $log.warn "in_kube_podinventory::watch_services:Received serviceItem either nil or empty @ #{Time.now.utc.iso8601}" + end + else + $log.warn "in_kube_podinventory::watch_services:Received serviceuid either nil or empty @ #{Time.now.utc.iso8601}" + end + end + end + else + $log.warn "in_kube_podinventory::watch_services:Received empty serviceInventory @ #{Time.now.utc.iso8601}" + end + serviceInventory = nil + end + end + end + if servicesResourceVersion.nil? || servicesResourceVersion == "" || servicesResourceVersion == "0" + # https://github.com/kubernetes/kubernetes/issues/74022 + $log.warn("in_kube_podinventory::watch_services:received servicesResourceVersion either nil or empty or 0 @ #{Time.now.utc.iso8601}") + servicesResourceVersion = nil # for the LIST to happen again + sleep(30) # do not overwhelm the api-server if api-server down + else + begin + $log.info("in_kube_podinventory::watch_services:Establishing Watch connection for services with resourceversion: #{servicesResourceVersion} @ #{Time.now.utc.iso8601}") + watcher = KubernetesApiClient.watch("services", resource_version: servicesResourceVersion, allow_watch_bookmarks: true) + if watcher.nil? + $log.warn("in_kube_podinventory::watch_services:watch API returned nil watcher for watch connection with resource version: #{servicesResourceVersion} @ #{Time.now.utc.iso8601}") + else + watcher.each do |notice| + case notice["type"] + when "ADDED", "MODIFIED", "DELETED", "BOOKMARK" + item = notice["object"] + # extract latest resource version to use for watch reconnect + if !item.nil? && !item.empty? && + !item["metadata"].nil? && !item["metadata"].empty? && + !item["metadata"]["resourceVersion"].nil? && !item["metadata"]["resourceVersion"].empty? + servicesResourceVersion = item["metadata"]["resourceVersion"] + # $log.info("in_kube_podinventory::watch_services: received event type: #{notice["type"]} with resource version: #{servicesResourceVersion} @ #{Time.now.utc.iso8601}") + else + $log.warn("in_kube_podinventory::watch_services: received event type with no resourceVersion hence stopping watcher to reconnect @ #{Time.now.utc.iso8601}") + servicesResourceVersion = nil + # We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + break + end + if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? 
+ serviceItem = KubernetesApiClient.getOptimizedItem("services", item) + if !serviceItem.nil? && !serviceItem.empty? + @serviceCacheMutex.synchronize { + @serviceItemsCache[key] = serviceItem + } + else + $log.warn "in_kube_podinventory::watch_services:Received serviceItem either nil or empty @ #{Time.now.utc.iso8601}" + end + else + $log.warn "in_kube_podinventory::watch_services:Received serviceuid either nil or empty @ #{Time.now.utc.iso8601}" + end + elsif notice["type"] == "DELETED" + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + @serviceCacheMutex.synchronize { + @serviceItemsCache.delete(key) + } + end + end + when "ERROR" + servicesResourceVersion = nil + $log.warn("in_kube_podinventory::watch_services:ERROR event with :#{notice["object"]} @ #{Time.now.utc.iso8601}") + break + else + servicesResourceVersion = nil + $log.warn("in_kube_podinventory::watch_services:Unsupported event type #{notice["type"]} @ #{Time.now.utc.iso8601}") + break + end + end + end + rescue Net::ReadTimeout => errorStr + # $log.warn("in_kube_podinventory::watch_services:failed with an error: #{errorStr} @ #{Time.now.utc.iso8601}") + rescue => errorStr + $log.warn("in_kube_podinventory::watch_services:failed with an error: #{errorStr} @ #{Time.now.utc.iso8601}") + servicesResourceVersion = nil + sleep(5) # do not overwhelm the api-server if api-server down + ensure + watcher.finish if watcher + end + end + rescue => errorStr + $log.warn("in_kube_podinventory::watch_services:failed with an error: #{errorStr} @ #{Time.now.utc.iso8601}") + servicesResourceVersion = nil + end + end + $log.info("in_kube_podinventory::watch_services:End @ #{Time.now.utc.iso8601}") + end + + def watch_windows_nodes + $log.info("in_kube_podinventory::watch_windows_nodes:Start @ #{Time.now.utc.iso8601}") + nodesResourceVersion = nil + loop do + begin + if nodesResourceVersion.nil? + @windowsNodeNameCacheMutex.synchronize { + @windowsNodeNameListCache.clear() + } + continuationToken = nil + resourceUri = KubernetesApiClient.getNodesResourceUri("nodes?labelSelector=kubernetes.io%2Fos%3Dwindows&limit=#{@NODES_CHUNK_SIZE}") + $log.info("in_kube_podinventory::watch_windows_nodes:Getting windows nodes from Kube API: #{resourceUri} @ #{Time.now.utc.iso8601}") + continuationToken, nodeInventory, responseCode = KubernetesApiClient.getResourcesAndContinuationTokenV2(resourceUri) + if responseCode.nil? || responseCode != "200" + $log.info("in_kube_podinventory::watch_windows_nodes:Getting windows nodes from Kube API: #{resourceUri} failed with statuscode: #{responseCode} @ #{Time.now.utc.iso8601}") + else + $log.info("in_kube_podinventory::watch_windows_nodes:Done getting windows nodes from Kube API @ #{Time.now.utc.iso8601}") + if (!nodeInventory.nil? && !nodeInventory.empty?) + nodesResourceVersion = nodeInventory["metadata"]["resourceVersion"] + if (nodeInventory.key?("items") && !nodeInventory["items"].nil? && !nodeInventory["items"].empty?) + $log.info("in_kube_podinventory::watch_windows_nodes: number of windows node items :#{nodeInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") + nodeInventory["items"].each do |item| + key = item["metadata"]["name"] + if !key.nil? && !key.empty? 
+ @windowsNodeNameCacheMutex.synchronize { + if !@windowsNodeNameListCache.include?(key) + @windowsNodeNameListCache.push(key) + end + } + else + $log.warn "in_kube_podinventory::watch_windows_nodes:Received node name either nil or empty @ #{Time.now.utc.iso8601}" + end + end + end + else + $log.warn "in_kube_podinventory::watch_windows_nodes:Received empty nodeInventory @ #{Time.now.utc.iso8601}" + end + while (!continuationToken.nil? && !continuationToken.empty?) + continuationToken, nodeInventory, responseCode = KubernetesApiClient.getResourcesAndContinuationTokenV2(resourceUri + "&continue=#{continuationToken}") + if responseCode.nil? || responseCode != "200" + $log.info("in_kube_podinventory::watch_windows_nodes:Getting windows nodes from Kube API: #{resourceUri}&continue=#{continuationToken} failed with statuscode: #{responseCode} @ #{Time.now.utc.iso8601}") + nodesResourceVersion = nil + break # break, if any of the pagination call failed so that full cache can be rebuild with LIST again + else + if (!nodeInventory.nil? && !nodeInventory.empty?) + nodesResourceVersion = nodeInventory["metadata"]["resourceVersion"] + if (nodeInventory.key?("items") && !nodeInventory["items"].nil? && !nodeInventory["items"].empty?) + $log.info("in_kube_podinventory::watch_windows_nodes : number of windows node items :#{nodeInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") + nodeInventory["items"].each do |item| + key = item["metadata"]["name"] + if !key.nil? && !key.empty? + @windowsNodeNameCacheMutex.synchronize { + if !@windowsNodeNameListCache.include?(key) + @windowsNodeNameListCache.push(key) + end + } + else + $log.warn "in_kube_podinventory::watch_windows_nodes:Received node name either nil or empty @ #{Time.now.utc.iso8601}" + end + end + end + else + $log.warn "in_kube_podinventory::watch_windows_nodes:Received empty nodeInventory @ #{Time.now.utc.iso8601}" + end + end + end + end + end + if nodesResourceVersion.nil? || nodesResourceVersion.empty? || nodesResourceVersion == "0" + # https://github.com/kubernetes/kubernetes/issues/74022 + $log.warn("in_kube_podinventory::watch_windows_nodes:received nodesResourceVersion either nil or empty or 0 @ #{Time.now.utc.iso8601}") + nodesResourceVersion = nil # for the LIST to happen again + sleep(30) # do not overwhelm the api-server if api-server down + else + begin + $log.info("in_kube_podinventory::watch_windows_nodes:Establishing Watch connection for nodes with resourceversion: #{nodesResourceVersion} @ #{Time.now.utc.iso8601}") + watcher = KubernetesApiClient.watch("nodes", label_selector: "kubernetes.io/os=windows", resource_version: nodesResourceVersion, allow_watch_bookmarks: true) + if watcher.nil? + $log.warn("in_kube_podinventory::watch_windows_nodes:watch API returned nil watcher for watch connection with resource version: #{nodesResourceVersion} @ #{Time.now.utc.iso8601}") + else + watcher.each do |notice| + case notice["type"] + when "ADDED", "MODIFIED", "DELETED", "BOOKMARK" + item = notice["object"] + # extract latest resource version to use for watch reconnect + if !item.nil? && !item.empty? && + !item["metadata"].nil? && !item["metadata"].empty? && + !item["metadata"]["resourceVersion"].nil? && !item["metadata"]["resourceVersion"].empty? 
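The windows-node watcher above keeps only node names, and it filters server-side with a URL-encoded label selector baked into the resource URI (nodes?labelSelector=kubernetes.io%2Fos%3Dwindows&limit=...). A tiny sketch of producing that encoded selector, assuming stdlib ERB::Util for the encoding and a placeholder value for @NODES_CHUNK_SIZE:

    require "erb"

    label_selector = "kubernetes.io/os=windows"
    encoded = ERB::Util.url_encode(label_selector)   # => "kubernetes.io%2Fos%3Dwindows"
    nodes_chunk_size = 250                           # placeholder for @NODES_CHUNK_SIZE
    resource_uri = "nodes?labelSelector=#{encoded}&limit=#{nodes_chunk_size}"
    puts resource_uri

Only the node name is cached because the consumers (the pod inventory path and the WindowsNodeCount telemetry above) only need membership checks against the list.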
+ nodesResourceVersion = item["metadata"]["resourceVersion"] + # $log.info("in_kube_podinventory::watch_windows_nodes: received event type: #{notice["type"]} with resource version: #{nodesResourceVersion} @ #{Time.now.utc.iso8601}") + else + $log.warn("in_kube_podinventory::watch_windows_nodes: received event type with no resourceVersion hence stopping watcher to reconnect @ #{Time.now.utc.iso8601}") + nodesResourceVersion = nil + # We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + break + end + if notice["type"] == "ADDED" # we dont need to worry about modified event since we only need node name + key = item["metadata"]["name"] + @windowsNodeNameCacheMutex.synchronize { + if !@windowsNodeNameListCache.include?(key) + @windowsNodeNameListCache.push(key) + end + } + elsif notice["type"] == "DELETED" + key = item["metadata"]["name"] + @windowsNodeNameCacheMutex.synchronize { + @windowsNodeNameListCache.delete(key) + } + end + when "ERROR" + nodesResourceVersion = nil + $log.warn("in_kube_podinventory::watch_windows_nodes:ERROR event with :#{notice["object"]} @ #{Time.now.utc.iso8601}") + break + else + $log.warn("in_kube_podinventory::watch_windows_nodes:Unsupported event type #{notice["type"]} @ #{Time.now.utc.iso8601}") + end + end + end + rescue Net::ReadTimeout => errorStr + ## This expected if there is no activity more than readtimeout value used in the connection + # $log.warn("in_kube_podinventory::watch_windows_nodes:failed with an error: #{errorStr} @ #{Time.now.utc.iso8601}") + rescue => errorStr + $log.warn("in_kube_podinventory::watch_windows_nodes:failed with an error: #{errorStr} @ #{Time.now.utc.iso8601}") + nodesResourceVersion = nil + sleep(5) # do not overwhelm the api-server if api-server broken + ensure + watcher.finish if watcher + end + end + rescue => errorStr + $log.warn("in_kube_podinventory::watch_windows_nodes:failed with an error: #{errorStr} @ #{Time.now.utc.iso8601}") + nodesResourceVersion = nil + end + end + $log.info("in_kube_podinventory::watch_windows_nodes:End @ #{Time.now.utc.iso8601}") + end + + def writeMDMRecords(mdmRecordsJson) + maxRetryCount = 5 + initialRetryDelaySecs = 0.5 + retryAttemptCount = 1 + begin + f = File.open(Constants::MDM_POD_INVENTORY_STATE_FILE, "w") + if !f.nil? + isAcquiredLock = f.flock(File::LOCK_EX | File::LOCK_NB) + raise "in_kube_podinventory:writeMDMRecords:Failed to acquire file lock" if !isAcquiredLock + startTime = (Time.now.to_f * 1000).to_i + f.write(mdmRecordsJson) + f.flush + timetakenMs = ((Time.now.to_f * 1000).to_i - startTime) + $log.info "in_kube_podinventory:writeMDMRecords:Successfull and with time taken(ms): #{timetakenMs}" + else + raise "in_kube_podinventory:writeMDMRecords:Failed to open file for write" + end + rescue => err + if retryAttemptCount <= maxRetryCount + f.flock(File::LOCK_UN) if !f.nil? + f.close if !f.nil? + sleep (initialRetryDelaySecs * retryAttemptCount) + retryAttemptCount = retryAttemptCount + 1 + retry + end + $log.warn "in_kube_podinventory:writeMDMRecords failed with an error: #{err} after retries: #{maxRetryCount} @ #{Time.now.utc.iso8601}" + ApplicationInsightsUtility.sendExceptionTelemetry(err) + ensure + f.flock(File::LOCK_UN) if !f.nil? + f.close if !f.nil? 
+ end + end end # Kube_Pod_Input end # module diff --git a/source/plugins/ruby/in_kube_podmdminventory.rb b/source/plugins/ruby/in_kube_podmdminventory.rb new file mode 100644 index 000000000..bfc5227f3 --- /dev/null +++ b/source/plugins/ruby/in_kube_podmdminventory.rb @@ -0,0 +1,217 @@ +#!/usr/local/bin/ruby +# frozen_string_literal: true + +require "fluent/plugin/input" + +module Fluent::Plugin + require_relative "podinventory_to_mdm" + + class Kube_PodMDMInventory_Input < Input + Fluent::Plugin.register_input("kube_podmdminventory", self) + + @@MDMKubePodInventoryTag = "mdm.kubepodinventory" + + def initialize + super + require "yaml" + require "yajl/json_gem" + require "yajl" + require "set" + require "time" + require "net/http" + require "fileutils" + require_relative "ApplicationInsightsUtility" + require_relative "oms_common" + require_relative "omslog" + require_relative "constants" + require_relative "CustomMetricsUtils" + end + + config_param :run_interval, :time, :default => 60 + + def configure(conf) + super + @inventoryToMdmConvertor = Inventory2MdmConvertor.new() + end + + def start + if @run_interval + super + $log.info("in_kube_podmdminventory::start @ #{Time.now.utc.iso8601}") + @isCustomMetricsAvailability = CustomMetricsUtils.check_custom_metrics_availability + @finished = false + @prevCollectionTime = nil + @condition = ConditionVariable.new + @mutex = Mutex.new + @thread = Thread.new(&method(:run_periodic)) + end + end + + def shutdown + if @run_interval + @mutex.synchronize { + @finished = true + @condition.signal + } + @thread.join + super # This super must be at the end of shutdown method + end + end + + def enumerate + begin + if !@isCustomMetricsAvailability + $log.warn "in_kube_podmdminventory::enumerate:skipping since custom metrics not available either for this cluster type or the region" + else + parse_and_emit_records() + end + rescue => errorStr + $log.warn "in_kube_podmdminventory::enumerate:Failed in enumerate: #{errorStr}" + $log.debug_backtrace(errorStr.backtrace) + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + end + + def parse_and_emit_records() + begin + $log.info "in_kube_podmdminventory:parse_and_emit_records:Start:getMDMRecords @ #{Time.now.utc.iso8601}" + mdmPodRecords = getMDMRecords() + $log.info "in_kube_podmdminventory:parse_and_emit_records:End:getMDMRecords @ #{Time.now.utc.iso8601}" + if !mdmPodRecords.nil? && !mdmPodRecords.empty? && mdmPodRecords["items"].length > 0 + batchTime = mdmPodRecords["collectionTime"] # This is same batchTime used in KubePODinventory + mdmPodRecords["items"].each do |record| + @inventoryToMdmConvertor.process_pod_inventory_record(record) + @inventoryToMdmConvertor.process_record_for_pods_ready_metric(record["ControllerName"], record["Namespace"], record["PodReadyCondition"]) + containerRecords = record["containerRecords"] + if !containerRecords.nil? && !containerRecords.empty? && containerRecords.length > 0 + containerRecords.each do |containerRecord| + if !containerRecord["state"].nil? && !containerRecord["state"].empty? + @inventoryToMdmConvertor.process_record_for_terminated_job_metric(record["ControllerName"], record["Namespace"], containerRecord["state"]) + end + begin + if !containerRecord["lastState"].nil? && containerRecord["lastState"].keys.length == 1 + lastStateName = containerRecord["lastState"].keys[0] + lastStateObject = containerRecord["lastState"][lastStateName] + if !lastStateObject.is_a?(Hash) + raise "expected a hash object. 
This could signify a bug or a kubernetes API change" + end + if lastStateObject.key?("reason") && lastStateObject.key?("startedAt") && lastStateObject.key?("finishedAt") + lastStateReason = lastStateObject["reason"] + lastFinishedTime = lastStateObject["finishedAt"] + #Populate mdm metric for OOMKilled container count if lastStateReason is OOMKilled + if lastStateReason.downcase == Constants::REASON_OOM_KILLED + @inventoryToMdmConvertor.process_record_for_oom_killed_metric(record["ControllerName"], record["Namespace"], lastFinishedTime) + end + lastStateReason = nil + end + end + containerRestartCount = containerRecord["restartCount"] + #Populate mdm metric for container restart count if greater than 0 + if (!containerRestartCount.nil? && (containerRestartCount.is_a? Integer) && containerRestartCount > 0) + @inventoryToMdmConvertor.process_record_for_container_restarts_metric(record["ControllerName"], record["Namespace"], lastFinishedTime) + end + rescue => err + $log.warn "in_kube_podmdminventory:parse_and_emit_records: failed while processing ContainerLastStatus: #{err}" + $log.debug_backtrace(err.backtrace) + ApplicationInsightsUtility.sendExceptionTelemetry(err) + end + end + end + end + @log.info "in_kube_podmdminventory:parse_and_emit_records:Sending pod inventory mdm records to out_mdm @ #{Time.now.utc.iso8601}" + pod_inventory_mdm_records = @inventoryToMdmConvertor.get_pod_inventory_mdm_records(batchTime) + @log.info "in_kube_podmdminventory:parse_and_emit_records:pod_inventory_mdm_records.size #{pod_inventory_mdm_records.size} @ #{Time.now.utc.iso8601}" + if !pod_inventory_mdm_records.nil? && pod_inventory_mdm_records.length > 0 + startTime = (Time.now.to_f * 1000).to_i + recordCount = pod_inventory_mdm_records.length + while recordCount > 0 + record_array = pod_inventory_mdm_records.take(Constants::POD_MDM_EMIT_STREAM_BATCH_SIZE) + time_array = Array.new(record_array.length) { batchTime } + mdm_pod_inventory_es = Fluent::MultiEventStream.new(time_array, record_array) + router.emit_stream(@@MDMKubePodInventoryTag, mdm_pod_inventory_es) + pod_inventory_mdm_records = pod_inventory_mdm_records.drop(Constants::POD_MDM_EMIT_STREAM_BATCH_SIZE) + recordCount = pod_inventory_mdm_records.length + time_array = nil + end + flushTimeMs = (Time.now.to_f * 1000).to_i - startTime + @log.info "in_kube_podmdminventory:parse_and_emit_records:timetaken to flush all Pod MDM records: #{flushTimeMs} @ #{Time.now.utc.iso8601}" + end + end + rescue => errorStr + $log.warn "in_kube_podmdminventory:parse_and_emit_records: failed with an error #{errorStr}" + $log.debug_backtrace(errorStr.backtrace) + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + end + + def run_periodic + @mutex.lock + done = @finished + @nextTimeToRun = Time.now + @waitTimeout = @run_interval + until done + @nextTimeToRun = @nextTimeToRun + @run_interval + @now = Time.now + if @nextTimeToRun <= @now + @waitTimeout = 1 + @nextTimeToRun = @now + else + @waitTimeout = @nextTimeToRun - @now + end + @condition.wait(@mutex, @waitTimeout) + done = @finished + @mutex.unlock + if !done + begin + $log.info("in_kube_podmdminventory::run_periodic.enumerate.start #{Time.now.utc.iso8601}") + enumerate + $log.info("in_kube_podmdminventory::run_periodic.enumerate.end #{Time.now.utc.iso8601}") + rescue => errorStr + $log.warn "in_kube_podmdminventory::run_periodic: enumerate Failed to retrieve pod inventory: #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + end + @mutex.lock + end + @mutex.unlock + end 
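In the new in_kube_podmdminventory input, parse_and_emit_records slices the converted MDM records into fixed-size chunks (Constants::POD_MDM_EMIT_STREAM_BATCH_SIZE) and builds each Fluent::MultiEventStream from parallel time/record arrays before emitting. A framework-free sketch of that take/drop slicing, with the emit reduced to a counter and an arbitrary batch size:

    BATCH = 500   # stands in for Constants::POD_MDM_EMIT_STREAM_BATCH_SIZE

    batch_time = Time.now.utc
    records = Array.new(1_234) { |i| { "metric" => "podCount", "value" => i } }

    emitted = 0
    until records.empty?
      slice = records.take(BATCH)
      times = Array.new(slice.length) { batch_time }   # parallel array passed to MultiEventStream.new(times, slice)
      emitted += 1                                     # the real code calls router.emit_stream with the stream
      records = records.drop(BATCH)
    end
    puts "emitted #{emitted} chunks"                   # => 3 for 1,234 records with a 500-record batch size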
+ + def getMDMRecords() + maxRetryCount = 5 + initialRetryDelaySecs = 0.5 + retryAttemptCount = 1 + mdmRecords = {} + begin + f = File.open(Constants::MDM_POD_INVENTORY_STATE_FILE, "r") + if !f.nil? + isAcquiredLock = f.flock(File::LOCK_EX | File::LOCK_NB) + raise "in_kube_podmdminventory:getMDMRecords:Failed to acquire file lock" if !isAcquiredLock + startTime = (Time.now.to_f * 1000).to_i + mdmRecords = Yajl::Parser.parse(f) + timetakenMs = ((Time.now.to_f * 1000).to_i - startTime) + if mdmRecords.nil? || mdmRecords.empty? || mdmRecords["items"].nil? || mdmRecords["collectionTime"] == @prevCollectionTime + raise "in_kube_podmdminventory:getMDMRecords: either read mdmRecords is nil or empty or stale" + end + @prevCollectionTime = mdmRecords["collectionTime"] + $log.info "in_kube_podmdminventory:getMDMRecords:Number of MDM records: #{mdmRecords["items"].length} with time taken(ms) for read: #{timetakenMs} @ #{Time.now.utc.iso8601}" + else + raise "in_kube_podmdminventory:getMDMRecords:Failed to open file for read" + end + rescue => err + if retryAttemptCount <= maxRetryCount + f.flock(File::LOCK_UN) if !f.nil? + f.close if !f.nil? + sleep (initialRetryDelaySecs * retryAttemptCount) + retryAttemptCount = retryAttemptCount + 1 + retry + end + $log.warn "in_kube_podmdminventory:getMDMRecords failed with an error: #{err} after retries: #{maxRetryCount} @ #{Time.now.utc.iso8601}" + ApplicationInsightsUtility.sendExceptionTelemetry(err) + ensure + f.flock(File::LOCK_UN) if !f.nil? + f.close if !f.nil? + end + return mdmRecords + end + end # Kube_Pod_Input +end # module diff --git a/source/plugins/ruby/kubernetes_container_inventory.rb b/source/plugins/ruby/kubernetes_container_inventory.rb index 82e36c8cc..81889b61b 100644 --- a/source/plugins/ruby/kubernetes_container_inventory.rb +++ b/source/plugins/ruby/kubernetes_container_inventory.rb @@ -50,7 +50,7 @@ def getContainerInventoryRecords(podItem, batchTime, clusterCollectEnvironmentVa if !atLocation.nil? containerInventoryRecord["ImageId"] = imageIdValue[(atLocation + 1)..-1] end - end + end containerInventoryRecord["ExitCode"] = 0 isContainerTerminated = false isContainerWaiting = false @@ -84,19 +84,19 @@ def getContainerInventoryRecords(podItem, batchTime, clusterCollectEnvironmentVa end containerInfoMap = containersInfoMap[containerName] - # image can be in any one of below format in spec - # repository/image[:imagetag | @digest], repository/image:imagetag@digest, repo/image, image:imagetag, image@digest, image + # image can be in any one of below format in spec + # repository/image[:imagetag | @digest], repository/image:imagetag@digest, repo/image, image:imagetag, image@digest, image imageValue = containerInfoMap["image"] if !imageValue.nil? && !imageValue.empty? # Find delimiters in image format atLocation = imageValue.index("@") - isDigestSpecified = false + isDigestSpecified = false if !atLocation.nil? # repository/image@digest or repository/image:imagetag@digest, image@digest imageValue = imageValue[0..(atLocation - 1)] # Use Digest from the spec's image in case when the status doesnt get populated i.e. container in pending or image pull back etc. if containerInventoryRecord["ImageId"].nil? || containerInventoryRecord["ImageId"].empty? 
- containerInventoryRecord["ImageId"] = imageValue[(atLocation + 1)..-1] + containerInventoryRecord["ImageId"] = imageValue[(atLocation + 1)..-1] end isDigestSpecified = true end @@ -105,14 +105,14 @@ def getContainerInventoryRecords(podItem, batchTime, clusterCollectEnvironmentVa if !colonLocation.nil? if slashLocation.nil? # image:imagetag - containerInventoryRecord["Image"] = imageValue[0..(colonLocation - 1)] + containerInventoryRecord["Image"] = imageValue[0..(colonLocation - 1)] else # repository/image:imagetag containerInventoryRecord["Repository"] = imageValue[0..(slashLocation - 1)] containerInventoryRecord["Image"] = imageValue[(slashLocation + 1)..(colonLocation - 1)] end containerInventoryRecord["ImageTag"] = imageValue[(colonLocation + 1)..-1] - else + else if slashLocation.nil? # image containerInventoryRecord["Image"] = imageValue @@ -120,15 +120,15 @@ def getContainerInventoryRecords(podItem, batchTime, clusterCollectEnvironmentVa # repo/image containerInventoryRecord["Repository"] = imageValue[0..(slashLocation - 1)] containerInventoryRecord["Image"] = imageValue[(slashLocation + 1)..-1] - end + end # if no tag specified, k8s assumes latest as imagetag and this is same behavior from docker API and from status. # Ref - https://kubernetes.io/docs/concepts/containers/images/#image-names - if isDigestSpecified == false + if isDigestSpecified == false containerInventoryRecord["ImageTag"] = "latest" end - end + end end - + podName = containerInfoMap["PodName"] namespace = containerInfoMap["Namespace"] # containername in the format what docker sees @@ -199,7 +199,12 @@ def getContainersInfoMap(podItem, isWindows) cmdValue = container["command"] cmdValueString = (cmdValue.nil?) ? "" : cmdValue.to_s containerInfoMap["Command"] = cmdValueString - containerInfoMap["EnvironmentVar"] = obtainContainerEnvironmentVarsFromPodsResponse(podItem, container) + if isWindows + # For windows container inventory, we dont need to get envvars from pods response since its already taken care in KPI as part of pod optimized item + containerInfoMap["EnvironmentVar"] = container["env"] + else + containerInfoMap["EnvironmentVar"] = obtainContainerEnvironmentVarsFromPodsResponse(podItem, container) + end containersInfoMap[containerName] = containerInfoMap end end @@ -212,47 +217,47 @@ def getContainersInfoMap(podItem, isWindows) return containersInfoMap end - def obtainContainerEnvironmentVars(containerId) + def obtainContainerEnvironmentVars(containerId) envValueString = "" begin - isCGroupPidFetchRequired = false + isCGroupPidFetchRequired = false if !@@containerCGroupCache.has_key?(containerId) - isCGroupPidFetchRequired = true + isCGroupPidFetchRequired = true else cGroupPid = @@containerCGroupCache[containerId] - if cGroupPid.nil? || cGroupPid.empty? + if cGroupPid.nil? || cGroupPid.empty? isCGroupPidFetchRequired = true @@containerCGroupCache.delete(containerId) - elsif !File.exist?("/hostfs/proc/#{cGroupPid}/environ") + elsif !File.exist?("/hostfs/proc/#{cGroupPid}/environ") isCGroupPidFetchRequired = true - @@containerCGroupCache.delete(containerId) - end + @@containerCGroupCache.delete(containerId) + end end - if isCGroupPidFetchRequired + if isCGroupPidFetchRequired Dir["/hostfs/proc/*/cgroup"].each do |filename| begin if File.file?(filename) && File.exist?(filename) && File.foreach(filename).grep(/#{containerId}/).any? 
# file full path is /hostfs/proc//cgroup - cGroupPid = filename.split("/")[3] - if is_number?(cGroupPid) + cGroupPid = filename.split("/")[3] + if is_number?(cGroupPid) if @@containerCGroupCache.has_key?(containerId) - tempCGroupPid = @@containerCGroupCache[containerId] + tempCGroupPid = @@containerCGroupCache[containerId] if tempCGroupPid.to_i > cGroupPid.to_i @@containerCGroupCache[containerId] = cGroupPid end else @@containerCGroupCache[containerId] = cGroupPid - end + end end end - rescue SystemCallError # ignore Error::ENOENT,Errno::ESRCH which is expected if any of the container gone while we read - end - end + rescue SystemCallError # ignore Error::ENOENT,Errno::ESRCH which is expected if any of the container gone while we read + end + end end cGroupPid = @@containerCGroupCache[containerId] if !cGroupPid.nil? && !cGroupPid.empty? - environFilePath = "/hostfs/proc/#{cGroupPid}/environ" + environFilePath = "/hostfs/proc/#{cGroupPid}/environ" if File.exist?(environFilePath) # Skip environment variable processing if it contains the flag AZMON_COLLECT_ENV=FALSE # Check to see if the environment variable collection is disabled for this container. @@ -265,7 +270,7 @@ def obtainContainerEnvironmentVars(containerId) if !envVars.nil? && !envVars.empty? envVars = envVars.split("\0") envValueString = envVars.to_json - envValueStringLength = envValueString.length + envValueStringLength = envValueString.length if envValueStringLength >= 200000 lastIndex = envValueString.rindex("\",") if !lastIndex.nil? @@ -376,6 +381,7 @@ def deleteCGroupCacheEntryForDeletedContainer(containerId) ApplicationInsightsUtility.sendExceptionTelemetry(error) end end + def is_number?(value) true if Integer(value) rescue false end diff --git a/source/plugins/ruby/out_mdm.rb b/source/plugins/ruby/out_mdm.rb index e882f5ec7..4561cdd9a 100644 --- a/source/plugins/ruby/out_mdm.rb +++ b/source/plugins/ruby/out_mdm.rb @@ -12,6 +12,7 @@ def initialize super require "net/http" require "net/https" + require "securerandom" require "uri" require "yajl/json_gem" require_relative "KubernetesApiClient" @@ -43,7 +44,6 @@ def initialize @data_hash = {} @parsed_token_uri = nil - @http_client = nil @token_expiry_time = Time.now @cached_access_token = String.new @last_post_attempt_time = Time.now @@ -63,6 +63,7 @@ def initialize @mdm_exceptions_hash = {} @mdm_exceptions_count = 0 @mdm_exception_telemetry_time_tracker = DateTime.now.to_time.to_i + @proxy = nil end def configure(conf) @@ -110,15 +111,7 @@ def start end @@post_request_url = @@post_request_url_template % { metrics_endpoint: metrics_endpoint, aks_resource_id: aks_resource_id } @post_request_uri = URI.parse(@@post_request_url) - proxy = (ProxyUtils.getProxyConfiguration) - if proxy.nil? || proxy.empty? - @http_client = Net::HTTP.new(@post_request_uri.host, @post_request_uri.port) - else - @log.info "Proxy configured on this cluster: #{aks_resource_id}" - @http_client = Net::HTTP.new(@post_request_uri.host, @post_request_uri.port, proxy[:addr], proxy[:port], proxy[:user], proxy[:pass]) - end - - @http_client.use_ssl = true + @proxy = (ProxyUtils.getProxyConfiguration) @log.info "POST Request url: #{@@post_request_url}" ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMPluginStart", {}) @@ -165,6 +158,10 @@ def start end end + def multi_workers_ready? 
+ return true + end + # get the access token only if the time to expiry is less than 5 minutes and get_access_token_backoff has expired def get_access_token if (Time.now > @get_access_token_backoff_expiry) @@ -356,47 +353,56 @@ def send_to_mdm(post_body) else access_token = get_access_token end + if @proxy.nil? || @proxy.empty? + http_client = Net::HTTP.new(@post_request_uri.host, @post_request_uri.port) + else + @log.info "Proxy configured on this cluster: #{aks_resource_id}" + http_client = Net::HTTP.new(@post_request_uri.host, @post_request_uri.port, @proxy[:addr], @proxy[:port], @proxy[:user], @proxy[:pass]) + end + http_client.use_ssl = true + requestId = SecureRandom.uuid.to_s request = Net::HTTP::Post.new(@post_request_uri.request_uri) request["Content-Type"] = "application/x-ndjson" request["Authorization"] = "Bearer #{access_token}" + request["x-request-id"] = requestId request.body = post_body.join("\n") - @log.info "REQUEST BODY SIZE #{request.body.bytesize / 1024}" - response = @http_client.request(request) + @log.info "REQUEST BODY SIZE #{request.body.bytesize / 1024} for requestId: #{requestId}" + response = http_client.request(request) response.value # this throws for non 200 HTTP response code - @log.info "HTTP Post Response Code : #{response.code}" + @log.info "HTTP Post Response Code : #{response.code} for requestId: #{requestId}" if @last_telemetry_sent_time.nil? || @last_telemetry_sent_time + 60 * 60 < Time.now ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMSendSuccessful", {}) @last_telemetry_sent_time = Time.now end rescue Net::HTTPClientException => e # see https://docs.ruby-lang.org/en/2.6.0/NEWS.html about deprecating HTTPServerException and adding HTTPClientException if !response.nil? && !response.body.nil? #body will have actual error - @log.info "Failed to Post Metrics to MDM : #{e} Response.body: #{response.body}" + @log.info "Failed to Post Metrics to MDM for requestId: #{requestId} exception: #{e} Response.body: #{response.body}" else - @log.info "Failed to Post Metrics to MDM : #{e} Response: #{response}" + @log.info "Failed to Post Metrics to MDM for requestId: #{requestId} exception: #{e} Response: #{response}" end @log.debug_backtrace(e.backtrace) if !response.code.empty? && response.code == 403.to_s - @log.info "Response Code #{response.code} Updating @last_post_attempt_time" + @log.info "Response Code #{response.code} for requestId: #{requestId} Updating @last_post_attempt_time" @last_post_attempt_time = Time.now @first_post_attempt_made = true # Not raising exception, as that will cause retries to happen elsif !response.code.empty? 
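Two other changes in out_mdm.rb are visible above: multi_workers_ready? returns true so the plugin can run under Fluentd multi-worker configurations, and send_to_mdm now builds a Net::HTTP client per POST (replacing the removed shared @http_client) and tags each request with a random x-request-id for correlating failures with server-side logs. A trimmed sketch of that request construction; the endpoint, token, and body below are placeholders and the request is not actually sent:

    require "net/http"
    require "net/https"
    require "securerandom"
    require "uri"

    post_uri = URI.parse("https://example.monitoring.azure.com/metrics")   # placeholder endpoint
    proxy = nil   # or { addr: "proxy.local", port: 3128, user: nil, pass: nil }

    http_client =
      if proxy.nil? || proxy.empty?
        Net::HTTP.new(post_uri.host, post_uri.port)
      else
        Net::HTTP.new(post_uri.host, post_uri.port, proxy[:addr], proxy[:port], proxy[:user], proxy[:pass])
      end
    http_client.use_ssl = true

    request_id = SecureRandom.uuid.to_s
    request = Net::HTTP::Post.new(post_uri.request_uri)
    request["Content-Type"]  = "application/x-ndjson"
    request["Authorization"] = "Bearer <access-token>"   # placeholder
    request["x-request-id"]  = request_id
    request.body = ["{\"podCount\":1}"].join("\n")
    # response = http_client.request(request)            # skipped here; needs a real endpoint and token
    puts "prepared MDM POST #{request_id} (#{request.body.bytesize} bytes)"

Constructing the client per request also avoids holding long-lived shared connection state, which likely pairs with the move to multi-worker support.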
&& response.code.start_with?("4") # Log 400 errors and continue - @log.info "Non-retryable HTTPClientException when POSTing Metrics to MDM #{e} Response: #{response}" + @log.info "Non-retryable HTTPClientException when POSTing Metrics to MDM for requestId: #{requestId} exception: #{e} Response: #{response}" else # raise if the response code is non-400 - @log.info "HTTPServerException when POSTing Metrics to MDM #{e} Response: #{response}" + @log.info "HTTPServerException when POSTing Metrics to MDM for requestId: #{requestId} exception: #{e} Response: #{response}" raise e end # Adding exceptions to hash to aggregate and send telemetry for all 400 error codes exception_aggregator(e) rescue Errno::ETIMEDOUT => e - @log.info "Timed out when POSTing Metrics to MDM : #{e} Response: #{response}" + @log.info "Timed out when POSTing Metrics to MDM for requestId: #{requestId} exception: #{e} Response: #{response}" @log.debug_backtrace(e.backtrace) raise e rescue Exception => e - @log.info "Exception POSTing Metrics to MDM : #{e} Response: #{response}" + @log.info "Exception POSTing Metrics to MDM for requestId: #{requestId} exception: #{e} Response: #{response}" @log.debug_backtrace(e.backtrace) raise e end diff --git a/source/plugins/ruby/podinventory_to_mdm.rb b/source/plugins/ruby/podinventory_to_mdm.rb index c24a91a87..a7f9c5435 100644 --- a/source/plugins/ruby/podinventory_to_mdm.rb +++ b/source/plugins/ruby/podinventory_to_mdm.rb @@ -129,7 +129,7 @@ def get_pod_inventory_mdm_records(batch_time) controllerNameDimValue: podControllerNameDimValue, podCountMetricValue: value, } - records.push(JSON.parse(record)) + records.push(Yajl::Parser.parse(record)) } #Add pod metric records @@ -218,24 +218,13 @@ def process_record_for_container_restarts_metric(podControllerNameDimValue, podN end end - def process_record_for_pods_ready_metric(podControllerNameDimValue, podNamespaceDimValue, podStatusConditions) + def process_record_for_pods_ready_metric(podControllerNameDimValue, podNamespaceDimValue, podReadyCondition) if @process_incoming_stream begin @log.info "in process_record_for_pods_ready_metric..." if podControllerNameDimValue.nil? || podControllerNameDimValue.empty? podControllerNameDimValue = "No Controller" end - podReadyCondition = false - if !podStatusConditions.nil? && !podStatusConditions.empty? - podStatusConditions.each do |condition| - if condition["type"] == "Ready" - if condition["status"].downcase == "true" - podReadyCondition = true - end - break #Exit the for loop since we found the ready condition - end - end - end MdmMetricsGenerator.generatePodReadyMetrics(podControllerNameDimValue, podNamespaceDimValue, podReadyCondition) rescue => errorStr From 5f2900a4ac32a85a15ea2b04a45f52a83e7190f0 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 14 Jun 2022 13:59:31 -0700 Subject: [PATCH 237/301] changes related to june 2022 release (#778) --- ReleaseNotes.md | 17 ++++++++++++++++- build/version | 4 ++-- charts/azuremonitor-containers/Chart.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 6 +++--- kubernetes/linux/Dockerfile | 2 +- kubernetes/linux/Dockerfile.multiarch | 2 +- kubernetes/omsagent.yaml | 14 +++++++------- kubernetes/windows/Dockerfile | 2 +- 8 files changed, 32 insertions(+), 17 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 176cbc2b8..fb992f09c 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,21 @@ additional questions or comments. 
Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 6/14/2022 - +##### Version microsoft/oms:ciprod06142022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06142022 (linux) +##### Version microsoft/oms:win-ciprod06142022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06142022 (windows) +##### Code change log +- Linux Agent + - Prometheus sidecar memory optimization + - Fix for issue of Telegraf connecting to FluentD Port 25228 during container startup + - Add integration for collecting Subnets IP usage metrics for Azure CNI (turned OFF by default) + - Replicaset Agent improvements related to supporting of 5K Node cluster scale +- Common (Linux & Windows Agent) + - Make custom metrics endpoint configurable to support edge environments +- Misc + - Moved Trivy image scan to Azure Pipeline + + ### 5/19/2022 - ##### Version microsoft/oms:ciprod05192022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05192022 (linux) ##### Version microsoft/oms:win-ciprod05192022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod05192022 (windows) @@ -19,7 +34,7 @@ Note : The agent version(s) below has dates (ciprod), which indicate t - PodReadyPercentage metric bug fix - add cifs & fuse file systems to ignore list - CA Cert Fix for Mariner Hosts in Air Gap - - Disk usage metrics will no longer be collected for the paths "/mnt/containers" and "/mnt/docker" + - Disk usage metrics will no longer be collected for the paths "/mnt/containers" and "/mnt/docker" - Windows Agent - Ruby version upgrade from 2.6.5.1 to 2.7.5.1 - Added Support for Windows Server 2022 diff --git a/build/version b/build/version index 19787cb72..dcbea0179 100644 --- a/build/version +++ b/build/version @@ -2,11 +2,11 @@ # Build Version Information -CONTAINER_BUILDVERSION_MAJOR=17 +CONTAINER_BUILDVERSION_MAJOR=18 CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 CONTAINER_BUILDVERSION_BUILDNR=0 -CONTAINER_BUILDVERSION_DATE=20220519 +CONTAINER_BUILDVERSION_DATE=20220614 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 8e9f4847f..a8268d63d 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.9.3 +version: 2.9.4 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 91b8270cd..64f48212b 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -22,10 +22,10 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod05192022" - tagWindows: "win-ciprod05192022" + tag: "ciprod06142022" + tagWindows: "win-ciprod06142022" pullPolicy: IfNotPresent - dockerProviderVersion: "17.0.0-0" + dockerProviderVersion: "18.0.0-0" agentVersion: "azure-mdsd-1.17.0" winAgentVersion: "0.0.0-0" # there is no base agent version for windows agent diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 6f68f664e..af1cab3d9 100644 --- a/kubernetes/linux/Dockerfile +++ 
b/kubernetes/linux/Dockerfile @@ -17,7 +17,7 @@ ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python-ctypes init-system-helpers net-tools rsyslog cron vim dmidecode apt-transport-https gnupg && rm -rf /var/lib/apt/lists/* COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs defaultpromenvvariables-sidecar mdsd.xml envmdsd logrotate.conf $tmpdir/ -ARG IMAGE_TAG=ciprod05192022 +ARG IMAGE_TAG=ciprod06142022 ENV AGENT_VERSION ${IMAGE_TAG} WORKDIR ${tmpdir} diff --git a/kubernetes/linux/Dockerfile.multiarch b/kubernetes/linux/Dockerfile.multiarch index fd0330d5d..ad177d8f0 100644 --- a/kubernetes/linux/Dockerfile.multiarch +++ b/kubernetes/linux/Dockerfile.multiarch @@ -29,7 +29,7 @@ RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl COPY --from=builder /src/kubernetes/linux/Linux_ULINUX_1.0_*_64_Release/docker-cimprov-*.*.*-*.*.sh $tmpdir/ COPY kubernetes/linux/setup.sh kubernetes/linux/main.sh kubernetes/linux/defaultpromenvvariables kubernetes/linux/defaultpromenvvariables-rs kubernetes/linux/defaultpromenvvariables-sidecar kubernetes/linux/mdsd.xml kubernetes/linux/envmdsd kubernetes/linux/logrotate.conf $tmpdir/ -ARG IMAGE_TAG=ciprod05192022 +ARG IMAGE_TAG=ciprod06142022 ENV AGENT_VERSION ${IMAGE_TAG} WORKDIR ${tmpdir} diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index d2d7a0c87..bb83f6faf 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -334,7 +334,7 @@ spec: tier: node annotations: agentVersion: "azure-mdsd-1.17.0" - dockerProviderVersion: "17.0.0-0" + dockerProviderVersion: "18.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -379,7 +379,7 @@ spec: # - NET_ADMIN # - NET_RAW - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05192022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06142022" imagePullPolicy: IfNotPresent resources: limits: @@ -468,7 +468,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05192022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06142022" imagePullPolicy: IfNotPresent resources: limits: @@ -612,7 +612,7 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "azure-mdsd-1.17.0" - dockerProviderVersion: "17.0.0-0" + dockerProviderVersion: "18.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -653,7 +653,7 @@ spec: # - NET_ADMIN # - NET_RAW - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05192022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06142022" imagePullPolicy: IfNotPresent resources: limits: @@ -821,7 +821,7 @@ spec: tier: node-win annotations: agentVersion: "0.0.0-0" - dockerProviderVersion: "17.0.0-0" + dockerProviderVersion: "18.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -831,7 +831,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod05192022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06142022" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 383652e0e..3b663132e 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -5,7 +5,7 @@ 
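This release commit repeats the same tag bump (`ciprod05192022` → `ciprod06142022`, `win-ciprod05192022` → `win-ciprod06142022`, provider version `17.0.0-0` → `18.0.0-0`) across the Helm values, both Linux Dockerfiles, omsagent.yaml and, in the hunk that follows, the Windows Dockerfile. A hypothetical helper, not part of this repo, that a release author could run to confirm the new tag landed in every expected file; the file list and paths are assumptions based on the files touched here:

```ruby
# Hypothetical release sanity check: every file touched by a tag bump
# should reference the new ciprod tag.
RELEASE_FILES = [
  "kubernetes/linux/Dockerfile",
  "kubernetes/linux/Dockerfile.multiarch",
  "kubernetes/windows/Dockerfile",
  "kubernetes/omsagent.yaml",
  "charts/azuremonitor-containers/values.yaml",
].freeze

def files_missing_tag(repo_root, tag)
  RELEASE_FILES.reject do |rel_path|
    File.read(File.join(repo_root, rel_path)).include?(tag)
  end
end

# e.g. files_missing_tag(".", "ciprod06142022") => [] once the bump is complete
```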
MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod05192022 +ARG IMAGE_TAG=win-ciprod06142022 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement From fd7577b271a01a5d5df53383378d87334ddab87e Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 21 Jun 2022 18:04:05 -0700 Subject: [PATCH 238/301] Gangams/ARM Template updates for the DCR API version and stream group (#784) * update to use stream group * update DCR api version & stream group --- .../existingClusterOnboarding.json | 28 ++----------------- .../existingClusterOnboarding.json | 28 ++----------------- 2 files changed, 6 insertions(+), 50 deletions(-) diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index c42a1d074..7664e7705 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -61,7 +61,7 @@ "resources": [ { "type": "Microsoft.Insights/dataCollectionRules", - "apiVersion": "2019-11-01-preview", + "apiVersion": "2021-09-01-preview", "name": "[variables('dcrName')]", "location": "[parameters('workspaceLocation')]", "tags": "[parameters('resourceTagValues')]", @@ -72,18 +72,7 @@ { "name": "ContainerInsightsExtension", "streams": [ - "Microsoft-Perf", - "Microsoft-ContainerInventory", - "Microsoft-ContainerLog", - "Microsoft-ContainerLogV2", - "Microsoft-ContainerNodeInventory", - "Microsoft-KubeEvents", - "Microsoft-KubeMonAgentEvents", - "Microsoft-KubeNodeInventory", - "Microsoft-KubePodInventory", - "Microsoft-KubePVInventory", - "Microsoft-KubeServices", - "Microsoft-InsightsMetrics" + "Microsoft-ContainerInsights-Group-Default" ], "extensionName": "ContainerInsights" } @@ -100,18 +89,7 @@ "dataFlows": [ { "streams": [ - "Microsoft-Perf", - "Microsoft-ContainerInventory", - "Microsoft-ContainerLog", - "Microsoft-ContainerLogV2", - "Microsoft-ContainerNodeInventory", - "Microsoft-KubeEvents", - "Microsoft-KubeMonAgentEvents", - "Microsoft-KubeNodeInventory", - "Microsoft-KubePodInventory", - "Microsoft-KubePVInventory", - "Microsoft-KubeServices", - "Microsoft-InsightsMetrics" + "Microsoft-ContainerInsights-Group-Default" ], "destinations": [ "ciworkspace" diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json index a4a4e3453..3010226f9 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json @@ -75,7 +75,7 @@ "resources": [ { "type": "Microsoft.Insights/dataCollectionRules", - "apiVersion": "2019-11-01-preview", + "apiVersion": "2021-09-01-preview", "name": "[variables('dcrName')]", "location": "[parameters('workspaceRegion')]", "tags": "[parameters('resourceTagValues')]", @@ -86,18 +86,7 @@ { "name": "ContainerInsightsExtension", "streams": [ - "Microsoft-Perf", - "Microsoft-ContainerInventory", - "Microsoft-ContainerLog", - "Microsoft-ContainerLogV2", - "Microsoft-ContainerNodeInventory", - "Microsoft-KubeEvents", - "Microsoft-KubeMonAgentEvents", - "Microsoft-KubeNodeInventory", - "Microsoft-KubePodInventory", - "Microsoft-KubePVInventory", - "Microsoft-KubeServices", - 
"Microsoft-InsightsMetrics" + "Microsoft-ContainerInsights-Group-Default" ], "extensionName": "ContainerInsights" } @@ -114,18 +103,7 @@ "dataFlows": [ { "streams": [ - "Microsoft-Perf", - "Microsoft-ContainerInventory", - "Microsoft-ContainerLog", - "Microsoft-ContainerLogV2", - "Microsoft-ContainerNodeInventory", - "Microsoft-KubeEvents", - "Microsoft-KubeMonAgentEvents", - "Microsoft-KubeNodeInventory", - "Microsoft-KubePodInventory", - "Microsoft-KubePVInventory", - "Microsoft-KubeServices", - "Microsoft-InsightsMetrics" + "Microsoft-ContainerInsights-Group-Default" ], "destinations": [ "ciworkspace" From 7e7a2b2d9c83e6fd38ddfa6e953053608412c1c7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Jun 2022 06:54:06 -0700 Subject: [PATCH 239/301] Bump Newtonsoft.Json in /build/windows/installer/certificategenerator (#785) Bumps [Newtonsoft.Json](https://github.com/JamesNK/Newtonsoft.Json) from 12.0.3 to 13.0.1. - [Release notes](https://github.com/JamesNK/Newtonsoft.Json/releases) - [Commits](https://github.com/JamesNK/Newtonsoft.Json/compare/12.0.3...13.0.1) --- updated-dependencies: - dependency-name: Newtonsoft.Json dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .../installer/certificategenerator/CertificateGenerator.csproj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/windows/installer/certificategenerator/CertificateGenerator.csproj b/build/windows/installer/certificategenerator/CertificateGenerator.csproj index b14cc4502..dfbbc51e0 100644 --- a/build/windows/installer/certificategenerator/CertificateGenerator.csproj +++ b/build/windows/installer/certificategenerator/CertificateGenerator.csproj @@ -8,7 +8,7 @@ - + From 1da9eac144c3869a6e16e7b216068a9b9657922d Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 27 Jun 2022 09:56:53 -0700 Subject: [PATCH 240/301] Gangams/fix file access exceptions (#787) * fix file access exception * move insights metrics conf to common * clear file content before writing content * add timestamp to debug logs * release updates for linux agent --- ReleaseNotes.md | 7 +++ build/linux/installer/conf/kube.conf | 54 +++++++++---------- build/version | 4 +- charts/azuremonitor-containers/values.yaml | 4 +- kubernetes/linux/Dockerfile | 2 +- kubernetes/linux/Dockerfile.multiarch | 2 +- kubernetes/omsagent.yaml | 12 ++--- source/plugins/ruby/in_kube_nodes.rb | 11 ++-- source/plugins/ruby/in_kube_perfinventory.rb | 6 +-- source/plugins/ruby/in_kube_podinventory.rb | 11 ++-- .../plugins/ruby/in_kube_podmdminventory.rb | 8 +-- 11 files changed, 65 insertions(+), 56 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index fb992f09c..39eeb6a50 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,13 @@ additional questions or comments. 
Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 06/27/2022 - +##### Version microsoft/oms:ciprod06272022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022 (linux) +##### Code change log +- Fixes for following bugs in ciprod06142022 which are caught in AKS Canary region deployment + - Fix the exceptions related to file write & read access of the MDM inventory state file + - Fix for missing Node GPU allocatable & capacity metrics for the clusters which are whitelisted for AKS LargeCluster Private Preview feature + ### 6/14/2022 - ##### Version microsoft/oms:ciprod06142022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06142022 (linux) ##### Version microsoft/oms:win-ciprod06142022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06142022 (windows) diff --git a/build/linux/installer/conf/kube.conf b/build/linux/installer/conf/kube.conf index 5b3837748..c506f849d 100644 --- a/build/linux/installer/conf/kube.conf +++ b/build/linux/installer/conf/kube.conf @@ -30,6 +30,33 @@ keepalive true + #InsightsMetrics + + @type forward + @id out_insights_metrics_fwd + @log_level debug + send_timeout 30 + connect_timeout 30 + heartbeat_type none + + host 0.0.0.0 + port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" + + + @type file + path /var/opt/microsoft/docker-cimprov/state/insightsmetrics*.buffer + overflow_action drop_oldest_chunk + chunk_limit_size 4m + queue_limit_length "#{ENV['FLUENTD_QUEUE_LIMIT_LENGTH']}" + flush_interval "#{ENV['FLUENTD_FLUSH_INTERVAL']}" + retry_max_times 10 + retry_wait 5s + retry_max_interval 5m + flush_thread_count 5 + + keepalive true + + #custom_metrics_mdm filter plugin for perf data from windows nodes @type cadvisor2mdm @@ -340,33 +367,6 @@ keepalive true - #InsightsMetrics - #kubestate - - @type forward - @log_level debug - send_timeout 30 - connect_timeout 30 - heartbeat_type none - - host 0.0.0.0 - port "#{ENV['MDSD_FLUENT_SOCKET_PORT']}" - - - @type file - path /var/opt/microsoft/docker-cimprov/state/insightsmetrics*.buffer - overflow_action drop_oldest_chunk - chunk_limit_size 4m - queue_limit_length "#{ENV['FLUENTD_QUEUE_LIMIT_LENGTH']}" - flush_interval "#{ENV['FLUENTD_FLUSH_INTERVAL']}" - retry_max_times 10 - retry_wait 5s - retry_max_interval 5m - flush_thread_count 5 - - keepalive true - - @type mdm @id out_mdm_perf diff --git a/build/version b/build/version index dcbea0179..f2021864e 100644 --- a/build/version +++ b/build/version @@ -4,9 +4,9 @@ CONTAINER_BUILDVERSION_MAJOR=18 CONTAINER_BUILDVERSION_MINOR=0 -CONTAINER_BUILDVERSION_PATCH=0 +CONTAINER_BUILDVERSION_PATCH=1 CONTAINER_BUILDVERSION_BUILDNR=0 -CONTAINER_BUILDVERSION_DATE=20220614 +CONTAINER_BUILDVERSION_DATE=20220627 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 64f48212b..d528115cf 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -22,10 +22,10 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod06142022" + tag: "ciprod06272022" tagWindows: "win-ciprod06142022" pullPolicy: IfNotPresent - dockerProviderVersion: "18.0.0-0" + dockerProviderVersion: "18.0.1-0" agentVersion: "azure-mdsd-1.17.0" winAgentVersion: "0.0.0-0" # there is no base agent version for windows agent diff 
--git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index af1cab3d9..b9927be7c 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -17,7 +17,7 @@ ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python-ctypes init-system-helpers net-tools rsyslog cron vim dmidecode apt-transport-https gnupg && rm -rf /var/lib/apt/lists/* COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs defaultpromenvvariables-sidecar mdsd.xml envmdsd logrotate.conf $tmpdir/ -ARG IMAGE_TAG=ciprod06142022 +ARG IMAGE_TAG=ciprod06272022 ENV AGENT_VERSION ${IMAGE_TAG} WORKDIR ${tmpdir} diff --git a/kubernetes/linux/Dockerfile.multiarch b/kubernetes/linux/Dockerfile.multiarch index ad177d8f0..c96a93802 100644 --- a/kubernetes/linux/Dockerfile.multiarch +++ b/kubernetes/linux/Dockerfile.multiarch @@ -29,7 +29,7 @@ RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl COPY --from=builder /src/kubernetes/linux/Linux_ULINUX_1.0_*_64_Release/docker-cimprov-*.*.*-*.*.sh $tmpdir/ COPY kubernetes/linux/setup.sh kubernetes/linux/main.sh kubernetes/linux/defaultpromenvvariables kubernetes/linux/defaultpromenvvariables-rs kubernetes/linux/defaultpromenvvariables-sidecar kubernetes/linux/mdsd.xml kubernetes/linux/envmdsd kubernetes/linux/logrotate.conf $tmpdir/ -ARG IMAGE_TAG=ciprod06142022 +ARG IMAGE_TAG=ciprod06272022 ENV AGENT_VERSION ${IMAGE_TAG} WORKDIR ${tmpdir} diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index bb83f6faf..88d2fdda8 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -334,7 +334,7 @@ spec: tier: node annotations: agentVersion: "azure-mdsd-1.17.0" - dockerProviderVersion: "18.0.0-0" + dockerProviderVersion: "18.0.1-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -379,7 +379,7 @@ spec: # - NET_ADMIN # - NET_RAW - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06142022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022" imagePullPolicy: IfNotPresent resources: limits: @@ -468,7 +468,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06142022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022" imagePullPolicy: IfNotPresent resources: limits: @@ -612,7 +612,7 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "azure-mdsd-1.17.0" - dockerProviderVersion: "18.0.0-0" + dockerProviderVersion: "18.0.1-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -653,7 +653,7 @@ spec: # - NET_ADMIN # - NET_RAW - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06142022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022" imagePullPolicy: IfNotPresent resources: limits: @@ -821,7 +821,7 @@ spec: tier: node-win annotations: agentVersion: "0.0.0-0" - dockerProviderVersion: "18.0.0-0" + dockerProviderVersion: "18.0.1-0" schema-versions: "v1" spec: serviceAccountName: omsagent diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index a3cbb5a85..368eb61d4 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -756,24 +756,25 @@ def writeNodeAllocatableRecords(nodeAllocatbleRecordsJson) initialRetryDelaySecs = 0.5 retryAttemptCount = 1 begin - f = 
File.open(Constants::NODE_ALLOCATABLE_RECORDS_STATE_FILE, "w") + f = File.open(Constants::NODE_ALLOCATABLE_RECORDS_STATE_FILE, File::RDWR | File::CREAT, 0644) if !f.nil? isAcquiredLock = f.flock(File::LOCK_EX | File::LOCK_NB) - raise "in_kube_nodes::writeNodeAllocatableRecords:Failed to acquire file lock" if !isAcquiredLock + raise "in_kube_nodes::writeNodeAllocatableRecords:Failed to acquire file lock @ #{Time.now.utc.iso8601}" if !isAcquiredLock startTime = (Time.now.to_f * 1000).to_i + File.truncate(Constants::NODE_ALLOCATABLE_RECORDS_STATE_FILE, 0) f.write(nodeAllocatbleRecordsJson) f.flush timetakenMs = ((Time.now.to_f * 1000).to_i - startTime) - $log.info "in_kube_nodes::writeNodeAllocatableRecords:Successfull and with time taken(ms): #{timetakenMs}" + $log.info "in_kube_nodes::writeNodeAllocatableRecords:Successfull and with time taken(ms): #{timetakenMs} @ #{Time.now.utc.iso8601}" else - raise "in_kube_nodes::writeNodeAllocatableRecords:Failed to open file for write" + raise "in_kube_nodes::writeNodeAllocatableRecords:Failed to open file for write @ #{Time.now.utc.iso8601}" end rescue => err if retryAttemptCount < maxRetryCount f.flock(File::LOCK_UN) if !f.nil? f.close if !f.nil? + sleep (initialRetryDelaySecs * (maxRetryCount - retryAttemptCount)) retryAttemptCount = retryAttemptCount + 1 - sleep (initialRetryDelaySecs * retryAttemptCount) retry end $log.warn "in_kube_nodes::writeNodeAllocatableRecords failed with an error: #{err} after retries: #{maxRetryCount} @ #{Time.now.utc.iso8601}" diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index ad8fdbf21..20589167b 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -405,13 +405,13 @@ def getNodeAllocatableRecords() f = File.open(Constants::NODE_ALLOCATABLE_RECORDS_STATE_FILE, "r") if !f.nil? isAcquiredLock = f.flock(File::LOCK_EX | File::LOCK_NB) - raise "in_kube_perfinventory:getNodeAllocatableRecords:Failed to acquire file lock" if !isAcquiredLock + raise "in_kube_perfinventory:getNodeAllocatableRecords:Failed to acquire file lock @ #{Time.now.utc.iso8601}" if !isAcquiredLock startTime = (Time.now.to_f * 1000).to_i nodeAllocatableRecords = Yajl::Parser.parse(f) timetakenMs = ((Time.now.to_f * 1000).to_i - startTime) - $log.info "in_kube_perfinventory:getNodeAllocatableRecords:Number of Node Allocatable records: #{nodeAllocatableRecords.length} with time taken(ms) for read: #{timetakenMs} @ #{Time.now.utc.iso8601}" + $log.info "in_kube_perfinventory:getNodeAllocatableRecords:Number of Node Allocatable records: #{nodeAllocatableRecords.length} with time taken(ms) for read: #{timetakenMs} @ #{Time.now.utc.iso8601}" else - raise "in_kube_perfinventory:getNodeAllocatableRecords:Failed to open file for read" + raise "in_kube_perfinventory:getNodeAllocatableRecords:Failed to open file for read @ #{Time.now.utc.iso8601}" end rescue => err if retryAttemptCount < maxRetryCount diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index bdbc465ec..37c9741c3 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -1168,23 +1168,24 @@ def writeMDMRecords(mdmRecordsJson) initialRetryDelaySecs = 0.5 retryAttemptCount = 1 begin - f = File.open(Constants::MDM_POD_INVENTORY_STATE_FILE, "w") + f = File.open(Constants::MDM_POD_INVENTORY_STATE_FILE, File::RDWR | File::CREAT, 0644) if !f.nil? 
isAcquiredLock = f.flock(File::LOCK_EX | File::LOCK_NB) - raise "in_kube_podinventory:writeMDMRecords:Failed to acquire file lock" if !isAcquiredLock + raise "in_kube_podinventory:writeMDMRecords:Failed to acquire file lock @ #{Time.now.utc.iso8601}" if !isAcquiredLock startTime = (Time.now.to_f * 1000).to_i + File.truncate(Constants::MDM_POD_INVENTORY_STATE_FILE, 0) f.write(mdmRecordsJson) f.flush timetakenMs = ((Time.now.to_f * 1000).to_i - startTime) - $log.info "in_kube_podinventory:writeMDMRecords:Successfull and with time taken(ms): #{timetakenMs}" + $log.info "in_kube_podinventory:writeMDMRecords:Successfull and with time taken(ms): #{timetakenMs} @ #{Time.now.utc.iso8601}" else - raise "in_kube_podinventory:writeMDMRecords:Failed to open file for write" + raise "in_kube_podinventory:writeMDMRecords:Failed to open file for write @ #{Time.now.utc.iso8601}" end rescue => err if retryAttemptCount <= maxRetryCount f.flock(File::LOCK_UN) if !f.nil? f.close if !f.nil? - sleep (initialRetryDelaySecs * retryAttemptCount) + sleep (initialRetryDelaySecs * (maxRetryCount - retryAttemptCount)) retryAttemptCount = retryAttemptCount + 1 retry end diff --git a/source/plugins/ruby/in_kube_podmdminventory.rb b/source/plugins/ruby/in_kube_podmdminventory.rb index bfc5227f3..b872650d2 100644 --- a/source/plugins/ruby/in_kube_podmdminventory.rb +++ b/source/plugins/ruby/in_kube_podmdminventory.rb @@ -185,17 +185,17 @@ def getMDMRecords() f = File.open(Constants::MDM_POD_INVENTORY_STATE_FILE, "r") if !f.nil? isAcquiredLock = f.flock(File::LOCK_EX | File::LOCK_NB) - raise "in_kube_podmdminventory:getMDMRecords:Failed to acquire file lock" if !isAcquiredLock + raise "in_kube_podmdminventory:getMDMRecords:Failed to acquire file lock @ #{Time.now.utc.iso8601}" if !isAcquiredLock startTime = (Time.now.to_f * 1000).to_i mdmRecords = Yajl::Parser.parse(f) timetakenMs = ((Time.now.to_f * 1000).to_i - startTime) if mdmRecords.nil? || mdmRecords.empty? || mdmRecords["items"].nil? 
|| mdmRecords["collectionTime"] == @prevCollectionTime - raise "in_kube_podmdminventory:getMDMRecords: either read mdmRecords is nil or empty or stale" + raise "in_kube_podmdminventory:getMDMRecords: either read mdmRecords is nil or empty or stale @ #{Time.now.utc.iso8601}" end @prevCollectionTime = mdmRecords["collectionTime"] - $log.info "in_kube_podmdminventory:getMDMRecords:Number of MDM records: #{mdmRecords["items"].length} with time taken(ms) for read: #{timetakenMs} @ #{Time.now.utc.iso8601}" + $log.info "in_kube_podmdminventory:getMDMRecords:Number of MDM records: #{mdmRecords["items"].length} with time taken(ms) for read: #{timetakenMs} @ #{Time.now.utc.iso8601}" else - raise "in_kube_podmdminventory:getMDMRecords:Failed to open file for read" + raise "in_kube_podmdminventory:getMDMRecords:Failed to open file for read @ #{Time.now.utc.iso8601}" end rescue => err if retryAttemptCount <= maxRetryCount From d83ddf84246e6671584466f2f6b72c28248c8e33 Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Mon, 27 Jun 2022 11:04:56 -0700 Subject: [PATCH 241/301] Adhere to containers security guidance (#783) - move away from dockerhub images to MCR images - parameterize images in dockerfiles - use azure pipelines variables to pass appropriate MCR images during buildtime Co-authored-by: Amol Agrawal --- .pipelines/azure_pipeline_dev.yaml | 10 +-- .pipelines/azure_pipeline_prod.yaml | 10 +-- README.md | 39 ++++----- kubernetes/linux/Dockerfile | 3 +- kubernetes/linux/Dockerfile.multiarch | 10 ++- .../build-and-publish-docker-image.sh | 80 +++++++------------ .../linux/install-build-pre-requisites.sh | 16 ++-- .../windows/install-build-pre-requisites.ps1 | 6 +- test/e2e/src/core/Dockerfile | 10 ++- 9 files changed, 87 insertions(+), 97 deletions(-) diff --git a/.pipelines/azure_pipeline_dev.yaml b/.pipelines/azure_pipeline_dev.yaml index 9147501ba..eed3bdc57 100644 --- a/.pipelines/azure_pipeline_dev.yaml +++ b/.pipelines/azure_pipeline_dev.yaml @@ -115,14 +115,14 @@ jobs: az acr login -n ${{ variables.containerRegistry }} if [ "$(Build.Reason)" != "PullRequest" ]; then - docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg IMAGE_TAG=$(linuxImagetag) --push . + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg IMAGE_TAG=$(linuxImagetag) --build-arg GOLANG_BASE_IMAGE=$(GOLANG_BASE_IMAGE) --build-arg CI_BASE_IMAGE=$(CI_BASE_IMAGE) --push . docker pull ${{ variables.repoImageName }}:$(linuxImagetag) else - docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg IMAGE_TAG=$(linuxImagetag) . + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg IMAGE_TAG=$(linuxImagetag) --build-arg GOLANG_BASE_IMAGE=$(GOLANG_BASE_IMAGE) --build-arg CI_BASE_IMAGE=$(CI_BASE_IMAGE) . 
# load the multi-arch image to run tests - docker buildx build --tag ${{ variables.repoImageName }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg IMAGE_TAG=$(linuxImagetag) --load . + docker buildx build --tag ${{ variables.repoImageName }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg IMAGE_TAG=$(linuxImagetag) --build-arg GOLANG_BASE_IMAGE=$(GOLANG_BASE_IMAGE) --build-arg CI_BASE_IMAGE=$(CI_BASE_IMAGE) --load . fi curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin @@ -135,14 +135,14 @@ jobs: condition: eq(variables.IS_PR, true) inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)/linux' - DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04' + DockerImagesToScan: '$(GOLANG_BASE_IMAGE), $(CI_BASE_IMAGE)' - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 displayName: 'Generation Task' condition: eq(variables.IS_PR, false) inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)/linux' - DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04, ${{ variables.repoImageName }}:$(linuxImagetag)' + DockerImagesToScan: '$(GOLANG_BASE_IMAGE), $(CI_BASE_IMAGE), ${{ variables.repoImageName }}:$(linuxImagetag)' - task: PublishBuildArtifacts@1 inputs: diff --git a/.pipelines/azure_pipeline_prod.yaml b/.pipelines/azure_pipeline_prod.yaml index 5e22bdd3b..74650914e 100644 --- a/.pipelines/azure_pipeline_prod.yaml +++ b/.pipelines/azure_pipeline_prod.yaml @@ -119,14 +119,14 @@ jobs: az acr login -n ${{ variables.containerRegistry }} if [ "$(Build.Reason)" != "PullRequest" ]; then - docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageNameLinux }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --push . + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageNameLinux }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg GOLANG_BASE_IMAGE=$(GOLANG_BASE_IMAGE) --build-arg CI_BASE_IMAGE=$(CI_BASE_IMAGE) --push . docker pull ${{ variables.repoImageNameLinux }}:$(linuxImagetag) else - docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageNameLinux }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json . + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageNameLinux }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg GOLANG_BASE_IMAGE=$(GOLANG_BASE_IMAGE) --build-arg CI_BASE_IMAGE=$(CI_BASE_IMAGE) . # load the multi-arch image to run tests - docker buildx build --tag ${{ variables.repoImageNameLinux }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --load . + docker buildx build --tag ${{ variables.repoImageNameLinux }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg GOLANG_BASE_IMAGE=$(GOLANG_BASE_IMAGE) --build-arg CI_BASE_IMAGE=$(CI_BASE_IMAGE) --load . 
fi curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin @@ -138,14 +138,14 @@ jobs: condition: eq(variables.IS_PR, true) inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)/linux' - DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04' + DockerImagesToScan: '$(GOLANG_BASE_IMAGE), $(CI_BASE_IMAGE)' - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 displayName: 'Generation Task' condition: eq(variables.IS_PR, false) inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)/linux' - DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04, ${{ variables.repoImageNameLinux }}:$(linuxImagetag)' + DockerImagesToScan: '$(GOLANG_BASE_IMAGE), $(CI_BASE_IMAGE), ${{ variables.repoImageNameLinux }}:$(linuxImagetag)' - task: PublishBuildArtifacts@1 inputs: diff --git a/README.md b/README.md index 6e51d256b..60ed39901 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ Feel free to contact engineering team owners in case you have any questions abou ## Common - [Visual Studio Code](https://code.visualstudio.com/) for authoring -- [Go lang](https://golang.org/) for building go code. Go lang version 1.15.14 (both Linux & Windows) +- [Go lang](https://golang.org/) for building go code. Go lang version 1.18.3 (both Linux & Windows) > Note: If you are using WSL2, make sure you have cloned the code onto ubuntu not onto windows @@ -121,7 +121,7 @@ We recommend using [Visual Studio Code](https://code.visualstudio.com/) for auth ### Install Pre-requisites -1. Install go1.15.14, dotnet, powershell, docker and build dependencies to build go code for both Linux and Windows platforms +1. Install go1.18.3, dotnet, powershell, docker and build dependencies to build go code for both Linux and Windows platforms ``` bash ~/Docker-Provider/scripts/build/linux/install-build-pre-requisites.sh ``` @@ -143,31 +143,34 @@ bash ~/Docker-Provider/scripts/build/linux/install-build-pre-requisites.sh > Note: If you are using WSL2, ensure `Docker for windows` running with Linux containers mode on your windows machine to build Linux agent image successfully +> Note: format of the imagetag will be `ci`. possible values for release are test, dev, preview, dogfood, prod etc. Please use MCR urls while building internally. + +Preferred Way: You can build and push images for multiple architectures. This is powered by docker buildx +Directly use the docker buildx commands (the MCR images can be found in our internal wiki to be used as arguments) +``` +# multiple platforms +cd ~/Docker-Provider +docker buildx build --platform linux/arm64/v8,linux/amd64 -t /: --build-arg IMAGE_TAG= --build-arg CI_BASE_IMAGE= --build-arg GOLANG_BASE_IMAGE= -f kubernetes/linux/Dockerfile.multiarch --push . + +# single platform +cd ~/Docker-Provider +docker buildx build --platform linux/amd64 -t /: --build-arg IMAGE_TAG= --build-arg CI_BASE_IMAGE= --build-arg GOLANG_BASE_IMAGE= -f kubernetes/linux/Dockerfile.multiarch --push . +``` + +Using the build and publish script + ``` cd ~/Docker-Provider/kubernetes/linux/dockerbuild sudo docker login # if you want to publish the image to acr then login to acr via `docker login ` # build provider, docker image and publish to docker image -bash build-and-publish-docker-image.sh --image /: +bash build-and-publish-docker-image.sh --image /: --ubuntu --golang ``` -> Note: format of the imagetag will be `ci`. possible values for release are test, dev, preview, dogfood, prod etc. 
-You can also build and push images for multiple architectures. This is powered by docker buildx ``` cd ~/Docker-Provider/kubernetes/linux/dockerbuild sudo docker login # if you want to publish the image to acr then login to acr via `docker login ` # build and publish using docker buildx -bash build-and-publish-docker-image.sh --image /: --multiarch -``` - -or directly use the docker buildx commands -``` -# multiple platforms -cd ~/Docker-Provider -docker buildx build --platform linux/arm64/v8,linux/amd64 -t /: --build-arg IMAGE_TAG= -f kubernetes/linux/Dockerfile.multiarch --push . - -# single platform -cd ~/Docker-Provider -docker buildx build --platform linux/amd64 -t /: --build-arg IMAGE_TAG= -f kubernetes/linux/Dockerfile.multiarch --push . +bash build-and-publish-docker-image.sh --image /: --ubuntu --golang --multiarch ``` If you prefer to build docker provider shell bundle and image separately, then you can follow below instructions @@ -182,7 +185,7 @@ make ``` cd ~/Docker-Provider/kubernetes/linux/ -docker build -t /: --build-arg IMAGE_TAG= . +docker build -t /: --build-arg IMAGE_TAG= --build-arg CI_BASE_IMAGE= . docker push /: ``` ## Windows Agent diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index b9927be7c..211d37259 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -1,4 +1,5 @@ -FROM ubuntu:18.04 +ARG CI_BASE_IMAGE= +FROM ${CI_BASE_IMAGE} MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" diff --git a/kubernetes/linux/Dockerfile.multiarch b/kubernetes/linux/Dockerfile.multiarch index c96a93802..a89fd1781 100644 --- a/kubernetes/linux/Dockerfile.multiarch +++ b/kubernetes/linux/Dockerfile.multiarch @@ -1,4 +1,8 @@ -FROM --platform=$BUILDPLATFORM golang:1.15.14 AS builder +# Default base images. If you update them don't forgot to update variables in our build pipelines. Default values can be found in internal wiki. 
External can use ubuntu 18.04 and golang 1.18.3 +ARG GOLANG_BASE_IMAGE= +ARG CI_BASE_IMAGE= + +FROM --platform=$BUILDPLATFORM ${GOLANG_BASE_IMAGE} AS builder ARG TARGETOS TARGETARCH RUN /usr/bin/apt-get update && /usr/bin/apt-get install git g++ make pkg-config libssl-dev libpam0g-dev rpm librpm-dev uuid-dev libkrb5-dev python sudo gcc-aarch64-linux-gnu -y @@ -7,7 +11,7 @@ COPY source /src/source RUN cd /src/build/linux && make arch=${TARGETARCH} -FROM ubuntu:18.04 AS base_image +FROM ${CI_BASE_IMAGE} AS base_image ARG TARGETOS TARGETARCH MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ @@ -38,8 +42,8 @@ RUN chmod 775 $tmpdir/*.sh; sync; $tmpdir/setup.sh ${TARGETARCH} # Do vulnerability scan in a seperate stage to avoid adding layer FROM base_image AS vulnscan -COPY --from=aquasec/trivy:latest /usr/local/bin/trivy /usr/local/bin/trivy COPY .trivyignore .trivyignore +RUN curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin v0.28.1 RUN trivy rootfs --ignore-unfixed --no-progress --severity HIGH,CRITICAL,MEDIUM --skip-files "/usr/local/bin/trivy" / RUN trivy rootfs --ignore-unfixed --no-progress --severity HIGH,CRITICAL,MEDIUM /usr/lib RUN trivy rootfs --exit-code 1 --ignore-unfixed --no-progress --severity HIGH,CRITICAL,MEDIUM --skip-files "/usr/local/bin/trivy" / > /dev/null 2>&1 && trivy rootfs --exit-code 1 --ignore-unfixed --no-progress --severity HIGH,CRITICAL,MEDIUM /usr/lib > /dev/null 2>&1 diff --git a/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh b/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh index 638236507..40ce83cd4 100755 --- a/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh +++ b/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh @@ -13,8 +13,8 @@ usage() local basename=`basename $0` echo echo "Build and publish docker image:" - echo "$basename --image " - echo "$basename --image --multiarch" + echo "$basename --image --ubuntu --golang " + echo "$basename --image --ubuntu --golang --multiarch" } parse_args() @@ -32,6 +32,8 @@ for arg in "$@"; do case "$arg" in "--image") set -- "$@" "-i" ;; "--multiarch") set -- "$@" "-m" ;; + "--ubuntu") set -- "$@" "-u" ;; + "--golang") set -- "$@" "-g" ;; "--"*) usage ;; *) set -- "$@" "$arg" esac @@ -39,7 +41,7 @@ done local OPTIND opt -while getopts 'hi:m' opt; do +while getopts 'hi:u:g:m' opt; do case "$opt" in h) usage @@ -54,7 +56,12 @@ while getopts 'hi:m' opt; do multi=1 echo "using multiarch dockerfile" ;; - + u) + ci_base_image=$OPTARG + ;; + g) + golang_base_image=$OPTARG + ;; ?) usage exit 1 @@ -69,6 +76,16 @@ while getopts 'hi:m' opt; do exit 1 fi + if [ -z "$ci_base_image" ]; then + echo "-e invalid ubuntu image url. please try with valid values from internal wiki. do not use 3P entries" + exit 1 + fi + + if [ -z "$golang_base_image" ]; then + echo "-e invalid golang image url. please try with valid values from internal wiki. 
do not use 3P entries" + exit 1 + fi + # extract image tag imageTag=$(echo ${image} | sed "s/.*://") @@ -89,39 +106,6 @@ fi } -build_docker_provider() -{ - echo "building docker provider shell bundle" - cd $buildDir - echo "trigger make to build docker build provider shell bundle" - make - echo "building docker provider shell bundle completed" -} - -login_to_docker() -{ - echo "login to docker with provided creds" - # sudo docker login --username=$dockerUser - sudo docker login - echo "login to docker with provided creds completed" -} - -build_docker_image() -{ - echo "build docker image: $image and image tage is $imageTag" - cd $baseDir/kubernetes/linux - sudo docker build -t $image --build-arg IMAGE_TAG=$imageTag . - - echo "build docker image completed" -} - -publish_docker_image() -{ - echo "publishing docker image: $image" - sudo docker push $image - echo "publishing docker image: $image done." -} - # parse and validate args parse_args $@ @@ -138,22 +122,18 @@ echo "source code base directory: $baseDir" echo "build directory for docker provider: $buildDir" echo "docker file directory: $dockerFileDir" +echo "build docker image: $image and image tage is $imageTag" + if [ -n "$multi" ] && [ "$multi" -eq "1" ]; then echo "building multiarch" cd $baseDir - docker buildx build --platform linux/arm64/v8,linux/amd64 -t $image --build-arg IMAGE_TAG=$imageTag -f $linuxDir/Dockerfile.multiarch --push . - exit 0 + docker buildx build --platform linux/arm64/v8,linux/amd64 -t $image --build-arg IMAGE_TAG=$imageTag --build-arg CI_BASE_IMAGE="$ci_base_image" --build-arg GOLANG_BASE_IMAGE="$golang_base_image" -f $linuxDir/Dockerfile.multiarch --push . +else + echo "building amd64" + cd $baseDir + docker buildx build --platform linux/amd64 -t $image --build-arg IMAGE_TAG=$imageTag --build-arg CI_BASE_IMAGE="$ci_base_image" --build-arg GOLANG_BASE_IMAGE="$golang_base_image" -f $linuxDir/Dockerfile.multiarch --push . fi -# build docker provider shell bundle -build_docker_provider - -# build docker image -build_docker_image - -# publish docker image -publish_docker_image - -cd $currentDir - +echo "build and push docker image completed" +cd $currentDir \ No newline at end of file diff --git a/scripts/build/linux/install-build-pre-requisites.sh b/scripts/build/linux/install-build-pre-requisites.sh index b85e54fc4..88f9fbef9 100644 --- a/scripts/build/linux/install-build-pre-requisites.sh +++ b/scripts/build/linux/install-build-pre-requisites.sh @@ -8,17 +8,17 @@ TEMP_DIR=temp-$RANDOM install_go_lang() { export goVersion="$(echo $(go version))" - if [[ $goVersion == *go1.15.14* ]] ; then - echo "found existing installation of go version 1.15.14 so skipping the installation of go" + if [[ $goVersion == *go1.18.3* ]] ; then + echo "found existing installation of go version 1.18.3 so skipping the installation of go" else - echo "installing go 1.15.14 version ..." - sudo curl -O https://dl.google.com/go/go1.15.14.linux-amd64.tar.gz - sudo tar -xvf go1.15.14.linux-amd64.tar.gz + echo "installing go 1.18.3 version ..." + sudo curl -O https://dl.google.com/go/go1.18.3.linux-amd64.tar.gz + sudo tar -xvf go1.18.3.linux-amd64.tar.gz sudo mv -f go /usr/local echo "set file permission for go bin" sudo chmod 744 /usr/local/go/bin - echo "installation of go 1.15.14 completed." - echo "installation of go 1.15.14 completed." + echo "installation of go 1.18.3 completed." + echo "installation of go 1.18.3 completed." 
fi } @@ -173,4 +173,4 @@ sudo rm -rf $TEMP_DIR # set go env vars install_go_env_vars -echo "installing build pre-requisites python, go 1.15.14, dotnet, powershell, build dependencies and docker completed" +echo "installing build pre-requisites python, go 1.18.3, dotnet, powershell, build dependencies and docker completed" diff --git a/scripts/build/windows/install-build-pre-requisites.ps1 b/scripts/build/windows/install-build-pre-requisites.ps1 index 235f6ace9..1ceeda353 100644 --- a/scripts/build/windows/install-build-pre-requisites.ps1 +++ b/scripts/build/windows/install-build-pre-requisites.ps1 @@ -13,8 +13,8 @@ function Install-Go { exit 1 } - $url = "https://go.dev/dl/go1.15.14.windows-amd64.msi" - $output = Join-Path -Path $tempGo -ChildPath "go1.15.14.windows-amd64.msi" + $url = "https://go.dev/dl/go1.18.3.windows-amd64.msi" + $output = Join-Path -Path $tempGo -ChildPath "go1.18.3.windows-amd64.msi" Write-Host("downloading go msi into directory path : " + $output + " ...") Invoke-WebRequest -Uri $url -OutFile $output -ErrorAction Stop Write-Host("downloading of go msi into directory path : " + $output + " completed") @@ -137,7 +137,7 @@ function Install-Docker() { # https://stackoverflow.com/questions/28682642/powershell-why-is-using-invoke-webrequest-much-slower-than-a-browser-download $ProgressPreference = 'SilentlyContinue' -Write-Host "Install GO 1.15.14 version" +Write-Host "Install GO 1.18.3 version" Install-Go Write-Host "Install Build dependencies" Build-Dependencies diff --git a/test/e2e/src/core/Dockerfile b/test/e2e/src/core/Dockerfile index 52bcd7cf8..ba73e74f7 100644 --- a/test/e2e/src/core/Dockerfile +++ b/test/e2e/src/core/Dockerfile @@ -1,4 +1,6 @@ -FROM python:3.6 +# default value can be found in internal wiki. External can use python 3.6 base image +ARG PYTHON_BASE_IMAGE= +FROM ${PYTHON_BASE_IMAGE} RUN pip install --trusted-host pypi.org --trusted-host files.pythonhosted.org pytest pytest-xdist filelock requests kubernetes adal msrestazure @@ -11,14 +13,14 @@ RUN apt-get update && apt-get -y upgrade && \ CLI_REPO=$(lsb_release -cs) && \ echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ ${CLI_REPO} main" \ > /etc/apt/sources.list.d/azure-cli.list && \ + curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg && \ + echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | tee /etc/apt/sources.list.d/kubernetes.list && \ apt-get update && \ - apt-get install -y azure-cli && \ + apt-get install -y azure-cli kubectl && \ rm -rf /var/lib/apt/lists/* RUN python3 -m pip install junit_xml -COPY --from=lachlanevenson/k8s-kubectl:v1.20.5 /usr/local/bin/kubectl /usr/local/bin/kubectl - COPY ./core/e2e_tests.sh / COPY ./core/setup_failure_handler.py / COPY ./core/pytest.ini /e2etests/ From a9ec7eed6a23079514d274154c493a3d3826b21f Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 29 Jun 2022 09:06:44 -0700 Subject: [PATCH 242/301] update to DCR & DCR-A api version 2021-04-01 (#789) --- .../onboarding-using-msi-auth/existingClusterOnboarding.json | 4 ++-- .../arc-k8s-extension-msi-auth/existingClusterOnboarding.json | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index 7664e7705..2024e611a 100644 --- 
a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -61,7 +61,7 @@ "resources": [ { "type": "Microsoft.Insights/dataCollectionRules", - "apiVersion": "2021-09-01-preview", + "apiVersion": "2021-04-01", "name": "[variables('dcrName')]", "location": "[parameters('workspaceLocation')]", "tags": "[parameters('resourceTagValues')]", @@ -123,7 +123,7 @@ { "type": "Microsoft.ContainerService/managedClusters/providers/dataCollectionRuleAssociations", "name": "[concat(variables('clusterName'),'/microsoft.insights/', variables('associationName'))]", - "apiVersion": "2019-11-01-preview", + "apiVersion": "2021-04-01", "properties": { "description": "Association of data collection rule. Deleting this association will break the data collection for this AKS Cluster.", "dataCollectionRuleId": "[variables('dataCollectionRuleId')]" diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json index 3010226f9..424572857 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json @@ -75,7 +75,7 @@ "resources": [ { "type": "Microsoft.Insights/dataCollectionRules", - "apiVersion": "2021-09-01-preview", + "apiVersion": "2021-04-01", "name": "[variables('dcrName')]", "location": "[parameters('workspaceRegion')]", "tags": "[parameters('resourceTagValues')]", @@ -137,7 +137,7 @@ { "type": "Microsoft.Kubernetes/connectedClusters/providers/dataCollectionRuleAssociations", "name": "[concat(variables('clusterName'),'/microsoft.insights/', variables('associationName'))]", - "apiVersion": "2019-11-01-preview", + "apiVersion": "2021-04-01", "properties": { "description": "Association of data collection rule. 
Deleting this association will break the data collection for this AKS Cluster.", "dataCollectionRuleId": "[variables('dataCollectionRuleId')]" From 5c2a9593f5da668be8987a213bc959e866ce9d32 Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Thu, 21 Jul 2022 17:02:39 -0700 Subject: [PATCH 243/301] fix telegraf vulns (#795) Co-authored-by: Amol Agrawal --- .trivyignore | 10 +++++----- kubernetes/linux/setup.sh | 7 ++++--- kubernetes/windows/setup.ps1 | 2 +- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/.trivyignore b/.trivyignore index 56ac504d5..0e3293641 100644 --- a/.trivyignore +++ b/.trivyignore @@ -2,10 +2,10 @@ #[vishwa] - Fix telegraf & test all for next release - see work item #https://msazure.visualstudio.com/InfrastructureInsights/_workitems/edit/13322134 # Unfixed as of 4/28/2022 CVE-2019-3826 -CVE-2022-27191 - -#still present in mdsd telegraf -CVE-2021-42836 +CVE-2022-1996 +CVE-2022-29190 +CVE-2022-29222 +CVE-2022-29189 # ruby in /usr/lib CVE-2020-36327 @@ -16,4 +16,4 @@ CVE-2021-31799 CVE-2021-28965 #dpkg vulnerability in ubuntu -CVE-2022-1304 \ No newline at end of file +CVE-2022-1304 diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index c478af0e5..004fe7806 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -25,6 +25,7 @@ fi /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb cp -f $TMPDIR/mdsd.xml /etc/mdsd.d cp -f $TMPDIR/envmdsd /etc/mdsd.d +rm /usr/sbin/telegraf # log rotate conf for mdsd and can be extended for other log files as well cp -f $TMPDIR/logrotate.conf /etc/logrotate.d/ci-agent @@ -40,10 +41,10 @@ sudo apt-get install jq=1.5+dfsg-2 -y #used to setcaps for ruby process to read /proc/env sudo apt-get install libcap2-bin -y -wget https://dl.influxdata.com/telegraf/releases/telegraf-1.22.2_linux_$ARCH.tar.gz -tar -zxvf telegraf-1.22.2_linux_$ARCH.tar.gz +wget https://dl.influxdata.com/telegraf/releases/telegraf-1.23.2_linux_$ARCH.tar.gz +tar -zxvf telegraf-1.23.2_linux_$ARCH.tar.gz -mv /opt/telegraf-1.22.2/usr/bin/telegraf /opt/telegraf +mv /opt/telegraf-1.23.2/usr/bin/telegraf /opt/telegraf chmod 544 /opt/telegraf diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index 2fd429e43..c5f1f422d 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -35,7 +35,7 @@ Write-Host ('Finished Installing Fluentbit') Write-Host ('Installing Telegraf'); try { - $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.22.2_windows_amd64.zip' + $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.23.2_windows_amd64.zip' Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf.zip Expand-Archive -Path /installation/telegraf.zip -Destination /installation/telegraf Move-Item -Path /installation/telegraf/*/* -Destination /opt/telegraf/ -ErrorAction SilentlyContinue From eb7548953f2c0dea0c5236b6d54c48c134eb41cf Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Fri, 22 Jul 2022 11:29:51 -0700 Subject: [PATCH 244/301] Address vulnerabilities through package updates (#794) - Updates to ruby 3.1.1 - Uses RVM as ruby manager instead of the brightbox ppa - Updates fluentd to 1.14.6 - Use default JSON gem instead of yajl-json - Consume tomlrb as a gem instead of committed source code Co-authored-by: Amol Agrawal Co-authored-by: Ganga Mahesh Siddem --- .trivyignore | 14 +- README.md | 1 - .../scripts/tomlparser-agent-config.rb | 6 +- .../scripts/tomlparser-mdm-metrics-config.rb | 6 +- .../scripts/tomlparser-prom-agent-config.rb | 6 +- 
.../scripts/tomlparser-prom-customconfig.rb | 8 +- build/common/installer/scripts/tomlparser.rb | 6 +- .../installer/datafiles/base_container.data | 9 - .../tomlparser-metric-collection-config.rb | 2 +- .../scripts/tomlparser-npm-config.rb | 6 +- .../scripts/tomlparser-osm-config.rb | 2 +- build/windows/installer/conf/fluent.conf | 2 +- kubernetes/linux/main.sh | 23 +- kubernetes/linux/setup.sh | 27 +- kubernetes/windows/Dockerfile | 13 +- kubernetes/windows/Dockerfile-dev-base-image | 6 +- .../ruby/ApplicationInsightsUtility.rb | 2 +- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 2 +- .../plugins/ruby/ContainerInventoryState.rb | 2 +- source/plugins/ruby/DockerApiClient.rb | 2 +- source/plugins/ruby/KubernetesApiClient.rb | 14 +- source/plugins/ruby/MdmMetricsGenerator.rb | 21 +- source/plugins/ruby/WatchStream.rb | 4 +- .../plugins/ruby/arc_k8s_cluster_identity.rb | 2 +- source/plugins/ruby/filter_cadvisor2mdm.rb | 2 +- source/plugins/ruby/filter_inventory2mdm.rb | 2 +- source/plugins/ruby/filter_telegraf2mdm.rb | 2 +- source/plugins/ruby/in_cadvisor_perf.rb | 2 +- source/plugins/ruby/in_containerinventory.rb | 2 +- source/plugins/ruby/in_kube_events.rb | 3 +- source/plugins/ruby/in_kube_nodes.rb | 3 +- source/plugins/ruby/in_kube_perfinventory.rb | 5 +- source/plugins/ruby/in_kube_podinventory.rb | 9 +- .../plugins/ruby/in_kube_podmdminventory.rb | 5 +- source/plugins/ruby/in_kube_pvinventory.rb | 3 +- .../plugins/ruby/in_kubestate_deployments.rb | 3 +- source/plugins/ruby/in_kubestate_hpa.rb | 3 +- source/plugins/ruby/in_win_cadvisor_perf.rb | 2 +- source/plugins/ruby/kubelet_utils.rb | 34 +- .../ruby/kubernetes_container_inventory.rb | 3 +- .../channel/contracts/json_serializable.rb | 2 +- .../channel/sender_base.rb | 2 +- source/plugins/ruby/out_mdm.rb | 2 +- source/plugins/ruby/podinventory_to_mdm.rb | 4 +- source/toml-parser/tomlrb.rb | 44 -- source/toml-parser/tomlrb/generated_parser.rb | 542 ------------------ source/toml-parser/tomlrb/handler.rb | 73 --- source/toml-parser/tomlrb/parser.rb | 18 - source/toml-parser/tomlrb/parser.y | 104 ---- source/toml-parser/tomlrb/scanner.rb | 54 -- source/toml-parser/tomlrb/string_utils.rb | 33 -- source/toml-parser/tomlrb/version.rb | 3 - 52 files changed, 120 insertions(+), 1030 deletions(-) delete mode 100644 source/toml-parser/tomlrb.rb delete mode 100644 source/toml-parser/tomlrb/generated_parser.rb delete mode 100644 source/toml-parser/tomlrb/handler.rb delete mode 100644 source/toml-parser/tomlrb/parser.rb delete mode 100644 source/toml-parser/tomlrb/parser.y delete mode 100644 source/toml-parser/tomlrb/scanner.rb delete mode 100644 source/toml-parser/tomlrb/string_utils.rb delete mode 100644 source/toml-parser/tomlrb/version.rb diff --git a/.trivyignore b/.trivyignore index 0e3293641..91ee2f5bb 100644 --- a/.trivyignore +++ b/.trivyignore @@ -1,19 +1,9 @@ -# related to telegraf -#[vishwa] - Fix telegraf & test all for next release - see work item #https://msazure.visualstudio.com/InfrastructureInsights/_workitems/edit/13322134 -# Unfixed as of 4/28/2022 +# telegraf vulnerabilities CVE-2019-3826 -CVE-2022-1996 CVE-2022-29190 CVE-2022-29222 CVE-2022-29189 - -# ruby in /usr/lib -CVE-2020-36327 -CVE-2021-43809 -CVE-2021-41816 -CVE-2021-41819 -CVE-2021-31799 -CVE-2021-28965 +CVE-2022-1996 #dpkg vulnerability in ubuntu CVE-2022-1304 diff --git a/README.md b/README.md index 60ed39901..2b0ab246e 100644 --- a/README.md +++ b/README.md @@ -90,7 +90,6 @@ The general directory structure is: │ │ | ├── health/ - code for health feature │ │ | ├── 
lib/ - lib for app insights ruby and this code of application_insights gem │ │ | ... - plugins in, out and filters code in ruby -│ ├── toml-parser/ - code for parsing of toml configuration files ├── test/ - source code for tests │ ├── e2e/ - e2e tests to validate agent and e2e workflow(s) │ ├── unit-tests/ - unit tests code diff --git a/build/common/installer/scripts/tomlparser-agent-config.rb b/build/common/installer/scripts/tomlparser-agent-config.rb index ebe1e3982..a6b6ee0a1 100644 --- a/build/common/installer/scripts/tomlparser-agent-config.rb +++ b/build/common/installer/scripts/tomlparser-agent-config.rb @@ -2,11 +2,7 @@ #this should be require relative in Linux and require in windows, since it is a gem install on windows @os_type = ENV["OS_TYPE"] -if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 - require "tomlrb" -else - require_relative "tomlrb" -end +require "tomlrb" require_relative "ConfigParseErrorLogger" diff --git a/build/common/installer/scripts/tomlparser-mdm-metrics-config.rb b/build/common/installer/scripts/tomlparser-mdm-metrics-config.rb index b6a4419cf..17c1ca118 100644 --- a/build/common/installer/scripts/tomlparser-mdm-metrics-config.rb +++ b/build/common/installer/scripts/tomlparser-mdm-metrics-config.rb @@ -3,11 +3,7 @@ #this should be require relative in Linux and require in windows, since it is a gem install on windows @os_type = ENV["OS_TYPE"] -if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 - require "tomlrb" -else - require_relative "tomlrb" -end +require "tomlrb" require_relative "/etc/fluent/plugin/constants" require_relative "ConfigParseErrorLogger" diff --git a/build/common/installer/scripts/tomlparser-prom-agent-config.rb b/build/common/installer/scripts/tomlparser-prom-agent-config.rb index 664691a44..abc939f52 100644 --- a/build/common/installer/scripts/tomlparser-prom-agent-config.rb +++ b/build/common/installer/scripts/tomlparser-prom-agent-config.rb @@ -2,11 +2,7 @@ #this should be require relative in Linux and require in windows, since it is a gem install on windows @os_type = ENV["OS_TYPE"] -if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 - require "tomlrb" -else - require_relative "tomlrb" -end +require "tomlrb" require_relative "ConfigParseErrorLogger" diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb index 642eadc14..76909b17c 100644 --- a/build/common/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -2,12 +2,8 @@ #this should be require relative in Linux and require in windows, since it is a gem install on windows @os_type = ENV["OS_TYPE"] -if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 - require "tomlrb" -else - require_relative "tomlrb" -end -# require_relative "tomlrb" +require "tomlrb" + require_relative "ConfigParseErrorLogger" require "fileutils" diff --git a/build/common/installer/scripts/tomlparser.rb b/build/common/installer/scripts/tomlparser.rb index 6a2f3c6d6..6d3ee6e78 100644 --- a/build/common/installer/scripts/tomlparser.rb +++ b/build/common/installer/scripts/tomlparser.rb @@ -2,11 +2,7 @@ #this should be require relative in Linux and require in windows, since it is a gem install on windows @os_type = ENV["OS_TYPE"] -if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 - require "tomlrb" -else - require_relative "tomlrb" -end +require "tomlrb" require_relative "ConfigParseErrorLogger" diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index 92b494ae3..db7acb57b 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -20,15 +20,6 @@ MAINTAINER: 'Microsoft Corporation' /etc/opt/microsoft/docker-cimprov/container.conf; build/linux/installer/conf/container.conf; 644; root; root -/opt/tomlrb.rb; source/toml-parser/tomlrb.rb; 644; root; root -/opt/tomlrb/generated_parser.rb; source/toml-parser/tomlrb/generated_parser.rb; 644; root; root -/opt/tomlrb/handler.rb; source/toml-parser/tomlrb/handler.rb; 644; root; root -/opt/tomlrb/parser.rb; source/toml-parser/tomlrb/parser.rb; 644; root; root -/opt/tomlrb/parser.y; source/toml-parser/tomlrb/parser.y; 644; root; root -/opt/tomlrb/scanner.rb; source/toml-parser/tomlrb/scanner.rb; 644; root; root -/opt/tomlrb/string_utils.rb; source/toml-parser/tomlrb/string_utils.rb; 644; root; root -/opt/tomlrb/version.rb; source/toml-parser/tomlrb/version.rb; 644; root; root - /opt/td-agent-bit/bin/out_oms.so; intermediate/${{BUILD_CONFIGURATION}}/out_oms.so; 755; root; root /etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf; build/linux/installer/conf/prometheus-side-car.conf; 644; root; root /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf; build/linux/installer/conf/td-agent-bit.conf; 644; root; root diff --git a/build/linux/installer/scripts/tomlparser-metric-collection-config.rb b/build/linux/installer/scripts/tomlparser-metric-collection-config.rb index cee41312b..3001fdbaf 100644 --- a/build/linux/installer/scripts/tomlparser-metric-collection-config.rb +++ b/build/linux/installer/scripts/tomlparser-metric-collection-config.rb @@ -1,7 +1,7 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -require_relative "tomlrb" +require "tomlrb" require_relative "ConfigParseErrorLogger" require_relative "/etc/fluent/plugin/constants" diff --git a/build/linux/installer/scripts/tomlparser-npm-config.rb b/build/linux/installer/scripts/tomlparser-npm-config.rb index e8cf216fd..664ed4f0b 100644 --- a/build/linux/installer/scripts/tomlparser-npm-config.rb +++ b/build/linux/installer/scripts/tomlparser-npm-config.rb @@ -2,11 +2,7 @@ #this should be require relative in Linux and require in windows, since it is a gem install on windows @os_type = ENV["OS_TYPE"] -if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 - require "tomlrb" -else - require_relative "tomlrb" -end +require "tomlrb" require_relative "ConfigParseErrorLogger" diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 096064db8..2ac5ef387 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -1,6 +1,6 @@ #!/usr/local/bin/ruby -require_relative "tomlrb" +require "tomlrb" require "fileutils" require_relative "ConfigParseErrorLogger" diff --git a/build/windows/installer/conf/fluent.conf b/build/windows/installer/conf/fluent.conf index a78ac58fa..73d62a3ff 100644 --- a/build/windows/installer/conf/fluent.conf +++ b/build/windows/installer/conf/fluent.conf @@ -1,5 +1,5 @@ <source> - type heartbeat_request + @type heartbeat_request run_interval 30m @log_level info </source> diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 1e00457d9..efb95698a 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -88,6 +88,8 @@ checkAgentOnboardingStatus() { fi } +# setup paths for ruby +[ -f /etc/profile.d/rvm.sh ] && source /etc/profile.d/rvm.sh setReplicaSetSpecificConfig() { echo "num of fluentd workers:${NUM_OF_FLUENTD_WORKERS}" export FLUENTD_FLUSH_INTERVAL="20s" @@ -453,7 +455,7 @@ source ~/.bashrc if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then #Parse the configmap to set the right environment variables. - /usr/bin/ruby2.7 tomlparser.rb + ruby tomlparser.rb cat config_env_var | while read line; do echo $line >>~/.bashrc @@ -464,7 +466,7 @@ fi #Parse the configmap to set the right environment variables for agent config. #Note > tomlparser-agent-config.rb has to be parsed first before td-agent-bit-conf-customizer.rb for fbit agent settings if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then - /usr/bin/ruby2.7 tomlparser-agent-config.rb + ruby tomlparser-agent-config.rb cat agent_config_env_var | while read line; do echo $line >> ~/.bashrc @@ -472,7 +474,7 @@ if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then source agent_config_env_var #Parse the configmap to set the right environment variables for network policy manager (npm) integration. - /usr/bin/ruby2.7 tomlparser-npm-config.rb + ruby tomlparser-npm-config.rb cat integration_npm_config_env_var | while read line; do echo $line >> ~/.bashrc @@ -482,11 +484,11 @@ fi #Replace the placeholders in td-agent-bit.conf file for fluentbit with custom/default values in daemonset if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then - /usr/bin/ruby2.7 td-agent-bit-conf-customizer.rb + ruby td-agent-bit-conf-customizer.rb fi #Parse the prometheus configmap to create a file with new custom settings. -/usr/bin/ruby2.7 tomlparser-prom-customconfig.rb +ruby tomlparser-prom-customconfig.rb #Setting default environment variables to be used in any case of failure in the above steps if [ ! -e "/etc/config/kube.conf" ]; then @@ -520,7 +522,7 @@ fi if [ ! -e "/etc/config/kube.conf" ]; then if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then #Parse the agent configmap to create a file with new custom settings.
- /usr/bin/ruby2.7 tomlparser-prom-agent-config.rb + ruby tomlparser-prom-agent-config.rb #Sourcing config environment variable file if it exists if [ -e "side_car_fbit_config_env_var" ]; then cat side_car_fbit_config_env_var | while read line; do @@ -533,7 +535,7 @@ fi #Parse the configmap to set the right environment variables for MDM metrics configuration for Alerting. if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then - /usr/bin/ruby2.7 tomlparser-mdm-metrics-config.rb + ruby tomlparser-mdm-metrics-config.rb cat config_mdm_metrics_env_var | while read line; do echo $line >>~/.bashrc @@ -541,7 +543,7 @@ if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then source config_mdm_metrics_env_var #Parse the configmap to set the right environment variables for metric collection settings - /usr/bin/ruby2.7 tomlparser-metric-collection-config.rb + ruby tomlparser-metric-collection-config.rb cat config_metric_collection_env_var | while read line; do echo $line >>~/.bashrc @@ -552,7 +554,7 @@ fi # OSM scraping to be done in replicaset if sidecar car scraping is disabled and always do the scraping from the sidecar (It will always be either one of the two) if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || ( ( -e "/etc/config/kube.conf" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ]]; then - /usr/bin/ruby2.7 tomlparser-osm-config.rb + ruby tomlparser-osm-config.rb if [ -e "integration_osm_config_env_var" ]; then cat integration_osm_config_env_var | while read line; do @@ -649,7 +651,8 @@ if [ "$CONTAINER_RUNTIME" != "docker" ]; then fi echo "set caps for ruby process to read container env from proc" -sudo setcap cap_sys_ptrace,cap_dac_read_search+ep /usr/bin/ruby2.7 +RUBY_PATH=$(which ruby) +sudo setcap cap_sys_ptrace,cap_dac_read_search+ep "$RUBY_PATH" echo "export KUBELET_RUNTIME_OPERATIONS_METRIC="$KUBELET_RUNTIME_OPERATIONS_METRIC >> ~/.bashrc echo "export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="$KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC >> ~/.bashrc diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 004fe7806..0e3e43757 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -1,3 +1,5 @@ +#!/bin/bash + TMPDIR="/opt" cd $TMPDIR @@ -11,6 +13,20 @@ fi #upgrade apt to latest version apt-get update && apt-get install -y apt && DEBIAN_FRONTEND=noninteractive apt-get install -y locales + +curl -sSL https://rvm.io/mpapis.asc | gpg --import - +curl -sSL https://rvm.io/pkuczynski.asc | gpg --import - +curl -sSL https://get.rvm.io | bash -s stable + +# setup paths for ruby and rvm +if [ -f /etc/profile.d/rvm.sh ]; then + source /etc/profile.d/rvm.sh + echo "[ -f /etc/profile.d/rvm.sh ] && source /etc/profile.d/rvm.sh" >> ~/.bashrc +fi + +rvm install 3.1.1 +rvm --default use 3.1.1 + sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ dpkg-reconfigure --frontend=noninteractive locales && \ update-locale LANG=en_US.UTF-8 @@ -57,15 +73,11 @@ sudo echo "deb https://packages.fluentbit.io/ubuntu/bionic bionic main" >> /etc/ sudo apt-get update sudo apt-get install td-agent-bit=1.7.8 -y -# install ruby2.7 -sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F5DA5F09C3173AA6 -sudo echo "deb http://ppa.launchpad.net/brightbox/ruby-ng/ubuntu bionic main" >> /etc/apt/sources.list -sudo apt-get update -sudo apt-get install ruby2.7 ruby2.7-dev gcc make -y # fluentd v1 gem -gem install fluentd -v "1.14.2" --no-document +gem install fluentd -v "1.14.6" --no-document fluentd --setup ./fluent 
gem install gyoku iso8601 --no-doc +gem install tomlrb -v "2.0.1" --no-document rm -f $TMPDIR/docker-cimprov*.sh @@ -75,7 +87,8 @@ rm -f $TMPDIR/envmdsd rm -f $TMPDIR/telegraf-*.tar.gz # remove build dependencies -sudo apt-get remove ruby2.7-dev gcc make -y +sudo apt-get remove gcc make -y +sudo apt autoremove -y # Remove settings for cron.daily that conflict with the node's cron.daily. Since both are trying to rotate the same files # in /var/log at the same time, the rotation doesn't happen correctly and then the *.1 file is forever logged to. diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 3b663132e..019e9cda1 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -11,28 +11,27 @@ ARG IMAGE_TAG=win-ciprod06142022 # Docker creates a layer for every RUN-Statement RUN powershell -Command "Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" # Fluentd depends on cool.io whose fat gem is only available for Ruby < 2.5, so need to specify --platform ruby when install Ruby > 2.5 and install msys2 to get dev tools -RUN choco install -y ruby --version 2.7.5.1 --params "'/InstallDir:C:\ruby27'" \ -&& choco install -y msys2 --version 20211130.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby27\msys64'" \ +RUN choco install -y ruby --version 3.1.1.1 --params "'/InstallDir:C:\ruby31'" \ +&& choco install -y msys2 --version 20211130.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby31\msys64'" \ && choco install -y vim # gangams - optional MSYS2 update via ridk failing in merged docker file so skipping that since we dont need optional update RUN refreshenv \ && ridk install 3 \ && echo gem: --no-document >> C:\ProgramData\gemrc \ -&& gem install cool.io -v 1.5.4 --platform ruby \ +&& gem install cool.io -v 1.7.1 --platform ruby \ && gem install oj -v 3.3.10 \ -&& gem install json -v 2.2.0 \ -&& gem install fluentd -v 1.14.2 \ +&& gem install fluentd -v 1.14.6 \ && gem install win32-service -v 1.0.1 \ && gem install win32-ipc -v 0.7.0 \ && gem install win32-event -v 0.6.3 \ && gem install windows-pr -v 1.2.6 \ -&& gem install tomlrb -v 1.3.0 \ +&& gem install tomlrb -v 2.0.1 \ && gem install gyoku -v 1.3.1 \ && gem sources --clear-all # Remove gem cache and chocolatey -RUN powershell -Command "Remove-Item -Force C:\ruby27\lib\ruby\gems\2.7.0\cache\*.gem; Remove-Item -Recurse -Force 'C:\ProgramData\chocolatey'" +RUN powershell -Command "Remove-Item -Force C:\ruby31\lib\ruby\gems\3.1.0\cache\*.gem; Remove-Item -Recurse -Force 'C:\ProgramData\chocolatey'" SHELL ["powershell"] diff --git a/kubernetes/windows/Dockerfile-dev-base-image b/kubernetes/windows/Dockerfile-dev-base-image index 3aca6ae20..bcf28326e 100644 --- a/kubernetes/windows/Dockerfile-dev-base-image +++ b/kubernetes/windows/Dockerfile-dev-base-image @@ -8,8 +8,8 @@ LABEL vendor=Microsoft\ Corp \ # Docker creates a layer for every RUN-Statement RUN powershell -Command "Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" # Fluentd depends on cool.io whose fat gem is only available for Ruby < 2.5, so need to specify --platform ruby when install Ruby > 2.5 and install msys2 to get dev tools -RUN choco install -y ruby --version 2.7.5.1 --params "'/InstallDir:C:\ruby27'" \ -&& choco install -y msys2 --version 20210604.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby27\msys64'" \ +RUN choco install -y 
ruby --version 3.1.1.1 --params "'/InstallDir:C:\ruby31'" \ +&& choco install -y msys2 --version 20210604.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby31\msys64'" \ && choco install -y vim # gangams - optional MSYS2 update via ridk failing in merged docker file so skipping that since we dont need optional update @@ -29,7 +29,7 @@ RUN refreshenv \ && gem sources --clear-all # Remove gem cache and chocolatey -RUN powershell -Command "Remove-Item -Force C:\ruby27\lib\ruby\gems\2.7.0\cache\*.gem; Remove-Item -Recurse -Force 'C:\ProgramData\chocolatey'" +RUN powershell -Command "Remove-Item -Force C:\ruby31\lib\ruby\gems\3.1.0\cache\*.gem; Remove-Item -Recurse -Force 'C:\ProgramData\chocolatey'" SHELL ["powershell"] diff --git a/source/plugins/ruby/ApplicationInsightsUtility.rb b/source/plugins/ruby/ApplicationInsightsUtility.rb index 70d0a400e..b34cb20ee 100644 --- a/source/plugins/ruby/ApplicationInsightsUtility.rb +++ b/source/plugins/ruby/ApplicationInsightsUtility.rb @@ -7,7 +7,7 @@ class ApplicationInsightsUtility require_relative "DockerApiClient" require_relative "oms_common" require_relative "proxy_utils" - require "yajl/json_gem" + require "json" require "base64" @@HeartBeat = "HeartBeatEvent" diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index a0c50e6c5..b18e887fd 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -2,7 +2,7 @@ # frozen_string_literal: true class CAdvisorMetricsAPIClient - require "yajl/json_gem" + require "json" require "logger" require "net/http" require "net/https" diff --git a/source/plugins/ruby/ContainerInventoryState.rb b/source/plugins/ruby/ContainerInventoryState.rb index 170fa65e3..7e5ca18e8 100644 --- a/source/plugins/ruby/ContainerInventoryState.rb +++ b/source/plugins/ruby/ContainerInventoryState.rb @@ -2,7 +2,7 @@ # frozen_string_literal: true class ContainerInventoryState - require 'yajl/json_gem' + require 'json' require_relative 'omslog' @@InventoryDirectory = "/var/opt/microsoft/docker-cimprov/state/ContainerInventory/" diff --git a/source/plugins/ruby/DockerApiClient.rb b/source/plugins/ruby/DockerApiClient.rb index 53dd1f39f..cff9f359f 100644 --- a/source/plugins/ruby/DockerApiClient.rb +++ b/source/plugins/ruby/DockerApiClient.rb @@ -3,7 +3,7 @@ class DockerApiClient require "socket" - require "yajl/json_gem" + require "json" require "timeout" require_relative "omslog" require_relative "DockerApiRestHelper" diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index ffd76bfbd..612035625 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -2,7 +2,7 @@ # frozen_string_literal: true class KubernetesApiClient - require "yajl/json_gem" + require "json" require "logger" require "net/http" require "net/https" @@ -801,9 +801,9 @@ def getResourcesAndContinuationTokenV2(uri, api_group: nil) responseCode, resourceInfo = getKubeResourceInfoV2(uri, api_group: api_group) @Log.info "KubernetesApiClient::getResourcesAndContinuationTokenV2 : Done getting resources from Kube API using url: #{uri} @ #{Time.now.utc.iso8601}" if !responseCode.nil? && responseCode == "200" && !resourceInfo.nil? 
- @Log.info "KubernetesApiClient::getResourcesAndContinuationTokenV2:Start:Parsing data for #{uri} using yajl @ #{Time.now.utc.iso8601}" - resourceInventory = Yajl::Parser.parse(StringIO.new(resourceInfo.body)) - @Log.info "KubernetesApiClient::getResourcesAndContinuationTokenV2:End:Parsing data for #{uri} using yajl @ #{Time.now.utc.iso8601}" + @Log.info "KubernetesApiClient::getResourcesAndContinuationTokenV2:Start:Parsing data for #{uri} using JSON @ #{Time.now.utc.iso8601}" + resourceInventory = JSON.parse(resourceInfo.body) + @Log.info "KubernetesApiClient::getResourcesAndContinuationTokenV2:End:Parsing data for #{uri} using JSON @ #{Time.now.utc.iso8601}" resourceInfo = nil end if (!resourceInventory.nil? && !resourceInventory["metadata"].nil?) @@ -825,9 +825,9 @@ def getResourcesAndContinuationToken(uri, api_group: nil) resourceInfo = getKubeResourceInfo(uri, api_group: api_group) @Log.info "KubernetesApiClient::getResourcesAndContinuationToken : Done getting resources from Kube API using url: #{uri} @ #{Time.now.utc.iso8601}" if !resourceInfo.nil? - @Log.info "KubernetesApiClient::getResourcesAndContinuationToken:Start:Parsing data for #{uri} using yajl @ #{Time.now.utc.iso8601}" - resourceInventory = Yajl::Parser.parse(StringIO.new(resourceInfo.body)) - @Log.info "KubernetesApiClient::getResourcesAndContinuationToken:End:Parsing data for #{uri} using yajl @ #{Time.now.utc.iso8601}" + @Log.info "KubernetesApiClient::getResourcesAndContinuationToken:Start:Parsing data for #{uri} using JSON @ #{Time.now.utc.iso8601}" + resourceInventory = JSON.parse(resourceInfo.body) + @Log.info "KubernetesApiClient::getResourcesAndContinuationToken:End:Parsing data for #{uri} using JSON @ #{Time.now.utc.iso8601}" resourceInfo = nil end if (!resourceInventory.nil? && !resourceInventory["metadata"].nil?) diff --git a/source/plugins/ruby/MdmMetricsGenerator.rb b/source/plugins/ruby/MdmMetricsGenerator.rb index f4904697c..8ea06fe71 100644 --- a/source/plugins/ruby/MdmMetricsGenerator.rb +++ b/source/plugins/ruby/MdmMetricsGenerator.rb @@ -3,7 +3,6 @@ class MdmMetricsGenerator require "logger" - require "yajl/json_gem" require "json" require_relative "MdmAlertTemplates" require_relative "ApplicationInsightsUtility" @@ -140,7 +139,7 @@ def appendPodMetrics(records, metricName, metricHash, batch_time, metricsTemplat containerCountMetricValue: value, } end - records.push(Yajl::Parser.parse(StringIO.new(record))) + records.push(JSON.parse(record)) } else @log.info "No records found in hash for metric: #{metricName}" @@ -334,7 +333,7 @@ def getContainerResourceUtilMetricRecords(recordTimeStamp, metricName, percentag containerResourceUtilizationPercentage: percentageMetricValue, thresholdPercentageDimValue: thresholdPercentage, } - records.push(Yajl::Parser.parse(StringIO.new(resourceUtilRecord))) + records.push(JSON.parse(resourceUtilRecord)) # Adding another metric for threshold violation resourceThresholdViolatedRecord = MdmAlertTemplates::Container_resource_threshold_violation_template % { @@ -347,7 +346,7 @@ def getContainerResourceUtilMetricRecords(recordTimeStamp, metricName, percentag containerResourceThresholdViolated: isZeroFill ? 
0 : 1, thresholdPercentageDimValue: thresholdPercentage, } - records.push(Yajl::Parser.parse(StringIO.new(resourceThresholdViolatedRecord))) + records.push(JSON.parse(resourceThresholdViolatedRecord)) rescue => errorStr @log.info "Error in getContainerResourceUtilMetricRecords: #{errorStr}" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) @@ -374,7 +373,7 @@ def getPVResourceUtilMetricRecords(recordTimeStamp, metricName, computer, percen pvResourceUtilizationPercentage: percentageMetricValue, thresholdPercentageDimValue: thresholdPercentage, } - records.push(Yajl::Parser.parse(StringIO.new(resourceUtilRecord))) + records.push(JSON.parse(resourceUtilRecord)) # Adding another metric for threshold violation resourceThresholdViolatedRecord = MdmAlertTemplates::PV_resource_threshold_violation_template % { @@ -387,7 +386,7 @@ def getPVResourceUtilMetricRecords(recordTimeStamp, metricName, computer, percen pvResourceThresholdViolated: isZeroFill ? 0 : 1, thresholdPercentageDimValue: thresholdPercentage, } - records.push(Yajl::Parser.parse(StringIO.new(resourceThresholdViolatedRecord))) + records.push(JSON.parse(resourceThresholdViolatedRecord)) rescue => errorStr @log.info "Error in getPVResourceUtilMetricRecords: #{errorStr}" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) @@ -418,7 +417,7 @@ def getDiskUsageMetricRecords(record) devicevalue: deviceName, diskUsagePercentageValue: usedPercent, } - records.push(Yajl::Parser.parse(StringIO.new(diskUsedPercentageRecord))) + records.push(JSON.parse(diskUsedPercentageRecord)) end rescue => errorStr @log.info "Error in getDiskUsageMetricRecords: #{errorStr}" @@ -469,7 +468,7 @@ def getMetricRecords(record) dimValues: dimValues, metricValue: v, } - records.push(Yajl::Parser.parse(StringIO.new(metricRecord))) + records.push(JSON.parse(metricRecord)) #@log.info "pushed mdmgenericmetric: #{k},#{v}" end } @@ -545,7 +544,7 @@ def getNodeResourceMetricRecords(record, metric_name, metric_value, percentage_m metricmaxvalue: metric_value, metricsumvalue: metric_value, } - records.push(Yajl::Parser.parse(StringIO.new(custommetricrecord))) + records.push(JSON.parse(custommetricrecord)) if !percentage_metric_value.nil? additional_record = MdmAlertTemplates::Node_resource_metrics_template % { @@ -558,7 +557,7 @@ def getNodeResourceMetricRecords(record, metric_name, metric_value, percentage_m metricmaxvalue: percentage_metric_value, metricsumvalue: percentage_metric_value, } - records.push(Yajl::Parser.parse(StringIO.new(additional_record))) + records.push(JSON.parse(additional_record)) end if !allocatable_percentage_metric_value.nil? 
@@ -572,7 +571,7 @@ def getNodeResourceMetricRecords(record, metric_name, metric_value, percentage_m metricmaxvalue: allocatable_percentage_metric_value, metricsumvalue: allocatable_percentage_metric_value, } - records.push(Yajl::Parser.parse(StringIO.new(additional_record))) + records.push(JSON.parse(additional_record)) end rescue => errorStr @log.info "Error in getNodeResourceMetricRecords: #{errorStr}" diff --git a/source/plugins/ruby/WatchStream.rb b/source/plugins/ruby/WatchStream.rb index 6cc850450..78ce25dd5 100644 --- a/source/plugins/ruby/WatchStream.rb +++ b/source/plugins/ruby/WatchStream.rb @@ -3,7 +3,7 @@ require "net/http" require "net/https" -require "yajl/json_gem" +require "json" require "logger" require "time" @@ -50,7 +50,7 @@ def each response.read_body do |chunk| buffer << chunk while (line = buffer.slice!(/.+\n/)) - yield(Yajl::Parser.parse(StringIO.new(line.chomp))) + yield(JSON.parse(line.chomp)) end end end diff --git a/source/plugins/ruby/arc_k8s_cluster_identity.rb b/source/plugins/ruby/arc_k8s_cluster_identity.rb index 39b8c1c96..43707b91f 100644 --- a/source/plugins/ruby/arc_k8s_cluster_identity.rb +++ b/source/plugins/ruby/arc_k8s_cluster_identity.rb @@ -3,7 +3,7 @@ require "net/http" require "net/https" require "uri" -require "yajl/json_gem" +require "json" require "base64" require "time" require_relative "KubernetesApiClient" diff --git a/source/plugins/ruby/filter_cadvisor2mdm.rb b/source/plugins/ruby/filter_cadvisor2mdm.rb index 6bafa372a..621c94992 100644 --- a/source/plugins/ruby/filter_cadvisor2mdm.rb +++ b/source/plugins/ruby/filter_cadvisor2mdm.rb @@ -6,7 +6,7 @@ module Fluent::Plugin require "logger" - require "yajl/json_gem" + require "json" require_relative "oms_common" require_relative "CustomMetricsUtils" require_relative "kubelet_utils" diff --git a/source/plugins/ruby/filter_inventory2mdm.rb b/source/plugins/ruby/filter_inventory2mdm.rb index 509ac608e..165bb63cf 100644 --- a/source/plugins/ruby/filter_inventory2mdm.rb +++ b/source/plugins/ruby/filter_inventory2mdm.rb @@ -6,7 +6,7 @@ module Fluent::Plugin require 'logger' - require 'yajl/json_gem' + require 'json' require_relative 'oms_common' require_relative 'CustomMetricsUtils' diff --git a/source/plugins/ruby/filter_telegraf2mdm.rb b/source/plugins/ruby/filter_telegraf2mdm.rb index fd71f1682..0819afdb7 100644 --- a/source/plugins/ruby/filter_telegraf2mdm.rb +++ b/source/plugins/ruby/filter_telegraf2mdm.rb @@ -6,7 +6,7 @@ module Fluent::Plugin require "logger" - require "yajl/json_gem" + require "json" require_relative "oms_common" require_relative "kubelet_utils" require_relative "MdmMetricsGenerator" diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index 901ecefab..d929e86fb 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -14,7 +14,7 @@ class CAdvisor_Perf_Input < Input def initialize super require "yaml" - require "yajl/json_gem" + require "json" require "time" require_relative "CAdvisorMetricsAPIClient" diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index c8ffe7d05..aeb70c68a 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -11,7 +11,7 @@ class Container_Inventory_Input < Input def initialize super - require "yajl/json_gem" + require "json" require "time" require_relative "ContainerInventoryState" require_relative "ApplicationInsightsUtility" diff --git 
a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index deeae6e14..6ccb02c54 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -10,8 +10,7 @@ class Kube_Event_Input < Input def initialize super - require "yajl/json_gem" - require "yajl" + require "json" require "time" require_relative "KubernetesApiClient" diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index 368eb61d4..8473cca81 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -16,8 +16,7 @@ def initialize(is_unit_test_mode = nil, kubernetesApiClient = nil, super() require "yaml" - require "yajl/json_gem" - require "yajl" + require "json" require "time" require_relative "KubernetesApiClient" diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index 20589167b..25f9c93e8 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -10,8 +10,7 @@ class Kube_PerfInventory_Input < Input def initialize super require "yaml" - require "yajl/json_gem" - require "yajl" + require "json" require "set" require "time" require "net/http" @@ -407,7 +406,7 @@ def getNodeAllocatableRecords() isAcquiredLock = f.flock(File::LOCK_EX | File::LOCK_NB) raise "in_kube_perfinventory:getNodeAllocatableRecords:Failed to acquire file lock @ #{Time.now.utc.iso8601}" if !isAcquiredLock startTime = (Time.now.to_f * 1000).to_i - nodeAllocatableRecords = Yajl::Parser.parse(f) + nodeAllocatableRecords = JSON.parse(f.read) timetakenMs = ((Time.now.to_f * 1000).to_i - startTime) $log.info "in_kube_perfinventory:getNodeAllocatableRecords:Number of Node Allocatable records: #{nodeAllocatableRecords.length} with time taken(ms) for read: #{timetakenMs} @ #{Time.now.utc.iso8601}" else diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 37c9741c3..a1986bd4a 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -12,8 +12,7 @@ class Kube_PodInventory_Input < Input def initialize super require "yaml" - require "yajl/json_gem" - require "yajl" + require "json" require "set" require "time" require "net/http" @@ -916,9 +915,9 @@ def watch_services else $log.info("in_kube_podinventory::watch_services: Done getting services from Kube API @ #{Time.now.utc.iso8601}") if !serviceInfo.nil? - $log.info("in_kube_podinventory::watch_services:Start:Parsing services data using yajl @ #{Time.now.utc.iso8601}") - serviceInventory = Yajl::Parser.parse(StringIO.new(serviceInfo.body)) - $log.info("in_kube_podinventory::watch_services:End:Parsing services data using yajl @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::watch_services:Start:Parsing services data using JSON @ #{Time.now.utc.iso8601}") + serviceInventory = JSON.parse(serviceInfo.body) + $log.info("in_kube_podinventory::watch_services:End:Parsing services data using JSON @ #{Time.now.utc.iso8601}") serviceInfo = nil if (!serviceInventory.nil? && !serviceInventory.empty?) 
servicesResourceVersion = serviceInventory["metadata"]["resourceVersion"] diff --git a/source/plugins/ruby/in_kube_podmdminventory.rb b/source/plugins/ruby/in_kube_podmdminventory.rb index b872650d2..38e07d860 100644 --- a/source/plugins/ruby/in_kube_podmdminventory.rb +++ b/source/plugins/ruby/in_kube_podmdminventory.rb @@ -14,8 +14,7 @@ class Kube_PodMDMInventory_Input < Input def initialize super require "yaml" - require "yajl/json_gem" - require "yajl" + require "json" require "set" require "time" require "net/http" @@ -187,7 +186,7 @@ def getMDMRecords() isAcquiredLock = f.flock(File::LOCK_EX | File::LOCK_NB) raise "in_kube_podmdminventory:getMDMRecords:Failed to acquire file lock @ #{Time.now.utc.iso8601}" if !isAcquiredLock startTime = (Time.now.to_f * 1000).to_i - mdmRecords = Yajl::Parser.parse(f) + mdmRecords = JSON.parse(f.read) timetakenMs = ((Time.now.to_f * 1000).to_i - startTime) if mdmRecords.nil? || mdmRecords.empty? || mdmRecords["items"].nil? || mdmRecords["collectionTime"] == @prevCollectionTime raise "in_kube_podmdminventory:getMDMRecords: either read mdmRecords is nil or empty or stale @ #{Time.now.utc.iso8601}" diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index fccfd459d..1e25e4057 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -12,8 +12,7 @@ class Kube_PVInventory_Input < Input def initialize super require "yaml" - require "yajl/json_gem" - require "yajl" + require "json" require "time" require_relative "KubernetesApiClient" require_relative "ApplicationInsightsUtility" diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index 0b563a890..92e6318b9 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -12,8 +12,7 @@ class Kube_Kubestate_Deployments_Input < Input def initialize super - require "yajl/json_gem" - require "yajl" + require "json" require "date" require "time" diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 178f7944f..7f7e3aac5 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -10,8 +10,7 @@ class Kube_Kubestate_HPA_Input < Input def initialize super - require "yajl/json_gem" - require "yajl" + require "json" require "time" require_relative "KubernetesApiClient" diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index dd462fdf2..841c4867a 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -12,7 +12,7 @@ class Win_CAdvisor_Perf_Input < Input def initialize super require "yaml" - require "yajl/json_gem" + require "json" require "time" require_relative "CAdvisorMetricsAPIClient" diff --git a/source/plugins/ruby/kubelet_utils.rb b/source/plugins/ruby/kubelet_utils.rb index 368ca8639..b986f2ab7 100644 --- a/source/plugins/ruby/kubelet_utils.rb +++ b/source/plugins/ruby/kubelet_utils.rb @@ -3,7 +3,7 @@ # frozen_string_literal: true require "logger" -require "yajl/json_gem" +require "json" require_relative "CAdvisorMetricsAPIClient" require_relative "KubernetesApiClient" require "bigdecimal" @@ -52,7 +52,7 @@ def get_node_allocatable(cpu_capacity, memory_capacity) cpu_allocatable = 1.0 memory_allocatable = 1.0 - + allocatable_response = CAdvisorMetricsAPIClient.getCongifzCAdvisor(winNode: nil) parsed_response = 
JSON.parse(allocatable_response.body) @@ -66,7 +66,7 @@ def get_node_allocatable(cpu_capacity, memory_capacity) @log.error "Error in get_node_allocatable::kubereserved_cpu: #{errorStr}" kubereserved_cpu = "0.0" ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::kubereserved_cpu: #{errorStr}") - end + end begin kubereserved_memory = parsed_response["kubeletconfig"]["kubeReserved"]["memory"] @@ -78,7 +78,7 @@ def get_node_allocatable(cpu_capacity, memory_capacity) @log.error "Error in get_node_allocatable::kubereserved_memory: #{errorStr}" kubereserved_memory = "0.0" ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::kubereserved_memory: #{errorStr}") - end + end begin systemReserved_cpu = parsed_response["kubeletconfig"]["systemReserved"]["cpu"] if systemReserved_cpu.nil? || systemReserved_cpu == "" @@ -90,7 +90,7 @@ def get_node_allocatable(cpu_capacity, memory_capacity) @log.error "Error in get_node_allocatable::systemReserved_cpu: #{errorStr}" systemReserved_cpu = "0.0" ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::systemReserved_cpu: #{errorStr}") - end + end begin explicitlyReserved_cpu = parsed_response["kubeletconfig"]["reservedCPUs"] @@ -103,19 +103,19 @@ def get_node_allocatable(cpu_capacity, memory_capacity) @log.error "Error in get_node_allocatable::explicitlyReserved_cpu: #{errorStr}" explicitlyReserved_cpu = "0.0" ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::explicitlyReserved_cpu: #{errorStr}") - end + end begin - systemReserved_memory = parsed_response["kubeletconfig"]["systemReserved"]["memory"] - if systemReserved_memory.nil? || systemReserved_memory == "" + systemReserved_memory = parsed_response["kubeletconfig"]["systemReserved"]["memory"] + if systemReserved_memory.nil? || systemReserved_memory == "" systemReserved_memory = "0.0" - end - @log.info "get_node_allocatable::systemReserved_memory #{systemReserved_memory}" + end + @log.info "get_node_allocatable::systemReserved_memory #{systemReserved_memory}" rescue => errorStr - @log.error "Error in get_node_allocatable::systemReserved_memory: #{errorStr}" - systemReserved_memory = "0.0" + @log.error "Error in get_node_allocatable::systemReserved_memory: #{errorStr}" + systemReserved_memory = "0.0" ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::systemReserved_memory: #{errorStr}") - end + end begin evictionHard_memory = parsed_response["kubeletconfig"]["evictionHard"]["memory.available"] @@ -127,16 +127,16 @@ def get_node_allocatable(cpu_capacity, memory_capacity) @log.error "Error in get_node_allocatable::evictionHard_memory: #{errorStr}" evictionHard_memory = "0.0" ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::evictionHard_memory: #{errorStr}") - end + end # do calculation in nanocore since that's what KubernetesApiClient.getMetricNumericValue expects cpu_capacity_number = cpu_capacity.to_i * 1000.0 ** 2 # subtract to get allocatable. 
Formula : Allocatable = Capacity - ( kube reserved + system reserved + eviction threshold ) # https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable if KubernetesApiClient.getMetricNumericValue("cpu", explicitlyReserved_cpu) > 0 - cpu_allocatable = cpu_capacity_number - KubernetesApiClient.getMetricNumericValue("cpu", explicitlyReserved_cpu) + cpu_allocatable = cpu_capacity_number - KubernetesApiClient.getMetricNumericValue("cpu", explicitlyReserved_cpu) else - cpu_allocatable = cpu_capacity_number - (KubernetesApiClient.getMetricNumericValue("cpu", kubereserved_cpu) + KubernetesApiClient.getMetricNumericValue("cpu", systemReserved_cpu)) + cpu_allocatable = cpu_capacity_number - (KubernetesApiClient.getMetricNumericValue("cpu", kubereserved_cpu) + KubernetesApiClient.getMetricNumericValue("cpu", systemReserved_cpu)) end # convert back to units similar to what we get for capacity cpu_allocatable = cpu_allocatable / (1000.0 ** 2) @@ -165,7 +165,7 @@ def get_all_container_limits containerResourceDimensionHash = {} response = CAdvisorMetricsAPIClient.getPodsFromCAdvisor(winNode: nil) if !response.nil? && !response.body.nil? && !response.body.empty? - podInventory = Yajl::Parser.parse(StringIO.new(response.body)) + podInventory = JSON.parse(response.body) podInventory["items"].each do |items| @log.info "in pod inventory items..." podNameSpace = items["metadata"]["namespace"] diff --git a/source/plugins/ruby/kubernetes_container_inventory.rb b/source/plugins/ruby/kubernetes_container_inventory.rb index 81889b61b..3c2bda2e5 100644 --- a/source/plugins/ruby/kubernetes_container_inventory.rb +++ b/source/plugins/ruby/kubernetes_container_inventory.rb @@ -2,9 +2,8 @@ # frozen_string_literal: true class KubernetesContainerInventory - require "yajl/json_gem" - require "time" require "json" + require "time" require_relative "omslog" require_relative "ApplicationInsightsUtility" diff --git a/source/plugins/ruby/lib/application_insights/channel/contracts/json_serializable.rb b/source/plugins/ruby/lib/application_insights/channel/contracts/json_serializable.rb index 60838e215..8f4677044 100644 --- a/source/plugins/ruby/lib/application_insights/channel/contracts/json_serializable.rb +++ b/source/plugins/ruby/lib/application_insights/channel/contracts/json_serializable.rb @@ -1,4 +1,4 @@ -require 'yajl/json_gem' +require 'json' module ApplicationInsights module Channel diff --git a/source/plugins/ruby/lib/application_insights/channel/sender_base.rb b/source/plugins/ruby/lib/application_insights/channel/sender_base.rb index e5a4dea62..f5102c27a 100644 --- a/source/plugins/ruby/lib/application_insights/channel/sender_base.rb +++ b/source/plugins/ruby/lib/application_insights/channel/sender_base.rb @@ -1,4 +1,4 @@ -require "yajl/json_gem" +require "json" require "net/http" require "openssl" require "stringio" diff --git a/source/plugins/ruby/out_mdm.rb b/source/plugins/ruby/out_mdm.rb index 4561cdd9a..6fcc22cda 100644 --- a/source/plugins/ruby/out_mdm.rb +++ b/source/plugins/ruby/out_mdm.rb @@ -14,7 +14,7 @@ def initialize require "net/https" require "securerandom" require "uri" - require "yajl/json_gem" + require "json" require_relative "KubernetesApiClient" require_relative "ApplicationInsightsUtility" require_relative "constants" diff --git a/source/plugins/ruby/podinventory_to_mdm.rb b/source/plugins/ruby/podinventory_to_mdm.rb index a7f9c5435..5102274ed 100644 --- a/source/plugins/ruby/podinventory_to_mdm.rb +++ b/source/plugins/ruby/podinventory_to_mdm.rb @@ -3,7 
+3,7 @@ # frozen_string_literal: true require "logger" -require "yajl/json_gem" +require "json" require "time" require_relative "oms_common" require_relative "CustomMetricsUtils" @@ -129,7 +129,7 @@ def get_pod_inventory_mdm_records(batch_time) controllerNameDimValue: podControllerNameDimValue, podCountMetricValue: value, } - records.push(Yajl::Parser.parse(record)) + records.push(JSON.parse(record)) } #Add pod metric records diff --git a/source/toml-parser/tomlrb.rb b/source/toml-parser/tomlrb.rb deleted file mode 100644 index c0eff9093..000000000 --- a/source/toml-parser/tomlrb.rb +++ /dev/null @@ -1,44 +0,0 @@ -require "time" -require "stringio" -require_relative "tomlrb/version" -require_relative "tomlrb/string_utils" -require_relative "tomlrb/scanner" -require_relative "tomlrb/parser" -require_relative "tomlrb/handler" - -module Tomlrb - class ParseError < StandardError; end - - # Parses a valid TOML string into its Ruby data structure - # - # @param string_or_io [String, StringIO] the content - # @param options [Hash] the options hash - # @option options [Boolean] :symbolize_keys (false) whether to return the keys as symbols or strings - # @return [Hash] the Ruby data structure represented by the input - def self.parse(string_or_io, **options) - io = string_or_io.is_a?(String) ? StringIO.new(string_or_io) : string_or_io - scanner = Scanner.new(io) - parser = Parser.new(scanner, options) - begin - handler = parser.parse - rescue Racc::ParseError => e - raise ParseError, e.message - end - - handler.output - end - - # Reads a file content and parses it into its Ruby data structure - # - # @param path [String] the path to the file - # @param options [Hash] the options hash - # @option options [Boolean] :symbolize_keys (false) whether to return the keys as symbols or strings - # @return [Hash] the Ruby data structure represented by the input - def self.load_file(path, **options) - # By default Ruby sets the external encoding of an IO object to the - # default external encoding. The default external encoding is set by - # locale encoding or the interpreter -E option. - tmp = File.read(path, :encoding => "utf-8") - Tomlrb.parse(tmp, options) - end -end diff --git a/source/toml-parser/tomlrb/generated_parser.rb b/source/toml-parser/tomlrb/generated_parser.rb deleted file mode 100644 index ebf815e7d..000000000 --- a/source/toml-parser/tomlrb/generated_parser.rb +++ /dev/null @@ -1,542 +0,0 @@ -# -# DO NOT MODIFY!!!! -# This file is automatically generated by Racc 1.4.14 -# from Racc grammer file "". 
-# - -require 'racc/parser.rb' -module Tomlrb - class GeneratedParser < Racc::Parser -##### State transition tables begin ### - -racc_action_table = [ - 2, 17, 11, 31, 12, 31, 13, 27, 14, 77, - 15, 16, 8, 78, 32, 10, 33, 29, 34, 29, - 57, 58, 59, 60, 56, 53, 52, 54, 55, 46, - 40, 41, 10, 57, 58, 59, 60, 56, 53, 52, - 54, 55, 46, 69, 70, 10, 57, 58, 59, 60, - 56, 53, 52, 54, 55, 46, 35, 36, 10, 57, - 58, 59, 60, 56, 53, 52, 54, 55, 46, 37, - 38, 10, 57, 58, 59, 60, 56, 53, 52, 54, - 55, 46, 43, 66, 10, 57, 58, 59, 60, 56, - 53, 52, 54, 55, 46, nil, nil, 10, 57, 58, - 59, 60, 56, 53, 52, 54, 55, 46, nil, nil, - 10, 57, 58, 59, 60, 56, 53, 52, 54, 55, - 46, 73, nil, 10, 57, 58, 59, 60, 56, 53, - 52, 54, 55, 46, 73, 21, 10, 22, nil, 23, - nil, 24, nil, 25, 26, 21, 19, 22, nil, 23, - nil, 24, nil, 25, 26, nil, 19 ] - -racc_action_check = [ - 1, 2, 1, 9, 1, 70, 1, 8, 1, 74, - 1, 1, 1, 74, 11, 1, 12, 9, 13, 70, - 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, - 20, 20, 32, 33, 33, 33, 33, 33, 33, 33, - 33, 33, 33, 42, 42, 33, 34, 34, 34, 34, - 34, 34, 34, 34, 34, 34, 14, 15, 34, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 16, - 19, 35, 36, 36, 36, 36, 36, 36, 36, 36, - 36, 36, 30, 40, 36, 37, 37, 37, 37, 37, - 37, 37, 37, 37, 37, nil, nil, 37, 43, 43, - 43, 43, 43, 43, 43, 43, 43, 43, nil, nil, - 43, 45, 45, 45, 45, 45, 45, 45, 45, 45, - 45, 45, nil, 45, 78, 78, 78, 78, 78, 78, - 78, 78, 78, 78, 78, 7, 78, 7, nil, 7, - nil, 7, nil, 7, 7, 41, 7, 41, nil, 41, - nil, 41, nil, 41, 41, nil, 41 ] - -racc_action_pointer = [ - nil, 0, 1, nil, nil, nil, nil, 133, -5, 1, - nil, -4, -2, 0, 38, 39, 51, nil, nil, 57, - 17, nil, nil, nil, nil, nil, nil, nil, nil, nil, - 64, nil, 17, 30, 43, 56, 69, 82, nil, nil, - 70, 143, 27, 95, nil, 108, nil, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - 3, nil, nil, nil, -4, nil, nil, nil, 121, nil ] - -racc_action_default = [ - -1, -56, -56, -2, -3, -4, -5, -56, -8, -56, - -22, -56, -56, -56, -56, -56, -56, 80, -6, -10, - -56, -15, -16, -17, -18, -19, -20, -7, -21, -23, - -56, -27, -46, -46, -46, -46, -46, -46, -9, -11, - -13, -56, -56, -46, -29, -46, -40, -41, -42, -43, - -44, -45, -47, -48, -49, -50, -51, -52, -53, -54, - -55, -30, -31, -32, -33, -34, -12, -14, -24, -25, - -56, -28, -35, -36, -56, -26, -37, -38, -46, -39 ] - -racc_goto_table = [ - 28, 18, 1, 72, 44, 61, 62, 63, 64, 65, - 3, 4, 5, 6, 7, 71, 39, 42, 68, 76, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, 67, 79, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, 75 ] - -racc_goto_check = [ - 11, 7, 1, 18, 15, 15, 15, 15, 15, 15, - 2, 3, 4, 5, 6, 15, 9, 13, 14, 19, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, 7, 18, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, 11 ] - -racc_goto_pointer = [ - nil, 2, 9, 10, 11, 12, 13, -6, nil, -4, - nil, -9, nil, -13, -24, -28, nil, nil, -42, -55, - nil, nil, nil ] - -racc_goto_default = [ - nil, nil, nil, nil, nil, 49, nil, nil, 20, nil, - 9, nil, 30, nil, nil, 74, 48, 45, nil, nil, - 47, 50, 51 ] - -racc_reduce_table = [ - 0, 0, :racc_error, - 0, 20, :_reduce_none, - 2, 20, :_reduce_none, - 1, 21, :_reduce_none, - 1, 21, :_reduce_none, - 1, 21, :_reduce_none, - 2, 22, :_reduce_none, - 2, 25, :_reduce_7, - 1, 25, :_reduce_8, - 2, 26, :_reduce_9, - 1, 26, :_reduce_10, - 2, 26, :_reduce_none, 
- 2, 28, :_reduce_12, - 1, 28, :_reduce_13, - 2, 28, :_reduce_none, - 1, 27, :_reduce_15, - 1, 27, :_reduce_16, - 1, 27, :_reduce_17, - 1, 27, :_reduce_18, - 1, 27, :_reduce_19, - 1, 27, :_reduce_20, - 2, 24, :_reduce_none, - 1, 29, :_reduce_22, - 1, 30, :_reduce_23, - 3, 30, :_reduce_none, - 1, 33, :_reduce_25, - 2, 33, :_reduce_none, - 1, 31, :_reduce_27, - 2, 32, :_reduce_none, - 3, 23, :_reduce_29, - 3, 23, :_reduce_30, - 3, 23, :_reduce_31, - 3, 23, :_reduce_32, - 3, 23, :_reduce_33, - 3, 23, :_reduce_34, - 2, 35, :_reduce_none, - 1, 37, :_reduce_36, - 2, 37, :_reduce_none, - 1, 38, :_reduce_38, - 2, 38, :_reduce_none, - 1, 36, :_reduce_40, - 1, 34, :_reduce_41, - 1, 34, :_reduce_none, - 1, 34, :_reduce_none, - 1, 39, :_reduce_none, - 1, 39, :_reduce_none, - 0, 41, :_reduce_none, - 1, 41, :_reduce_47, - 1, 41, :_reduce_48, - 1, 41, :_reduce_49, - 1, 41, :_reduce_50, - 1, 41, :_reduce_51, - 1, 40, :_reduce_52, - 1, 40, :_reduce_53, - 1, 40, :_reduce_54, - 1, 40, :_reduce_55 ] - -racc_reduce_n = 56 - -racc_shift_n = 80 - -racc_token_table = { - false => 0, - :error => 1, - :IDENTIFIER => 2, - :STRING_MULTI => 3, - :STRING_BASIC => 4, - :STRING_LITERAL_MULTI => 5, - :STRING_LITERAL => 6, - :DATETIME => 7, - :INTEGER => 8, - :FLOAT => 9, - :TRUE => 10, - :FALSE => 11, - "[" => 12, - "]" => 13, - "." => 14, - "{" => 15, - "}" => 16, - "," => 17, - "=" => 18 } - -racc_nt_base = 19 - -racc_use_result_var = true - -Racc_arg = [ - racc_action_table, - racc_action_check, - racc_action_default, - racc_action_pointer, - racc_goto_table, - racc_goto_check, - racc_goto_default, - racc_goto_pointer, - racc_nt_base, - racc_reduce_table, - racc_token_table, - racc_shift_n, - racc_reduce_n, - racc_use_result_var ] - -Racc_token_to_s_table = [ - "$end", - "error", - "IDENTIFIER", - "STRING_MULTI", - "STRING_BASIC", - "STRING_LITERAL_MULTI", - "STRING_LITERAL", - "DATETIME", - "INTEGER", - "FLOAT", - "TRUE", - "FALSE", - "\"[\"", - "\"]\"", - "\".\"", - "\"{\"", - "\"}\"", - "\",\"", - "\"=\"", - "$start", - "expressions", - "expression", - "table", - "assignment", - "inline_table", - "table_start", - "table_continued", - "table_identifier", - "table_next", - "inline_table_start", - "inline_continued", - "inline_assignment_key", - "inline_assignment_value", - "inline_next", - "value", - "array", - "start_array", - "array_continued", - "array_next", - "scalar", - "string", - "literal" ] - -Racc_debug_parser = false - -##### State transition tables end ##### - -# reduce 0 omitted - -# reduce 1 omitted - -# reduce 2 omitted - -# reduce 3 omitted - -# reduce 4 omitted - -# reduce 5 omitted - -# reduce 6 omitted - -module_eval(<<'.,.,', 'parser.y', 15) - def _reduce_7(val, _values, result) - @handler.start_(:array_of_tables) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 16) - def _reduce_8(val, _values, result) - @handler.start_(:table) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 19) - def _reduce_9(val, _values, result) - array = @handler.end_(:array_of_tables); @handler.set_context(array, is_array_of_tables: true) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 20) - def _reduce_10(val, _values, result) - array = @handler.end_(:table); @handler.set_context(array) - result - end -.,., - -# reduce 11 omitted - -module_eval(<<'.,.,', 'parser.y', 24) - def _reduce_12(val, _values, result) - array = @handler.end_(:array_of_tables); @handler.set_context(array, is_array_of_tables: true) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 25) - def _reduce_13(val, _values, 
result) - array = @handler.end_(:table); @handler.set_context(array) - result - end -.,., - -# reduce 14 omitted - -module_eval(<<'.,.,', 'parser.y', 29) - def _reduce_15(val, _values, result) - @handler.push(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 30) - def _reduce_16(val, _values, result) - @handler.push(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 31) - def _reduce_17(val, _values, result) - @handler.push(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 32) - def _reduce_18(val, _values, result) - @handler.push(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 33) - def _reduce_19(val, _values, result) - @handler.push(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 34) - def _reduce_20(val, _values, result) - @handler.push(val[0]) - result - end -.,., - -# reduce 21 omitted - -module_eval(<<'.,.,', 'parser.y', 40) - def _reduce_22(val, _values, result) - @handler.start_(:inline) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 43) - def _reduce_23(val, _values, result) - array = @handler.end_(:inline); @handler.push(Hash[*array]) - result - end -.,., - -# reduce 24 omitted - -module_eval(<<'.,.,', 'parser.y', 48) - def _reduce_25(val, _values, result) - array = @handler.end_(:inline) - array.map!.with_index{ |n,i| i.even? ? n.to_sym : n } if @handler.symbolize_keys - @handler.push(Hash[*array]) - - result - end -.,., - -# reduce 26 omitted - -module_eval(<<'.,.,', 'parser.y', 55) - def _reduce_27(val, _values, result) - @handler.push(val[0]) - result - end -.,., - -# reduce 28 omitted - -module_eval(<<'.,.,', 'parser.y', 61) - def _reduce_29(val, _values, result) - @handler.assign(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 62) - def _reduce_30(val, _values, result) - @handler.assign(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 63) - def _reduce_31(val, _values, result) - @handler.assign(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 64) - def _reduce_32(val, _values, result) - @handler.assign(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 65) - def _reduce_33(val, _values, result) - @handler.assign(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 66) - def _reduce_34(val, _values, result) - @handler.assign(val[0]) - result - end -.,., - -# reduce 35 omitted - -module_eval(<<'.,.,', 'parser.y', 72) - def _reduce_36(val, _values, result) - array = @handler.end_(:array); @handler.push(array) - result - end -.,., - -# reduce 37 omitted - -module_eval(<<'.,.,', 'parser.y', 76) - def _reduce_38(val, _values, result) - array = @handler.end_(:array); @handler.push(array) - result - end -.,., - -# reduce 39 omitted - -module_eval(<<'.,.,', 'parser.y', 80) - def _reduce_40(val, _values, result) - @handler.start_(:array) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 83) - def _reduce_41(val, _values, result) - @handler.push(val[0]) - result - end -.,., - -# reduce 42 omitted - -# reduce 43 omitted - -# reduce 44 omitted - -# reduce 45 omitted - -# reduce 46 omitted - -module_eval(<<'.,.,', 'parser.y', 92) - def _reduce_47(val, _values, result) - result = val[0].to_f - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 93) - def _reduce_48(val, _values, result) - result = val[0].to_i - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 94) - def _reduce_49(val, _values, result) - result = true - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 95) - def 
_reduce_50(val, _values, result) - result = false - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 96) - def _reduce_51(val, _values, result) - result = Time.new(*val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 99) - def _reduce_52(val, _values, result) - result = StringUtils.replace_escaped_chars(StringUtils.multiline_replacements(val[0])) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 100) - def _reduce_53(val, _values, result) - result = StringUtils.replace_escaped_chars(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 101) - def _reduce_54(val, _values, result) - result = StringUtils.strip_spaces(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 102) - def _reduce_55(val, _values, result) - result = val[0] - result - end -.,., - -def _reduce_none(val, _values, result) - val[0] -end - - end # class GeneratedParser - end # module Tomlrb diff --git a/source/toml-parser/tomlrb/handler.rb b/source/toml-parser/tomlrb/handler.rb deleted file mode 100644 index d60b54bc3..000000000 --- a/source/toml-parser/tomlrb/handler.rb +++ /dev/null @@ -1,73 +0,0 @@ -module Tomlrb - class Handler - attr_reader :output, :symbolize_keys - - def initialize(**options) - @output = {} - @current = @output - @stack = [] - @array_names = [] - @symbolize_keys = options[:symbolize_keys] - end - - def set_context(identifiers, is_array_of_tables: false) - @current = @output - - deal_with_array_of_tables(identifiers, is_array_of_tables) do |identifierz| - identifierz.each do |k| - k = k.to_sym if @symbolize_keys - if @current[k].is_a?(Array) - @current[k] << {} if @current[k].empty? - @current = @current[k].last - else - @current[k] ||= {} - @current = @current[k] - end - end - end - end - - def deal_with_array_of_tables(identifiers, is_array_of_tables) - identifiers.map!{|n| n.gsub("\"", '')} - stringified_identifier = identifiers.join('.') - - if is_array_of_tables - @array_names << stringified_identifier - last_identifier = identifiers.pop - elsif @array_names.include?(stringified_identifier) - raise ParseError, 'Cannot define a normal table with the same name as an already established array' - end - - yield(identifiers) - - if is_array_of_tables - last_identifier = last_identifier.to_sym if @symbolize_keys - @current[last_identifier] ||= [] - @current[last_identifier] << {} - @current = @current[last_identifier].last - end - end - - def assign(k) - k = k.to_sym if @symbolize_keys - @current[k] = @stack.pop - end - - def push(o) - @stack << o - end - - def start_(type) - push([type]) - end - - def end_(type) - array = [] - while (value = @stack.pop) != [type] - raise ParseError, 'Unclosed table' unless value - array.unshift(value) - end - array - end - end -end diff --git a/source/toml-parser/tomlrb/parser.rb b/source/toml-parser/tomlrb/parser.rb deleted file mode 100644 index 31771a1ca..000000000 --- a/source/toml-parser/tomlrb/parser.rb +++ /dev/null @@ -1,18 +0,0 @@ -require_relative "generated_parser" - -class Tomlrb::Parser < Tomlrb::GeneratedParser - def initialize(tokenizer, **options) - @tokenizer = tokenizer - @handler = Tomlrb::Handler.new(options) - super() - end - - def next_token - @tokenizer.next_token - end - - def parse - do_parse - @handler - end -end diff --git a/source/toml-parser/tomlrb/parser.y b/source/toml-parser/tomlrb/parser.y deleted file mode 100644 index fcebcac06..000000000 --- a/source/toml-parser/tomlrb/parser.y +++ /dev/null @@ -1,104 +0,0 @@ -class Tomlrb::GeneratedParser -token IDENTIFIER STRING_MULTI 
STRING_BASIC STRING_LITERAL_MULTI STRING_LITERAL DATETIME INTEGER FLOAT TRUE FALSE -rule - expressions - | expressions expression - ; - expression - : table - | assignment - | inline_table - ; - table - : table_start table_continued - ; - table_start - : '[' '[' { @handler.start_(:array_of_tables) } - | '[' { @handler.start_(:table) } - ; - table_continued - : ']' ']' { array = @handler.end_(:array_of_tables); @handler.set_context(array, is_array_of_tables: true) } - | ']' { array = @handler.end_(:table); @handler.set_context(array) } - | table_identifier table_next - ; - table_next - : ']' ']' { array = @handler.end_(:array_of_tables); @handler.set_context(array, is_array_of_tables: true) } - | ']' { array = @handler.end_(:table); @handler.set_context(array) } - | '.' table_continued - ; - table_identifier - : IDENTIFIER { @handler.push(val[0]) } - | STRING_BASIC { @handler.push(val[0]) } - | STRING_LITERAL { @handler.push(val[0]) } - | INTEGER { @handler.push(val[0]) } - | TRUE { @handler.push(val[0]) } - | FALSE { @handler.push(val[0]) } - ; - inline_table - : inline_table_start inline_continued - ; - inline_table_start - : '{' { @handler.start_(:inline) } - ; - inline_continued - : '}' { array = @handler.end_(:inline); @handler.push(Hash[*array]) } - | inline_assignment_key inline_assignment_value inline_next - ; - inline_next - : '}' { - array = @handler.end_(:inline) - array.map!.with_index{ |n,i| i.even? ? n.to_sym : n } if @handler.symbolize_keys - @handler.push(Hash[*array]) - } - | ',' inline_continued - ; - inline_assignment_key - : IDENTIFIER { @handler.push(val[0]) } - ; - inline_assignment_value - : '=' value - ; - assignment - : IDENTIFIER '=' value { @handler.assign(val[0]) } - | STRING_BASIC '=' value { @handler.assign(val[0]) } - | STRING_LITERAL '=' value { @handler.assign(val[0]) } - | INTEGER '=' value { @handler.assign(val[0]) } - | TRUE '=' value { @handler.assign(val[0]) } - | FALSE '=' value { @handler.assign(val[0]) } - ; - array - : start_array array_continued - ; - array_continued - : ']' { array = @handler.end_(:array); @handler.push(array) } - | value array_next - ; - array_next - : ']' { array = @handler.end_(:array); @handler.push(array) } - | ',' array_continued - ; - start_array - : '[' { @handler.start_(:array) } - ; - value - : scalar { @handler.push(val[0]) } - | array - | inline_table - ; - scalar - : string - | literal - ; - literal - | FLOAT { result = val[0].to_f } - | INTEGER { result = val[0].to_i } - | TRUE { result = true } - | FALSE { result = false } - | DATETIME { result = Time.new(*val[0])} - ; - string - : STRING_MULTI { result = StringUtils.replace_escaped_chars(StringUtils.multiline_replacements(val[0])) } - | STRING_BASIC { result = StringUtils.replace_escaped_chars(val[0]) } - | STRING_LITERAL_MULTI { result = StringUtils.strip_spaces(val[0]) } - | STRING_LITERAL { result = val[0] } - ; diff --git a/source/toml-parser/tomlrb/scanner.rb b/source/toml-parser/tomlrb/scanner.rb deleted file mode 100644 index d0f479eef..000000000 --- a/source/toml-parser/tomlrb/scanner.rb +++ /dev/null @@ -1,54 +0,0 @@ -require 'strscan' - -module Tomlrb - class Scanner - COMMENT = /#.*/ - IDENTIFIER = /[A-Za-z0-9_-]+/ - SPACE = /[ \t\r\n]/ - STRING_BASIC = /(["])(?:\\?.)*?\1/ - STRING_MULTI = /"{3}([\s\S]*?"{3,4})/m - STRING_LITERAL = /(['])(?:\\?.)*?\1/ - STRING_LITERAL_MULTI = /'{3}([\s\S]*?'{3})/m - DATETIME = /(-?\d{4})-(\d{2})-(\d{2})(?:(?:t|\s)(\d{2}):(\d{2}):(\d{2}(?:\.\d+)?))?(z|[-+]\d{2}:\d{2})?/i - FLOAT = 
/[+-]?(?:[0-9_]+\.[0-9_]*|\d+(?=[eE]))(?:[eE][+-]?[0-9_]+)?/ - INTEGER = /[+-]?([1-9](_?\d)*|0)(?![A-Za-z0-9_-]+)/ - TRUE = /true/ - FALSE = /false/ - - def initialize(io) - @ss = StringScanner.new(io.read) - end - - def next_token - return if @ss.eos? - - case - when @ss.scan(SPACE) then next_token - when @ss.scan(COMMENT) then next_token - when @ss.scan(DATETIME) then process_datetime - when text = @ss.scan(STRING_MULTI) then [:STRING_MULTI, text[3..-4]] - when text = @ss.scan(STRING_BASIC) then [:STRING_BASIC, text[1..-2]] - when text = @ss.scan(STRING_LITERAL_MULTI) then [:STRING_LITERAL_MULTI, text[3..-4]] - when text = @ss.scan(STRING_LITERAL) then [:STRING_LITERAL, text[1..-2]] - when text = @ss.scan(FLOAT) then [:FLOAT, text] - when text = @ss.scan(INTEGER) then [:INTEGER, text] - when text = @ss.scan(TRUE) then [:TRUE, text] - when text = @ss.scan(FALSE) then [:FALSE, text] - when text = @ss.scan(IDENTIFIER) then [:IDENTIFIER, text] - else - x = @ss.getch - [x, x] - end - end - - def process_datetime - if @ss[7].nil? - offset = '+00:00' - else - offset = @ss[7].gsub('Z', '+00:00') - end - args = [@ss[1], @ss[2], @ss[3], @ss[4] || 0, @ss[5] || 0, @ss[6].to_f, offset] - [:DATETIME, args] - end - end -end diff --git a/source/toml-parser/tomlrb/string_utils.rb b/source/toml-parser/tomlrb/string_utils.rb deleted file mode 100644 index 53d27e414..000000000 --- a/source/toml-parser/tomlrb/string_utils.rb +++ /dev/null @@ -1,33 +0,0 @@ -module Tomlrb - class StringUtils - - SPECIAL_CHARS = { - '\\t' => "\t", - '\\b' => "\b", - '\\f' => "\f", - '\\n' => "\n", - '\\r' => "\r", - '\\"' => '"', - '\\\\' => '\\' - }.freeze - - def self.multiline_replacements(str) - strip_spaces(str).gsub(/\\\n\s+/, '') - end - - def self.replace_escaped_chars(str) - str.gsub(/\\(u[\da-fA-F]{4}|U[\da-fA-F]{8}|.)/) do |m| - if m.size == 2 - SPECIAL_CHARS[m] || (raise Tomlrb::ParseError.new "Escape sequence #{m} is reserved") - else - m[2..-1].to_i(16).chr(Encoding::UTF_8) - end - end - end - - def self.strip_spaces(str) - str[0] = '' if str[0] == "\n" - str - end - end -end diff --git a/source/toml-parser/tomlrb/version.rb b/source/toml-parser/tomlrb/version.rb deleted file mode 100644 index b72a81b60..000000000 --- a/source/toml-parser/tomlrb/version.rb +++ /dev/null @@ -1,3 +0,0 @@ -module Tomlrb - VERSION = "1.2.8" -end From 053c68dee4a6ff82f7149aa0ee4fd63db188f172 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 25 Jul 2022 15:21:39 -0700 Subject: [PATCH 245/301] Gangams/fix log loss inode reuse (#796) * use ignore_older fbit default and option for configurability * fix minor comment * fix minor comment --- .../scripts/td-agent-bit-conf-customizer.rb | 11 ++++- .../scripts/tomlparser-agent-config.rb | 46 +++++++++++++------ build/linux/installer/conf/td-agent-bit.conf | 2 +- build/windows/installer/conf/fluent-bit.conf | 2 +- kubernetes/container-azm-ms-agentconfig.yaml | 12 +++-- 5 files changed, 51 insertions(+), 22 deletions(-) diff --git a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb index 995d72b87..5db387911 100644 --- a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb +++ b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb @@ -24,15 +24,16 @@ def substituteFluentBitPlaceHolders bufferChunkSize = ENV["FBIT_TAIL_BUFFER_CHUNK_SIZE"] bufferMaxSize = ENV["FBIT_TAIL_BUFFER_MAX_SIZE"] memBufLimit = ENV["FBIT_TAIL_MEM_BUF_LIMIT"] + ignoreOlder = ENV["FBIT_TAIL_IGNORE_OLDER"] - 
serviceInterval = (!interval.nil? && is_number?(interval) && interval.to_i > 0 ) ? interval : @default_service_interval + serviceInterval = (!interval.nil? && is_number?(interval) && interval.to_i > 0) ? interval : @default_service_interval serviceIntervalSetting = "Flush " + serviceInterval tailBufferChunkSize = (!bufferChunkSize.nil? && is_number?(bufferChunkSize) && bufferChunkSize.to_i > 0) ? bufferChunkSize : nil tailBufferMaxSize = (!bufferMaxSize.nil? && is_number?(bufferMaxSize) && bufferMaxSize.to_i > 0) ? bufferMaxSize : nil - if ((!tailBufferChunkSize.nil? && tailBufferMaxSize.nil?) || (!tailBufferChunkSize.nil? && !tailBufferMaxSize.nil? && tailBufferChunkSize.to_i > tailBufferMaxSize.to_i)) + if ((!tailBufferChunkSize.nil? && tailBufferMaxSize.nil?) || (!tailBufferChunkSize.nil? && !tailBufferMaxSize.nil? && tailBufferChunkSize.to_i > tailBufferMaxSize.to_i)) puts "config:warn buffer max size must be greater or equal to chunk size" tailBufferMaxSize = tailBufferChunkSize end @@ -54,6 +55,12 @@ def substituteFluentBitPlaceHolders new_contents = new_contents.gsub("\n ${TAIL_BUFFER_MAX_SIZE}\n", "\n") end + if !ignoreOlder.nil? && !ignoreOlder.empty? + new_contents = new_contents.gsub("${TAIL_IGNORE_OLDER}", "Ignore_Older " + ignoreOlder) + else + new_contents = new_contents.gsub("\n ${TAIL_IGNORE_OLDER}\n", "\n") + end + File.open(@td_agent_bit_conf_path, "w") { |file| file.puts new_contents } puts "config::Successfully substituted the placeholders in td-agent-bit.conf file" rescue => errorStr diff --git a/build/common/installer/scripts/tomlparser-agent-config.rb b/build/common/installer/scripts/tomlparser-agent-config.rb index a6b6ee0a1..4cfbc45ee 100644 --- a/build/common/installer/scripts/tomlparser-agent-config.rb +++ b/build/common/installer/scripts/tomlparser-agent-config.rb @@ -55,7 +55,7 @@ @fbitTailBufferChunkSizeMBs = 0 @fbitTailBufferMaxSizeMBs = 0 @fbitTailMemBufLimitMBs = 0 - +@fbitTailIgnoreOlder = "" def is_number?(value) true if Integer(value) rescue false @@ -145,7 +145,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) end fbitTailBufferMaxSizeMBs = fbit_config[:tail_buf_maxsize_megabytes] - if !fbitTailBufferMaxSizeMBs.nil? && is_number?(fbitTailBufferMaxSizeMBs) && fbitTailBufferMaxSizeMBs.to_i > 0 + if !fbitTailBufferMaxSizeMBs.nil? && is_number?(fbitTailBufferMaxSizeMBs) && fbitTailBufferMaxSizeMBs.to_i > 0 if fbitTailBufferMaxSizeMBs.to_i >= @fbitTailBufferChunkSizeMBs @fbitTailBufferMaxSizeMBs = fbitTailBufferMaxSizeMBs.to_i puts "Using config map value: tail_buf_maxsize_megabytes = #{@fbitTailBufferMaxSizeMBs}" @@ -156,16 +156,27 @@ def populateSettingValuesFromConfigMap(parsedConfig) end end # in scenario - tail_buf_chunksize_megabytes provided but not tail_buf_maxsize_megabytes to prevent fbit crash - if @fbitTailBufferChunkSizeMBs > 0 && @fbitTailBufferMaxSizeMBs == 0 + if @fbitTailBufferChunkSizeMBs > 0 && @fbitTailBufferMaxSizeMBs == 0 @fbitTailBufferMaxSizeMBs = @fbitTailBufferChunkSizeMBs puts "config::warn: since tail_buf_maxsize_megabytes not provided hence using tail_buf_maxsize_megabytes=#{@fbitTailBufferMaxSizeMBs} which is same as the value of tail_buf_chunksize_megabytes" - end + end fbitTailMemBufLimitMBs = fbit_config[:tail_mem_buf_limit_megabytes] if !fbitTailMemBufLimitMBs.nil? 
&& is_number?(fbitTailMemBufLimitMBs) && fbitTailMemBufLimitMBs.to_i > 0 @fbitTailMemBufLimitMBs = fbitTailMemBufLimitMBs.to_i puts "Using config map value: tail_mem_buf_limit_megabytes = #{@fbitTailMemBufLimitMBs}" end + + fbitTailIgnoreOlder = fbit_config[:tail_ignore_older] + re = /^[0-9]+[mhd]$/ + if !fbitTailIgnoreOlder.nil? && !fbitTailIgnoreOlder.empty? + if !re.match(fbitTailIgnoreOlder).nil? + @fbitTailIgnoreOlder = fbitTailIgnoreOlder + puts "Using config map value: tail_ignore_older = #{@fbitTailIgnoreOlder}" + else + puts "config:warn: provided tail_ignore_older value is not valid hence using default value" + end + end end end rescue => errorStr @@ -206,10 +217,15 @@ def populateSettingValuesFromConfigMap(parsedConfig) end if @fbitTailBufferMaxSizeMBs > 0 file.write("export FBIT_TAIL_BUFFER_MAX_SIZE=#{@fbitTailBufferMaxSizeMBs}\n") - end + end if @fbitTailMemBufLimitMBs > 0 file.write("export FBIT_TAIL_MEM_BUF_LIMIT=#{@fbitTailMemBufLimitMBs}\n") - end + end + + if !@fbitTailIgnoreOlder.nil? && !@fbitTailIgnoreOlder.empty? + file.write("export FBIT_TAIL_IGNORE_OLDER=#{@fbitTailIgnoreOlder}\n") + end + # Close file after writing all environment variables file.close else @@ -227,21 +243,25 @@ def get_command_windows(env_variable_name, env_variable_value) if !file.nil? if @fbitFlushIntervalSecs > 0 - commands = get_command_windows('FBIT_SERVICE_FLUSH_INTERVAL', @fbitFlushIntervalSecs) + commands = get_command_windows("FBIT_SERVICE_FLUSH_INTERVAL", @fbitFlushIntervalSecs) file.write(commands) end if @fbitTailBufferChunkSizeMBs > 0 - commands = get_command_windows('FBIT_TAIL_BUFFER_CHUNK_SIZE', @fbitTailBufferChunkSizeMBs) + commands = get_command_windows("FBIT_TAIL_BUFFER_CHUNK_SIZE", @fbitTailBufferChunkSizeMBs) file.write(commands) end if @fbitTailBufferMaxSizeMBs > 0 - commands = get_command_windows('FBIT_TAIL_BUFFER_MAX_SIZE', @fbitTailBufferMaxSizeMBs) + commands = get_command_windows("FBIT_TAIL_BUFFER_MAX_SIZE", @fbitTailBufferMaxSizeMBs) file.write(commands) - end + end if @fbitTailMemBufLimitMBs > 0 - commands = get_command_windows('FBIT_TAIL_MEM_BUF_LIMIT', @fbitTailMemBufLimitMBs) + commands = get_command_windows("FBIT_TAIL_MEM_BUF_LIMIT", @fbitTailMemBufLimitMBs) file.write(commands) - end + end + if !@fbitTailIgnoreOlder.nil? && !@fbitTailIgnoreOlder.empty? 
+ commands = get_command_windows("FBIT_TAIL_IGNORE_OLDER", @fbitTailIgnoreOlder) + file.write(commands) + end # Close file after writing all environment variables file.close puts "****************End Config Processing********************" @@ -249,4 +269,4 @@ def get_command_windows(env_variable_name, env_variable_value) puts "Exception while opening file for writing config environment variables for WINDOWS LOG" puts "****************End Config Processing********************" end -end \ No newline at end of file +end diff --git a/build/linux/installer/conf/td-agent-bit.conf b/build/linux/installer/conf/td-agent-bit.conf index beba6a3ca..fe550ab62 100644 --- a/build/linux/installer/conf/td-agent-bit.conf +++ b/build/linux/installer/conf/td-agent-bit.conf @@ -26,7 +26,7 @@ Refresh_Interval 30 Path_Key filepath Skip_Long_Lines On - Ignore_Older 5m + ${TAIL_IGNORE_OLDER} Exclude_Path ${AZMON_CLUSTER_LOG_TAIL_EXCLUDE_PATH} [INPUT] diff --git a/build/windows/installer/conf/fluent-bit.conf b/build/windows/installer/conf/fluent-bit.conf index 1e2d8a93e..b43354e3f 100644 --- a/build/windows/installer/conf/fluent-bit.conf +++ b/build/windows/installer/conf/fluent-bit.conf @@ -25,7 +25,7 @@ Refresh_Interval 30 Path_Key filepath Skip_Long_Lines On - Ignore_Older 5m + ${TAIL_IGNORE_OLDER} Exclude_Path ${AZMON_CLUSTER_LOG_TAIL_EXCLUDE_PATH} [INPUT] diff --git a/kubernetes/container-azm-ms-agentconfig.yaml b/kubernetes/container-azm-ms-agentconfig.yaml index 8b9e2d718..a274724ef 100644 --- a/kubernetes/container-azm-ms-agentconfig.yaml +++ b/kubernetes/container-azm-ms-agentconfig.yaml @@ -46,8 +46,8 @@ data: # In the absence of this configmap, default value for containerlog_schema_version is "v1" # Supported values for this setting are "v1","v2" # See documentation at https://aka.ms/ContainerLogv2 for benefits of v2 schema over v1 schema before opting for "v2" schema - # containerlog_schema_version = "v2" - + # containerlog_schema_version = "v2" + prometheus-data-collection-settings: |- # Custom Prometheus metrics data collection settings @@ -135,7 +135,7 @@ data: # Alertable metrics configuration settings for completed jobs count [alertable_metrics_configuration_settings.job_completion_threshold] - # Threshold for completed job count , metric will be sent only for those jobs which were completed earlier than the following threshold + # Threshold for completed job count , metric will be sent only for those jobs which were completed earlier than the following threshold job_completion_threshold_time_minutes = 360 integrations: |- [integrations.azure_network_policy_manager] @@ -147,19 +147,21 @@ data: # Doc - https://github.com/microsoft/Docker-Provider/blob/ci_prod/Documentation/AgentSettings/ReadMe.md agent-settings: |- # prometheus scrape fluent bit settings for high scale - # buffer size should be greater than or equal to chunk size else we set it to chunk size. + # buffer size should be greater than or equal to chunk size else we set it to chunk size. [agent_settings.prometheus_fbit_settings] tcp_listener_chunk_size = 10 tcp_listener_buffer_size = 10 tcp_listener_mem_buf_limit = 200 - + # The following settings are "undocumented", we don't recommend uncommenting them unless directed by Microsoft. # They increase the maximum stdout/stderr log collection rate but will also cause higher cpu/memory usage. 
+ ## Ref for more details about Ignore_Older - https://docs.fluentbit.io/manual/v/1.7/pipeline/inputs/tail # [agent_settings.fbit_config] # log_flush_interval_secs = "1" # default value is 15 # tail_mem_buf_limit_megabytes = "10" # default value is 10 # tail_buf_chunksize_megabytes = "1" # default value is 32kb (comment out this line for default) # tail_buf_maxsize_megabytes = "1" # defautl value is 32kb (comment out this line for default) + # tail_ignore_older = "5m" # default value same as fluent-bit default i.e.0m metadata: name: container-azm-ms-agentconfig From 870da7ad46306ceeb88dc198b303e9bb9579c15b Mon Sep 17 00:00:00 2001 From: bragi92 Date: Wed, 27 Jul 2022 23:07:31 -0700 Subject: [PATCH 246/301] merge conflict (#799) --- .trivyignore | 24 ++++++++++++++++++---- ReleaseNotes.md | 4 ++++ charts/azuremonitor-containers/Chart.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 2 +- kubernetes/linux/Dockerfile | 2 +- kubernetes/linux/Dockerfile.multiarch | 2 +- kubernetes/omsagent.yaml | 6 +++--- source/plugins/ruby/KubernetesApiClient.rb | 4 ++-- 8 files changed, 33 insertions(+), 13 deletions(-) diff --git a/.trivyignore b/.trivyignore index 91ee2f5bb..1b10c14d5 100644 --- a/.trivyignore +++ b/.trivyignore @@ -1,9 +1,25 @@ # telegraf vulnerabilities CVE-2019-3826 -CVE-2022-29190 -CVE-2022-29222 -CVE-2022-29189 CVE-2022-1996 -#dpkg vulnerability in ubuntu +# still present in mdsd telegraf +CVE-2021-42836 + +# ruby in /usr/lib +CVE-2020-36327 +CVE-2021-43809 +CVE-2021-41816 +CVE-2021-41819 +CVE-2021-31799 +CVE-2021-28965 + +# dpkg vulnerability in ubuntu CVE-2022-1304 + +# Adding for Hotfix : This needs to be fixed +CVE-2022-27191 +CVE-2022-29190 +CVE-2022-29222 +CVE-2022-31030 +CVE-2022-29189 +CVE-2022-29526 diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 39eeb6a50..dfa703a9f 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,10 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 07/27/2022 - +##### Version microsoft/oms:ciprod06272022-hotfix Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022-hotfix (linux) +- Fixes for sending the proper node allocatable cpu and memory value for the container which does not specify limits. 
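The hotfix note above is easier to follow against the KubernetesApiClient.rb hunk further down: when a container spec carries no limit for a metric, the agent now converts the node allocatable quantity with the same numeric parser used for container limits instead of emitting the raw quantity string. A minimal sketch of that fallback, with parse_quantity standing in for KubernetesApiClient.getMetricNumericValue and the record shapes simplified:

# Sketch: resolve a container's effective limit, falling back to node allocatable.
# parse_quantity is a stand-in for KubernetesApiClient.getMetricNumericValue and is
# assumed to turn Kubernetes quantities ("500m", "7961Mi") into plain numbers.
def effective_limit(container_limits, node_allocatable, metric_name)
  raw = container_limits[metric_name] unless container_limits.nil?
  return parse_quantity(metric_name, raw) unless raw.nil?
  if !node_allocatable.nil? && node_allocatable.key?(metric_name)
    parse_quantity(metric_name, node_allocatable[metric_name])
  end
end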
+ ### 06/27/2022 - ##### Version microsoft/oms:ciprod06272022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022 (linux) ##### Code change log diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index a8268d63d..54159a6ce 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.9.4 +version: 2.9.5 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index d528115cf..af94fad75 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -22,7 +22,7 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod06272022" + tag: "ciprod06272022-hotfix" tagWindows: "win-ciprod06142022" pullPolicy: IfNotPresent dockerProviderVersion: "18.0.1-0" diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 211d37259..162318a9e 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -18,7 +18,7 @@ ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python-ctypes init-system-helpers net-tools rsyslog cron vim dmidecode apt-transport-https gnupg && rm -rf /var/lib/apt/lists/* COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs defaultpromenvvariables-sidecar mdsd.xml envmdsd logrotate.conf $tmpdir/ -ARG IMAGE_TAG=ciprod06272022 +ARG IMAGE_TAG=ciprod06272022-hotfix ENV AGENT_VERSION ${IMAGE_TAG} WORKDIR ${tmpdir} diff --git a/kubernetes/linux/Dockerfile.multiarch b/kubernetes/linux/Dockerfile.multiarch index a89fd1781..dda458fa1 100644 --- a/kubernetes/linux/Dockerfile.multiarch +++ b/kubernetes/linux/Dockerfile.multiarch @@ -33,7 +33,7 @@ RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl COPY --from=builder /src/kubernetes/linux/Linux_ULINUX_1.0_*_64_Release/docker-cimprov-*.*.*-*.*.sh $tmpdir/ COPY kubernetes/linux/setup.sh kubernetes/linux/main.sh kubernetes/linux/defaultpromenvvariables kubernetes/linux/defaultpromenvvariables-rs kubernetes/linux/defaultpromenvvariables-sidecar kubernetes/linux/mdsd.xml kubernetes/linux/envmdsd kubernetes/linux/logrotate.conf $tmpdir/ -ARG IMAGE_TAG=ciprod06272022 +ARG IMAGE_TAG=ciprod06272022-hotfix ENV AGENT_VERSION ${IMAGE_TAG} WORKDIR ${tmpdir} diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 88d2fdda8..c11650b9e 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -379,7 +379,7 @@ spec: # - NET_ADMIN # - NET_RAW - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022-hotfix" imagePullPolicy: IfNotPresent resources: limits: @@ -468,7 +468,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022-hotfix" imagePullPolicy: IfNotPresent resources: limits: @@ -653,7 +653,7 @@ spec: # - NET_ADMIN # - NET_RAW - name: omsagent - image: 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022-hotfix" imagePullPolicy: IfNotPresent resources: limits: diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 612035625..6828109b3 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -517,7 +517,7 @@ def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToColle #No container level limit for the given metric, so default to node level limit else if (metricCategory == "limits" && !nodeAllocatableRecord.nil? && !nodeAllocatableRecord.empty? && nodeAllocatableRecord.has_key?(metricNameToCollect)) - metricValue = nodeAllocatableRecord[metricNameToCollect] + metricValue = getMetricNumericValue(metricNameToCollect, nodeAllocatableRecord[metricNameToCollect]) metricProps = {} metricProps["Timestamp"] = metricTime metricProps["Host"] = nodeName @@ -591,7 +591,7 @@ def getContainerResourceRequestsAndLimitsAsInsightsMetrics(pod, metricCategory, #No container level limit for the given metric, so default to node level limit for non-gpu metrics if (metricNameToCollect.downcase != "nvidia.com/gpu") && (metricNameToCollect.downcase != "amd.com/gpu") if !nodeAllocatableRecord.nil? && !nodeAllocatableRecord.empty? && nodeAllocatableRecord.has_key?(metricNameToCollect) - metricValue = nodeAllocatableRecord[metricNameToCollect] + metricValue = getMetricNumericValue(metricNameToCollect, nodeAllocatableRecord[metricNameToCollect]) end end end From f29033a6eccf31e0edd771fa1ec101f3d365c7b4 Mon Sep 17 00:00:00 2001 From: Amol Agrawal Date: Thu, 28 Jul 2022 20:05:50 -0700 Subject: [PATCH 247/301] update vulns (#800) Co-authored-by: Amol Agrawal --- .trivyignore | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-) diff --git a/.trivyignore b/.trivyignore index 1b10c14d5..91ee2f5bb 100644 --- a/.trivyignore +++ b/.trivyignore @@ -1,25 +1,9 @@ # telegraf vulnerabilities CVE-2019-3826 +CVE-2022-29190 +CVE-2022-29222 +CVE-2022-29189 CVE-2022-1996 -# still present in mdsd telegraf -CVE-2021-42836 - -# ruby in /usr/lib -CVE-2020-36327 -CVE-2021-43809 -CVE-2021-41816 -CVE-2021-41819 -CVE-2021-31799 -CVE-2021-28965 - -# dpkg vulnerability in ubuntu +#dpkg vulnerability in ubuntu CVE-2022-1304 - -# Adding for Hotfix : This needs to be fixed -CVE-2022-27191 -CVE-2022-29190 -CVE-2022-29222 -CVE-2022-31030 -CVE-2022-29189 -CVE-2022-29526 From 1da6c4c5657f69f42ab9c6554d1bc8b0a6acaa43 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sat, 30 Jul 2022 04:47:41 +0530 Subject: [PATCH 248/301] Gangams/fix permission assignments in test scripts (#802) * restrict rw permissions to owner * remove usage of worldwrite file permissions * remove worldwrite file permission * remove worldwrite file permission --- scripts/cluster-creation/aks-engine.sh | 9 ++------- scripts/cluster-creation/onprem-k8s.sh | 7 +++---- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/scripts/cluster-creation/aks-engine.sh b/scripts/cluster-creation/aks-engine.sh index 9d287ea07..ba763e354 100644 --- a/scripts/cluster-creation/aks-engine.sh +++ b/scripts/cluster-creation/aks-engine.sh @@ -89,11 +89,8 @@ while getopts 'hs:c:w:d:l:' opt; do } create_cluster() { - -sudo touch kubernetes.json -sudo chmod 777 kubernetes.json # For docker runtime, remove kubernetesConfig block -cat >> kubernetes.json < /dev/null << 'EOF' { "apiVersion": "vlabs", 
"properties": { @@ -132,7 +129,7 @@ cat >> kubernetes.json <> kind-config.yaml < /dev/null << 'EOF' kind: Cluster apiVersion: kind.sigs.k8s.io/v1alpha3 nodes: - role: control-plane - role: worker -EOL +EOF + sudo kind create cluster --config kind-config.yaml --name $clusterName } From eead92367addd163961fe098b2312e69f546579a Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sat, 30 Jul 2022 20:43:31 +0530 Subject: [PATCH 249/301] wip: data collection interval --- .../existingClusterOnboarding.json | 6 ++ source/plugins/ruby/constants.rb | 8 ++ source/plugins/ruby/extension.rb | 85 ++++++++++++------- source/plugins/ruby/extension_utils.rb | 48 ++++++++++- source/plugins/ruby/in_cadvisor_perf.rb | 2 + source/plugins/ruby/in_containerinventory.rb | 2 + source/plugins/ruby/in_kube_nodes.rb | 2 + source/plugins/ruby/in_kube_perfinventory.rb | 2 + source/plugins/ruby/in_kube_podinventory.rb | 2 + .../plugins/ruby/in_kube_podmdminventory.rb | 5 ++ source/plugins/ruby/in_kube_pvinventory.rb | 2 + .../plugins/ruby/in_kubestate_deployments.rb | 2 + source/plugins/ruby/in_kubestate_hpa.rb | 2 + source/plugins/ruby/in_win_cadvisor_perf.rb | 2 + 14 files changed, 138 insertions(+), 32 deletions(-) diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index 2024e611a..d19f9354e 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -74,6 +74,12 @@ "streams": [ "Microsoft-ContainerInsights-Group-Default" ], + "extensionSettings": { + "dataCollectionIntervalMinutes": 1, + "excludeNameSpaces": [ + "kube-system" + ] + }, "extensionName": "ContainerInsights" } ] diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index 5f57b465a..bfee5e002 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -144,4 +144,12 @@ class Constants POD_MDM_EMIT_STREAM_BATCH_SIZE = 5000 # each record is 200 bytes, 5k records ~2MB # only used in windows in AAD MSI auth mode IMDS_TOKEN_PATH_FOR_WINDOWS = "c:/etc/imds-access-token/token" + + # extension settings and these should match with DCR extension settings + EXTENSION_SETTING_DATA_COLLECTION_INTERVAL = "dataCollectionIntervalMinutes" + EXTENSION_SETTING_EXCLUDE_NAMESPACES = "excludeNamespaces" + + # min and max data collection interval minutes + DATA_COLLECTION_INTERVAL_MINUTES_MIN = 1 + DATA_COLLECTION_INTERVAL_MINUTES_MAX = 60 end diff --git a/source/plugins/ruby/extension.rb b/source/plugins/ruby/extension.rb index 78236fe15..5a6dcd6be 100644 --- a/source/plugins/ruby/extension.rb +++ b/source/plugins/ruby/extension.rb @@ -21,57 +21,82 @@ def get_output_stream_id(datatypeId) if @cache.has_key?(datatypeId) return @cache[datatypeId] else - @cache = get_config() + @cache = get_stream_mapping() return @cache[datatypeId] end } end + def get_extension_settings() + extensionSettings = Hash.new + begin + extensionConfigurations = get_extension_configs() + if !extensionConfigurations.nil? && !extensionConfigurations.empty? + extensionConfigurations.each do |extensionConfig| + extSettings = extensionConfig["extensionSettings"] + # TODO - can extensionsettings present in multiple extension configurations?? + if !extensionSettings.nil? && !extensionSettings.empty? 
+ extensionSettings = extSettings + end + end + end + rescue =>errorStr + $log.warn("Extension::get_extension_settings failed: #{errorStr}") + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + return extensionSettings + end + + def get_stream_mapping() + dataTypeToStreamIdMap = Hash.new + begin + extensionConfigurations = get_extension_configs() + if !extensionConfigurations.nil? && !extensionConfigurations.empty? + extensionConfigurations.each do |extensionConfig| + outputStreams = extensionConfig["outputStreams"] + if !outputStreams.nil? && !outputStreams.empty? + outputStreams.each do |datatypeId, streamId| + dataTypeToStreamIdMap[datatypeId] = streamId + end + else + $log.warn("Extension::get_stream_mapping::received outputStreams is either nil or empty") + end + end + else + $log.warn("Extension::get_stream_mapping::received extensionConfigurations either nil or empty") + end + rescue => errorStr + $log.warn("Extension::get_stream_mapping failed: #{errorStr}") + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + return dataTypeToStreamIdMap + end + private - def get_config() - extConfig = Hash.new - $log.info("Extension::get_config start ...") + def get_extension_configs() + extensionConfigurations = [] begin clientSocket = UNIXSocket.open(Constants::ONEAGENT_FLUENT_SOCKET_NAME) requestId = SecureRandom.uuid.to_s - requestBodyJSON = { "Request" => "AgentTaggedData", "RequestId" => requestId, "Tag" => Constants::CI_EXTENSION_NAME, "Version" => Constants::CI_EXTENSION_VERSION }.to_json - $log.info("Extension::get_config::sending request with request body: #{requestBodyJSON}") + requestBodyJSON = { "Request" => "AgentTaggedData", "RequestId" => requestId, "Tag" => Constants::CI_EXTENSION_NAME, "Version" => Constants::CI_EXTENSION_VERSION }.to_json requestBodyMsgPack = requestBodyJSON.to_msgpack clientSocket.write(requestBodyMsgPack) - clientSocket.flush - $log.info("reading the response from fluent socket: #{Constants::ONEAGENT_FLUENT_SOCKET_NAME}") + clientSocket.flush resp = clientSocket.recv(Constants::CI_EXTENSION_CONFIG_MAX_BYTES) - if !resp.nil? && !resp.empty? - $log.info("Extension::get_config::successfully read the extension config from fluentsocket and number of bytes read is #{resp.length}") + if !resp.nil? && !resp.empty? respJSON = JSON.parse(resp) taggedData = respJSON["TaggedData"] if !taggedData.nil? && !taggedData.empty? taggedAgentData = JSON.parse(taggedData) - extensionConfigurations = taggedAgentData["extensionConfigurations"] - if !extensionConfigurations.nil? && !extensionConfigurations.empty? - extensionConfigurations.each do |extensionConfig| - outputStreams = extensionConfig["outputStreams"] - if !outputStreams.nil? && !outputStreams.empty? - outputStreams.each do |datatypeId, streamId| - $log.info("Extension::get_config datatypeId:#{datatypeId}, streamId: #{streamId}") - extConfig[datatypeId] = streamId - end - else - $log.warn("Extension::get_config::received outputStreams is either nil or empty") - end - end - else - $log.warn("Extension::get_config::received extensionConfigurations from fluentsocket is either nil or empty") - end + extensionConfigurations = taggedAgentData["extensionConfigurations"] end end rescue => errorStr - $log.warn("Extension::get_config failed: #{errorStr}") + $log.warn("Extension::get_extension_configs failed: #{errorStr}") ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) ensure clientSocket.close unless clientSocket.nil? 
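For orientation, the payload these helpers parse is the DCR configuration handed back over the oneagent fluent socket: get_stream_mapping reads outputStreams and get_extension_settings reads extensionSettings from each entry in extensionConfigurations. A rough illustration of that shape as a Ruby hash; the key names follow the code above, the stream entry is a placeholder, and the settings values echo the onboarding template example earlier in this patch:

# Illustrative only: approximate shape of the parsed "TaggedData" payload.
example_tagged_agent_data = {
  "extensionConfigurations" => [
    {
      "outputStreams" => {
        # dataType => output stream id used to tag emitted records (both values are placeholders)
        "SOME_DATA_TYPE_BLOB" => "dcr-00000000000000000000000000000000",
      },
      "extensionSettings" => {
        "dataCollectionIntervalMinutes" => 1,
        "excludeNameSpaces" => ["kube-system"],
      },
    },
  ],
}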
- end - $log.info("Extension::get_config complete ...") - return extConfig + end + return extensionConfigurations end end diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index 5d439c6b2..e4c6a0aab 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -2,7 +2,8 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -require_relative "extension" +require_relative "extension" +require "constants" class ExtensionUtils class << self @@ -22,6 +23,49 @@ def getOutputStreamId(dataType) end def isAADMSIAuthMode() return !ENV["AAD_MSI_AUTH_MODE"].nil? && !ENV["AAD_MSI_AUTH_MODE"].empty? && ENV["AAD_MSI_AUTH_MODE"].downcase == "true" - end + end + def getdataCollectionIntervalSeconds + collectionIntervalMinutes = 1 + begin + extensionSettings = Extension.instance.get_extension_settings() + if !extensionSettings.nil? && + !extensionSettings.empty? && + extensionSettings.has_key(Constants::EXTENSION_SETTING_DATA_COLLECTION_INTERVAL) + intervalMinutes = extensionSettings[Constants::EXTENSION_SETTING_DATA_COLLECTION_INTERVAL] + if is_number?(intervalMinutes) && + intervalMinutes.to_i >= Constants::DATA_COLLECTION_INTERVAL_MINUTES_MIN && + intervalMinutes.to_i <= Constants::DATA_COLLECTION_INTERVAL_MINUTES_MAX + collectionIntervalMinutes = intervalMinutes.to_i + else + $log.warn("ExtensionUtils::getdataCollectionIntervalSeconds: dataCollectionIntervalMinutes: #{intervalMinutes} not valid hence using default") + end + end + rescue => err + $log.warn("ExtensionUtils::getdataCollectionIntervalSeconds: failed with an exception: #{errorStr}") + end + collectionIntervalSeconds = collectionIntervalMinutes * 60 + return collectionIntervalSeconds + end + + def getdataCollectionExcludeNameSpaces + excludeNamespaces = [] + begin + extensionSettings = Extension.instance.get_extension_settings() + if !extensionSettings.nil? && + !extensionSettings.empty? && + extensionSettings.has_key(Constants::EXTENSION_SETTING_EXCLUDE_NAMESPACES) + namespacesToExclude = extensionSettings[Constants::EXTENSION_SETTING_EXCLUDE_NAMESPACES] + if !namespacesToExclude.nil? && !namespacesToExclude.empty? 
&& namespacesToExclude.kind_of?(Array) && namespacesToExclude.length > 0 + excludeNamespaces = namespacesToExclude + else + $log.warn("ExtensionUtils::getdataCollectionExcludeNameSpaces: excludeNamespaces: #{namespacesToExclude} not valid hence using default") + end + end + rescue => err + $log.warn("ExtensionUtils::getdataCollectionExcludeNameSpaces: failed with an exception: #{errorStr}") + end + return excludeNamespaces + end + end end diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index d929e86fb..057242fe6 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -77,6 +77,8 @@ def enumerate() end $log.info("in_cadvisor_perf::enumerate: using perf tag -#{@tag} @ #{Time.now.utc.iso8601}") $log.info("in_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsmetricstag} @ #{Time.now.utc.iso8601}") + @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + $log.info("in_cadvisor_perf::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") end router.emit_stream(@tag, eventStream) if eventStream router.emit_stream(@mdmtag, eventStream) if eventStream diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index aeb70c68a..427af5e94 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -65,6 +65,8 @@ def enumerate @tag = ExtensionUtils.getOutputStreamId(Constants::CONTAINER_INVENTORY_DATA_TYPE) end $log.info("in_container_inventory::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") + @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + $log.info("in_container_inventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") end begin containerRuntimeEnv = ENV["CONTAINER_RUNTIME"] diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index 8473cca81..07b33f564 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -150,6 +150,8 @@ def enumerate $log.info("in_kube_nodes::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_nodes::enumerate: using containernodeinventory tag -#{@ContainerNodeInventoryTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_nodes::enumerate: using kubenodeinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") + @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + $log.info("in_kube_nodes::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") end nodesAPIChunkStartTime = (Time.now.to_f * 1000).to_i diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index 25f9c93e8..552ee5dd9 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -99,6 +99,8 @@ def enumerate(podList = nil) end $log.info("in_kube_perfinventory::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_perfinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") + @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + $log.info("in_kube_perfinventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") end nodeAllocatableRecords = getNodeAllocatableRecords() diff 
--git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index a1986bd4a..1463fdf91 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -171,6 +171,8 @@ def enumerate(podList = nil) $log.info("in_kube_podinventory::enumerate: using containerinventory tag -#{@containerInventoryTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_podinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_podinventory::enumerate: using kubepodinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") + @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") end serviceInventory = {} diff --git a/source/plugins/ruby/in_kube_podmdminventory.rb b/source/plugins/ruby/in_kube_podmdminventory.rb index 38e07d860..f5c51d42d 100644 --- a/source/plugins/ruby/in_kube_podmdminventory.rb +++ b/source/plugins/ruby/in_kube_podmdminventory.rb @@ -24,6 +24,7 @@ def initialize require_relative "omslog" require_relative "constants" require_relative "CustomMetricsUtils" + require_relative "extension_utils" end config_param :run_interval, :time, :default => 60 @@ -62,6 +63,10 @@ def enumerate if !@isCustomMetricsAvailability $log.warn "in_kube_podmdminventory::enumerate:skipping since custom metrics not available either for this cluster type or the region" else + if ExtensionUtils.isAADMSIAuthMode() + @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") + end parse_and_emit_records() end rescue => errorStr diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 1e25e4057..882370b6c 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -67,6 +67,8 @@ def enumerate if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_PV_INVENTORY_DATA_TYPE) end + @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + $log.info("in_kube_pvinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") end continuationToken = nil diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index 92e6318b9..314ab2bd6 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -88,6 +88,8 @@ def enumerate if @tag.nil? 
|| !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) @tag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) end + @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + $log.info("in_kubestate_deployments::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 7f7e3aac5..906438347 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -85,6 +85,8 @@ def enumerate @tag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) end $log.info("in_kubestate_hpa::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") + @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + $log.info("in_kubestate_hpa::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 841c4867a..9e3f30d79 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -69,6 +69,8 @@ def enumerate() end $log.info("in_win_cadvisor_perf::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") $log.info("in_win_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") + @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + $log.info("in_win_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") end #Resetting this cache so that it is populated with the current set of containers with every call From f523ef83c3baa4d9b300f0383c3ce90e2900fcca Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sun, 31 Jul 2022 05:23:05 +0530 Subject: [PATCH 250/301] wip:namespace filtering --- source/plugins/ruby/KubernetesApiClient.rb | 15 +++ source/plugins/ruby/extension_utils.rb | 10 +- source/plugins/ruby/in_kube_podinventory.rb | 125 +++++++++++++------- source/plugins/ruby/in_kubestate_hpa.rb | 3 + 4 files changed, 103 insertions(+), 50 deletions(-) diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 6828109b3..f878ad4c8 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -1394,5 +1394,20 @@ def isEmitCacheTelemetry end return isEmitCacheTelemtryEnabled end + + def isExcludeResourceItem(resourceNamespace, excludeNamespaces) + isExclude = false + begin + if !resourceNamespace.nil? && !resourceNamespace.empty? + !excludeNamespaces.nil? && !excludeNamespaces.empty? && excludeNamespaces.length > 0 + && excludeNamespaces.include?(resourceNamespace) + isExclude = true + end + rescue => errorStr + @Log.warn "KubernetesApiClient::isExcludeResourceItem:Failed with an error : #{errorStr}" + end + return isExclude + end + end end diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index e4c6a0aab..5e6f537ef 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -24,8 +24,9 @@ def getOutputStreamId(dataType) def isAADMSIAuthMode() return !ENV["AAD_MSI_AUTH_MODE"].nil? && !ENV["AAD_MSI_AUTH_MODE"].empty? 
&& ENV["AAD_MSI_AUTH_MODE"].downcase == "true" end + def getdataCollectionIntervalSeconds - collectionIntervalMinutes = 1 + collectionIntervalSeconds = 60 begin extensionSettings = Extension.instance.get_extension_settings() if !extensionSettings.nil? && @@ -35,15 +36,14 @@ def getdataCollectionIntervalSeconds if is_number?(intervalMinutes) && intervalMinutes.to_i >= Constants::DATA_COLLECTION_INTERVAL_MINUTES_MIN && intervalMinutes.to_i <= Constants::DATA_COLLECTION_INTERVAL_MINUTES_MAX - collectionIntervalMinutes = intervalMinutes.to_i + collectionIntervalSeconds = 60 * intervalMinutes.to_i else $log.warn("ExtensionUtils::getdataCollectionIntervalSeconds: dataCollectionIntervalMinutes: #{intervalMinutes} not valid hence using default") end end rescue => err $log.warn("ExtensionUtils::getdataCollectionIntervalSeconds: failed with an exception: #{errorStr}") - end - collectionIntervalSeconds = collectionIntervalMinutes * 60 + end return collectionIntervalSeconds end @@ -56,7 +56,7 @@ def getdataCollectionExcludeNameSpaces extensionSettings.has_key(Constants::EXTENSION_SETTING_EXCLUDE_NAMESPACES) namespacesToExclude = extensionSettings[Constants::EXTENSION_SETTING_EXCLUDE_NAMESPACES] if !namespacesToExclude.nil? && !namespacesToExclude.empty? && namespacesToExclude.kind_of?(Array) && namespacesToExclude.length > 0 - excludeNamespaces = namespacesToExclude + excludeNamespaces = namespacesToExclude.dup else $log.warn("ExtensionUtils::getdataCollectionExcludeNameSpaces: excludeNamespaces: #{namespacesToExclude} not valid hence using default") end diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 1463fdf91..ea7fe4b83 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -59,6 +59,7 @@ def initialize @kubeservicesTag = "oneagent.containerInsights.KUBE_SERVICES_BLOB" @containerInventoryTag = "oneagent.containerInsights.CONTAINER_INVENTORY_BLOB" + @excludeNamespaces = [] end config_param :run_interval, :time, :default => 60 @@ -173,6 +174,8 @@ def enumerate(podList = nil) $log.info("in_kube_podinventory::enumerate: using kubepodinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") + @excludeNamespaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + $log.info("in_kube_podinventory::enumerate: using data collection excludenamespaces -#{@excludeNamespaces} @ #{Time.now.utc.iso8601}") end serviceInventory = {} @@ -737,6 +740,12 @@ def watch_pods if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) $log.info("in_kube_podinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| + podNameSpace = "" + if !item["metdata"].nil? && !item["metadata"]["namespace"].nil? + podNameSpace = item["metadata"]["namespace"] + end + # exclude resource item if this in excluded namespaces + next unless !KubernetesApiClient.isExcludeResourceItem(podNameSpace, @excludeNamespaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? item["spec"]["nodeName"] : "" @@ -776,6 +785,12 @@ def watch_pods if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) 
$log.info("in_kube_podinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| + podNameSpace = "" + if !item["metdata"].nil? && !item["metadata"]["namespace"].nil? + podNameSpace = item["metadata"]["namespace"] + end + # exclude resource item if this in excluded namespaces + next unless !KubernetesApiClient.isExcludeResourceItem(podNameSpace, @excludeNamespaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? item["spec"]["nodeName"] : "" @@ -834,38 +849,45 @@ def watch_pods # We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! break end - if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) - key = item["metadata"]["uid"] - if !key.nil? && !key.empty? - currentWindowsNodeNameList = [] - @windowsNodeNameCacheMutex.synchronize { - currentWindowsNodeNameList = @windowsNodeNameListCache.dup - } - isWindowsPodItem = false - nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? item["spec"]["nodeName"] : "" - if !nodeName.empty? && - !currentWindowsNodeNameList.nil? && - !currentWindowsNodeNameList.empty? && - currentWindowsNodeNameList.include?(nodeName) - isWindowsPodItem = true + podNameSpace = "" + if !item["metdata"].nil? && !item["metadata"]["namespace"].nil? + podNameSpace = item["metadata"]["namespace"] + end + # exclude resource item if this in excluded namespaces + if !KubernetesApiClient.isExcludeResourceItem(podNameSpace, @excludeNamespaces) + if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + currentWindowsNodeNameList = [] + @windowsNodeNameCacheMutex.synchronize { + currentWindowsNodeNameList = @windowsNodeNameListCache.dup + } + isWindowsPodItem = false + nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? item["spec"]["nodeName"] : "" + if !nodeName.empty? && + !currentWindowsNodeNameList.nil? && + !currentWindowsNodeNameList.empty? && + currentWindowsNodeNameList.include?(nodeName) + isWindowsPodItem = true + end + podItem = KubernetesApiClient.getOptimizedItem("pods", item, isWindowsPodItem) + if !podItem.nil? && !podItem.empty? + @podCacheMutex.synchronize { + @podItemsCache[key] = podItem + } + else + $log.warn "in_kube_podinventory::watch_pods:Received podItem is empty or nil @ #{Time.now.utc.iso8601}" + end + else + $log.warn "in_kube_podinventory::watch_pods:Received poduid either nil or empty @ #{Time.now.utc.iso8601}" end - podItem = KubernetesApiClient.getOptimizedItem("pods", item, isWindowsPodItem) - if !podItem.nil? && !podItem.empty? + elsif notice["type"] == "DELETED" + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? @podCacheMutex.synchronize { - @podItemsCache[key] = podItem + @podItemsCache.delete(key) } - else - $log.warn "in_kube_podinventory::watch_pods:Received podItem is empty or nil @ #{Time.now.utc.iso8601}" end - else - $log.warn "in_kube_podinventory::watch_pods:Received poduid either nil or empty @ #{Time.now.utc.iso8601}" - end - elsif notice["type"] == "DELETED" - key = item["metadata"]["uid"] - if !key.nil? && !key.empty? - @podCacheMutex.synchronize { - @podItemsCache.delete(key) - } end end when "ERROR" @@ -926,6 +948,12 @@ def watch_services if (serviceInventory.key?("items") && !serviceInventory["items"].nil? && !serviceInventory["items"].empty?) 
$log.info("in_kube_podinventory::watch_services:number of service items #{serviceInventory["items"].length} @ #{Time.now.utc.iso8601}") serviceInventory["items"].each do |item| + serviceNamespace = "" + if !item["metdata"].nil? && !item["metadata"]["namespace"].nil? + serviceNamespace = item["metadata"]["namespace"] + end + # exclude resource item if this in excluded namespaces + next unless !KubernetesApiClient.isExcludeResourceItem(serviceNamespace, @excludeNamespaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? serviceItem = KubernetesApiClient.getOptimizedItem("services", item) @@ -976,26 +1004,33 @@ def watch_services # We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! break end - if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) - key = item["metadata"]["uid"] - if !key.nil? && !key.empty? - serviceItem = KubernetesApiClient.getOptimizedItem("services", item) - if !serviceItem.nil? && !serviceItem.empty? + serviceNamespace = "" + if !item["metdata"].nil? && !item["metadata"]["namespace"].nil? + serviceNamespace = item["metadata"]["namespace"] + end + # exclude resource item if this in excluded namespaces + if !KubernetesApiClient.isExcludeResourceItem(serviceNamespace, @excludeNamespaces) + if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + serviceItem = KubernetesApiClient.getOptimizedItem("services", item) + if !serviceItem.nil? && !serviceItem.empty? + @serviceCacheMutex.synchronize { + @serviceItemsCache[key] = serviceItem + } + else + $log.warn "in_kube_podinventory::watch_services:Received serviceItem either nil or empty @ #{Time.now.utc.iso8601}" + end + else + $log.warn "in_kube_podinventory::watch_services:Received serviceuid either nil or empty @ #{Time.now.utc.iso8601}" + end + elsif notice["type"] == "DELETED" + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? @serviceCacheMutex.synchronize { - @serviceItemsCache[key] = serviceItem + @serviceItemsCache.delete(key) } - else - $log.warn "in_kube_podinventory::watch_services:Received serviceItem either nil or empty @ #{Time.now.utc.iso8601}" end - else - $log.warn "in_kube_podinventory::watch_services:Received serviceuid either nil or empty @ #{Time.now.utc.iso8601}" - end - elsif notice["type"] == "DELETED" - key = item["metadata"]["uid"] - if !key.nil? && !key.empty? 
- @serviceCacheMutex.synchronize { - @serviceItemsCache.delete(key) - } end end when "ERROR" diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 906438347..7f2c55607 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -32,6 +32,7 @@ def initialize @NodeName = OMS::Common.get_hostname @ClusterId = KubernetesApiClient.getClusterId @ClusterName = KubernetesApiClient.getClusterName + @excludeNamespaces = [] end config_param :run_interval, :time, :default => 60 @@ -87,6 +88,8 @@ def enumerate $log.info("in_kubestate_hpa::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() $log.info("in_kubestate_hpa::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") + @excludeNamespaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + $log.info("in_kube_podinventory::enumerate: using data collection excludenamespaces -#{@excludeNamespaces} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil From da4b67d431306ee20bcd7711f58f87477d3638ce Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sun, 31 Jul 2022 09:50:07 +0530 Subject: [PATCH 251/301] wip --- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 2 +- source/plugins/ruby/KubernetesApiClient.rb | 6 +-- source/plugins/ruby/constants.rb | 2 +- source/plugins/ruby/extension_utils.rb | 8 ++-- source/plugins/ruby/in_cadvisor_perf.rb | 15 ++++--- source/plugins/ruby/in_containerinventory.rb | 3 ++ source/plugins/ruby/in_kube_events.rb | 8 +++- source/plugins/ruby/in_kube_perfinventory.rb | 43 +++++++++++-------- source/plugins/ruby/in_kube_podinventory.rb | 16 +++---- source/plugins/ruby/in_kube_pvinventory.rb | 21 +++++---- .../plugins/ruby/in_kubestate_deployments.rb | 3 ++ source/plugins/ruby/in_kubestate_hpa.rb | 7 +-- source/plugins/ruby/in_win_cadvisor_perf.rb | 7 ++- 13 files changed, 88 insertions(+), 53 deletions(-) diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index b18e887fd..6128b4029 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -133,7 +133,7 @@ def getCAdvisorUri(winNode, relativeUri) return baseUri + relativeUri end - def getMetrics(winNode: nil, metricTime: Time.now.utc.iso8601) + def getMetrics(winNode: nil, excludeNameSpaces: [], metricTime: Time.now.utc.iso8601) metricDataItems = [] begin cAdvisorStats = getSummaryStatsFromCAdvisor(winNode) diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index f878ad4c8..70ec7a0ec 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -1395,12 +1395,12 @@ def isEmitCacheTelemetry return isEmitCacheTelemtryEnabled end - def isExcludeResourceItem(resourceNamespace, excludeNamespaces) + def isExcludeResourceItem(resourceNamespace, excludeNameSpaces) isExclude = false begin if !resourceNamespace.nil? && !resourceNamespace.empty? - !excludeNamespaces.nil? && !excludeNamespaces.empty? && excludeNamespaces.length > 0 - && excludeNamespaces.include?(resourceNamespace) + !excludeNameSpaces.nil? && !excludeNameSpaces.empty? 
&& excludeNameSpaces.length > 0 + && excludeNameSpaces.include?(resourceNamespace) isExclude = true end rescue => errorStr diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index bfee5e002..07463fdc1 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -147,7 +147,7 @@ class Constants # extension settings and these should match with DCR extension settings EXTENSION_SETTING_DATA_COLLECTION_INTERVAL = "dataCollectionIntervalMinutes" - EXTENSION_SETTING_EXCLUDE_NAMESPACES = "excludeNamespaces" + EXTENSION_SETTING_EXCLUDE_NAMESPACES = "excludeNameSpaces" # min and max data collection interval minutes DATA_COLLECTION_INTERVAL_MINUTES_MIN = 1 diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index 5e6f537ef..41a92f772 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -48,7 +48,7 @@ def getdataCollectionIntervalSeconds end def getdataCollectionExcludeNameSpaces - excludeNamespaces = [] + excludeNameSpaces = [] begin extensionSettings = Extension.instance.get_extension_settings() if !extensionSettings.nil? && @@ -56,15 +56,15 @@ def getdataCollectionExcludeNameSpaces extensionSettings.has_key(Constants::EXTENSION_SETTING_EXCLUDE_NAMESPACES) namespacesToExclude = extensionSettings[Constants::EXTENSION_SETTING_EXCLUDE_NAMESPACES] if !namespacesToExclude.nil? && !namespacesToExclude.empty? && namespacesToExclude.kind_of?(Array) && namespacesToExclude.length > 0 - excludeNamespaces = namespacesToExclude.dup + excludeNameSpaces = namespacesToExclude.dup else - $log.warn("ExtensionUtils::getdataCollectionExcludeNameSpaces: excludeNamespaces: #{namespacesToExclude} not valid hence using default") + $log.warn("ExtensionUtils::getdataCollectionExcludeNameSpaces: excludeNameSpaces: #{namespacesToExclude} not valid hence using default") end end rescue => err $log.warn("ExtensionUtils::getdataCollectionExcludeNameSpaces: failed with an exception: #{errorStr}") end - return excludeNamespaces + return excludeNameSpaces end end diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index 057242fe6..b957e00b0 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -22,6 +22,7 @@ def initialize require_relative "omslog" require_relative "constants" require_relative "extension_utils" + @excludeNameSpaces = [] end config_param :run_interval, :time, :default => 60 @@ -62,11 +63,7 @@ def enumerate() begin eventStream = Fluent::MultiEventStream.new insightsMetricsEventStream = Fluent::MultiEventStream.new - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, metricTime: batchTime) - metricData.each do |record| - eventStream.add(time, record) if record - end - + if ExtensionUtils.isAADMSIAuthMode() && !@@isWindows.nil? && @@isWindows == false $log.info("in_cadvisor_perf::enumerate: AAD AUTH MSI MODE") if @tag.nil? 
|| !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) @@ -79,7 +76,15 @@ def enumerate() $log.info("in_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsmetricstag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() $log.info("in_cadvisor_perf::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") + @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + $log.info("in_cadvisor_perf::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + end + + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, @excludeNameSpaces, metricTime: batchTime) + metricData.each do |record| + eventStream.add(time, record) if record end + router.emit_stream(@tag, eventStream) if eventStream router.emit_stream(@mdmtag, eventStream) if eventStream diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index 427af5e94..c35dffa78 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -67,6 +67,8 @@ def enumerate $log.info("in_container_inventory::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() $log.info("in_container_inventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") + @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + $log.info("in_container_inventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end begin containerRuntimeEnv = ENV["CONTAINER_RUNTIME"] @@ -79,6 +81,7 @@ def enumerate podList = JSON.parse(response.body) if !podList.nil? && !podList.empty? && podList.key?("items") && !podList["items"].nil? && !podList["items"].empty? podList["items"].each do |item| + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["namespace"], @excludeNameSpaces) containerInventoryRecords = KubernetesContainerInventory.getContainerInventoryRecords(item, batchTime, clusterCollectEnvironmentVar) containerInventoryRecords.each do |containerRecord| ContainerInventoryState.writeContainerState(containerRecord) diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index 6ccb02c54..fe1669050 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -28,6 +28,7 @@ def initialize # Initilize enable/disable normal event collection @collectAllKubeEvents = false + @excludeNameSpaces = [] end config_param :run_interval, :time, :default => 60 @@ -91,7 +92,9 @@ def enumerate if @tag.nil? 
|| !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_EVENTS_DATA_TYPE) end - $log.info("in_kube_events::enumerate: using kubeevents tag -#{@tag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_events::enumerate: using kubeevents tag -#{@tag} @ #{Time.now.utc.iso8601}") + @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + $log.info("in_kube_events::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil @@ -160,6 +163,9 @@ def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTim next end + # drop the events if the event of the excluded namespace + next unless !KubernetesApiClient.isExcludeResourceItem(items["involvedObject"]["namespace"], @excludeNameSpaces) + record["ObjectKind"] = items["involvedObject"]["kind"] record["Namespace"] = items["involvedObject"]["namespace"] record["Name"] = items["involvedObject"]["name"] diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index 552ee5dd9..091f2819a 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -32,6 +32,7 @@ def initialize @kubeperfTag = "oneagent.containerInsights.LINUX_PERF_BLOB" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" + @excludeNameSpaces = [] end config_param :run_interval, :time, :default => 60 @@ -101,6 +102,8 @@ def enumerate(podList = nil) $log.info("in_kube_perfinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() $log.info("in_kube_perfinventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") + @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + $log.info("in_kube_perfinventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end nodeAllocatableRecords = getNodeAllocatableRecords() @@ -264,6 +267,7 @@ def watch_pods if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) $log.info("in_kube_perfinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["namespace"], @excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) @@ -295,6 +299,7 @@ def watch_pods if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) $log.info("in_kube_perfinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["namespace"], @excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) @@ -345,28 +350,30 @@ def watch_pods # We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! 
break end - if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) - key = item["metadata"]["uid"] - if !key.nil? && !key.empty? - podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) - if !podItem.nil? && !podItem.empty? + if !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["namespace"], @excludeNameSpaces) + if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) + if !podItem.nil? && !podItem.empty? + @podCacheMutex.synchronize { + @podItemsCache[key] = podItem + } + else + $log.warn "in_kube_perfinventory::watch_pods:Received podItem is empty or nil @ #{Time.now.utc.iso8601}" + end + else + $log.warn "in_kube_perfinventory::watch_pods:Received poduid either nil or empty @ #{Time.now.utc.iso8601}" + end + elsif notice["type"] == "DELETED" + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? @podCacheMutex.synchronize { - @podItemsCache[key] = podItem + @podItemsCache.delete(key) } - else - $log.warn "in_kube_perfinventory::watch_pods:Received podItem is empty or nil @ #{Time.now.utc.iso8601}" end - else - $log.warn "in_kube_perfinventory::watch_pods:Received poduid either nil or empty @ #{Time.now.utc.iso8601}" end - elsif notice["type"] == "DELETED" - key = item["metadata"]["uid"] - if !key.nil? && !key.empty? - @podCacheMutex.synchronize { - @podItemsCache.delete(key) - } - end - end + end when "ERROR" podsResourceVersion = nil $log.warn("in_kube_perfinventory::watch_pods:ERROR event with :#{notice["object"]} @ #{Time.now.utc.iso8601}") diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index ea7fe4b83..b9e9017ee 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -59,7 +59,7 @@ def initialize @kubeservicesTag = "oneagent.containerInsights.KUBE_SERVICES_BLOB" @containerInventoryTag = "oneagent.containerInsights.CONTAINER_INVENTORY_BLOB" - @excludeNamespaces = [] + @excludeNameSpaces = [] end config_param :run_interval, :time, :default => 60 @@ -174,8 +174,8 @@ def enumerate(podList = nil) $log.info("in_kube_podinventory::enumerate: using kubepodinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNamespaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() - $log.info("in_kube_podinventory::enumerate: using data collection excludenamespaces -#{@excludeNamespaces} @ #{Time.now.utc.iso8601}") + @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + $log.info("in_kube_podinventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end serviceInventory = {} @@ -745,7 +745,7 @@ def watch_pods podNameSpace = item["metadata"]["namespace"] end # exclude resource item if this in excluded namespaces - next unless !KubernetesApiClient.isExcludeResourceItem(podNameSpace, @excludeNamespaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podNameSpace, @excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? 
item["spec"]["nodeName"] : "" @@ -790,7 +790,7 @@ def watch_pods podNameSpace = item["metadata"]["namespace"] end # exclude resource item if this in excluded namespaces - next unless !KubernetesApiClient.isExcludeResourceItem(podNameSpace, @excludeNamespaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podNameSpace, @excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? item["spec"]["nodeName"] : "" @@ -854,7 +854,7 @@ def watch_pods podNameSpace = item["metadata"]["namespace"] end # exclude resource item if this in excluded namespaces - if !KubernetesApiClient.isExcludeResourceItem(podNameSpace, @excludeNamespaces) + if !KubernetesApiClient.isExcludeResourceItem(podNameSpace, @excludeNameSpaces) if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) key = item["metadata"]["uid"] if !key.nil? && !key.empty? @@ -953,7 +953,7 @@ def watch_services serviceNamespace = item["metadata"]["namespace"] end # exclude resource item if this in excluded namespaces - next unless !KubernetesApiClient.isExcludeResourceItem(serviceNamespace, @excludeNamespaces) + next unless !KubernetesApiClient.isExcludeResourceItem(serviceNamespace, @excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? serviceItem = KubernetesApiClient.getOptimizedItem("services", item) @@ -1009,7 +1009,7 @@ def watch_services serviceNamespace = item["metadata"]["namespace"] end # exclude resource item if this in excluded namespaces - if !KubernetesApiClient.isExcludeResourceItem(serviceNamespace, @excludeNamespaces) + if !KubernetesApiClient.isExcludeResourceItem(serviceNamespace, @excludeNameSpaces) if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) key = item["metadata"]["uid"] if !key.nil? && !key.empty? 
diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 882370b6c..3253687ec 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -69,6 +69,8 @@ def enumerate end @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() $log.info("in_kube_pvinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") + @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + $log.info("in_kube_pvinventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end continuationToken = nil @@ -125,9 +127,18 @@ def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) begin records = [] pvInventory["items"].each do |item| - # Node, pod, & usage info can be found by joining with pvUsedBytes metric using PVCNamespace/PVCName record = {} + # Optional values + pvcNamespace, pvcName = getPVCInfo(item) + type, typeInfo = getTypeInfo(item) + record["PVCNamespace"] = pvcNamespace + record["PVCName"] = pvcName + record["PVType"] = type + record["PVTypeInfo"] = typeInfo + + next unless !KubernetesApiClient.isExcludeResourceItem(pvcNamespace, @excludeNameSpaces) + record["CollectionTime"] = batchTime record["ClusterId"] = KubernetesApiClient.getClusterId record["ClusterName"] = KubernetesApiClient.getClusterName @@ -138,13 +149,7 @@ def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) record["PVCapacityBytes"] = KubernetesApiClient.getMetricNumericValue("memory", item["spec"]["capacity"]["storage"]) record["PVCreationTimeStamp"] = item["metadata"]["creationTimestamp"] - # Optional values - pvcNamespace, pvcName = getPVCInfo(item) - type, typeInfo = getTypeInfo(item) - record["PVCNamespace"] = pvcNamespace - record["PVCName"] = pvcName - record["PVType"] = type - record["PVTypeInfo"] = typeInfo + records.push(record) diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index 314ab2bd6..c2126163b 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -90,6 +90,8 @@ def enumerate end @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() $log.info("in_kubestate_deployments::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") + @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + $log.info("in_kubestate_deployments::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil @@ -142,6 +144,7 @@ def parse_and_emit_records(deployments, batchTime = Time.utc.iso8601) begin metricInfo = deployments metricInfo["items"].each do |deployment| + next unless !KubernetesApiClient.isExcludeResourceItem(deployment["metadata"]["namespace"], @excludeNameSpaces) deploymentName = deployment["metadata"]["name"] deploymentNameSpace = deployment["metadata"]["namespace"] deploymentCreatedTime = "" diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 7f2c55607..f340dbcd2 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -32,7 +32,7 @@ def initialize @NodeName = OMS::Common.get_hostname @ClusterId = KubernetesApiClient.getClusterId @ClusterName = KubernetesApiClient.getClusterName - 
@excludeNamespaces = [] + @excludeNameSpaces = [] end config_param :run_interval, :time, :default => 60 @@ -88,8 +88,8 @@ def enumerate $log.info("in_kubestate_hpa::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() $log.info("in_kubestate_hpa::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNamespaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() - $log.info("in_kube_podinventory::enumerate: using data collection excludenamespaces -#{@excludeNamespaces} @ #{Time.now.utc.iso8601}") + @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + $log.info("in_kubestate_hpa::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil @@ -133,6 +133,7 @@ def parse_and_emit_records(hpas, batchTime = Time.utc.iso8601) begin metricInfo = hpas metricInfo["items"].each do |hpa| + next unless !KubernetesApiClient.isExcludeResourceItem(hpa["metadata"]["namespace"], @excludeNameSpaces) hpaName = hpa["metadata"]["name"] hpaNameSpace = hpa["metadata"]["namespace"] hpaCreatedTime = "" diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 9e3f30d79..5aade7f93 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -22,6 +22,7 @@ def initialize require_relative "constants" require_relative "extension_utils" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" + @excludeNameSpaces = [] end config_param :run_interval, :time, :default => 60 @@ -69,8 +70,12 @@ def enumerate() end $log.info("in_win_cadvisor_perf::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") $log.info("in_win_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") + @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() $log.info("in_win_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") + + @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + $log.info("in_win_cadvisor_perf::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end #Resetting this cache so that it is populated with the current set of containers with every call @@ -86,7 +91,7 @@ def enumerate() end @@winNodes.each do |winNode| eventStream = Fluent::MultiEventStream.new - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, metricTime: Time.now.utc.iso8601) + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, @excludeNameSpaces, metricTime: Time.now.utc.iso8601) metricData.each do |record| if !record.empty? 
eventStream.add(time, record) if record From 6b20eb438616a10877c39fe1bae946892e449eaa Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sun, 31 Jul 2022 11:57:51 +0530 Subject: [PATCH 252/301] wip --- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 20 +++++++++---- source/plugins/ruby/KubernetesApiClient.rb | 10 +++++-- source/plugins/ruby/in_containerinventory.rb | 2 +- source/plugins/ruby/in_kube_perfinventory.rb | 6 ++-- source/plugins/ruby/in_kube_podinventory.rb | 30 ++++--------------- source/plugins/ruby/in_kube_pvinventory.rb | 2 +- .../plugins/ruby/in_kubestate_deployments.rb | 2 +- source/plugins/ruby/in_kubestate_hpa.rb | 2 +- 8 files changed, 33 insertions(+), 41 deletions(-) diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 6128b4029..c21d94512 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -210,7 +210,7 @@ def getMetrics(winNode: nil, excludeNameSpaces: [], metricTime: Time.now.utc.iso return metricDataItems end - def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime) + def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, excludeNameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId timeDifference = (DateTime.now.to_time.to_i - @@telemetryCpuMetricTimeTracker).abs @@ -222,6 +222,8 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNameSpace, excludeNameSpaces) + if (!pod["containers"].nil?) pod["containers"].each do |container| #cpu metric @@ -504,7 +506,7 @@ def resetWinContainerIdCache end # usageNanoCores doesnt exist for windows nodes. Hence need to compute this from usageCoreNanoSeconds - def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime) + def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, excludeNameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId timeDifference = (DateTime.now.to_time.to_i - @@telemetryCpuMetricTimeTracker).abs @@ -518,6 +520,8 @@ def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNameSpace, excludeNameSpaces) + if (!pod["containers"].nil?) 
pod["containers"].each do |container| #cpu metric @@ -633,7 +637,7 @@ def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, return metricItems end - def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollect, metricNametoReturn, metricPollTime, operatingSystem) + def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollect, metricNametoReturn, metricPollTime, operatingSystem, excludeNameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId timeDifference = (DateTime.now.to_time.to_i - @@telemetryMemoryMetricTimeTracker).abs @@ -643,7 +647,8 @@ def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollec metricInfo["pods"].each do |pod| podUid = pod["podRef"]["uid"] podName = pod["podRef"]["name"] - podNamespace = pod["podRef"]["namespace"] + podNamespace = pod["podRef"]["namespace"] + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNameSpace, excludeNameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| containerName = container["name"] @@ -884,14 +889,17 @@ def getNodeLastRebootTimeMetric(metricJSON, hostName, metricNametoReturn, metric return metricItem end - def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, metricPollTime) + def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, metricPollTime, excludeNameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId #currentTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z begin metricInfo = metricJSON metricInfo["pods"].each do |pod| - podUid = pod["podRef"]["uid"] + podUid = pod["podRef"]["uid"] + podNamespace = pod["podRef"]["namespace"] + podName = pod["podRef"]["name"] + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNameSpace, excludeNameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| containerName = container["name"] diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 70ec7a0ec..d1d521817 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -1395,11 +1395,15 @@ def isEmitCacheTelemetry return isEmitCacheTelemtryEnabled end - def isExcludeResourceItem(resourceNamespace, excludeNameSpaces) + def isExcludeResourceItem(resourceName, resourceNamespace, excludeNameSpaces) isExclude = false begin - if !resourceNamespace.nil? && !resourceNamespace.empty? - !excludeNameSpaces.nil? && !excludeNameSpaces.empty? && excludeNameSpaces.length > 0 + if !resourceName.nil? && !resourceName.empty? + && resourceName.start_with("omsagent-") + && resourceNamespace.eql("kube-system") + isExclude = false + elsif !resourceNamespace.nil? && !resourceNamespace.empty? + && !excludeNameSpaces.nil? && !excludeNameSpaces.empty? && excludeNameSpaces.length > 0 && excludeNameSpaces.include?(resourceNamespace) isExclude = true end diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index c35dffa78..395916da6 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -81,7 +81,7 @@ def enumerate podList = JSON.parse(response.body) if !podList.nil? && !podList.empty? && podList.key?("items") && !podList["items"].nil? && !podList["items"].empty? 
podList["items"].each do |item| - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) containerInventoryRecords = KubernetesContainerInventory.getContainerInventoryRecords(item, batchTime, clusterCollectEnvironmentVar) containerInventoryRecords.each do |containerRecord| ContainerInventoryState.writeContainerState(containerRecord) diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index 091f2819a..cb8f83e7f 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -267,7 +267,7 @@ def watch_pods if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) $log.info("in_kube_perfinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"],item["metadata"]["namespace"], @excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) @@ -299,7 +299,7 @@ def watch_pods if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) $log.info("in_kube_perfinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) @@ -350,7 +350,7 @@ def watch_pods # We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! break end - if !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["namespace"], @excludeNameSpaces) + if !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) key = item["metadata"]["uid"] if !key.nil? && !key.empty? diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index b9e9017ee..7ce387df9 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -740,12 +740,8 @@ def watch_pods if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) $log.info("in_kube_podinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| - podNameSpace = "" - if !item["metdata"].nil? && !item["metadata"]["namespace"].nil? 
- podNameSpace = item["metadata"]["namespace"] - end # exclude resource item if this in excluded namespaces - next unless !KubernetesApiClient.isExcludeResourceItem(podNameSpace, @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? item["spec"]["nodeName"] : "" @@ -785,12 +781,8 @@ def watch_pods if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) $log.info("in_kube_podinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| - podNameSpace = "" - if !item["metdata"].nil? && !item["metadata"]["namespace"].nil? - podNameSpace = item["metadata"]["namespace"] - end # exclude resource item if this in excluded namespaces - next unless !KubernetesApiClient.isExcludeResourceItem(podNameSpace, @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? item["spec"]["nodeName"] : "" @@ -849,12 +841,8 @@ def watch_pods # We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! break end - podNameSpace = "" - if !item["metdata"].nil? && !item["metadata"]["namespace"].nil? - podNameSpace = item["metadata"]["namespace"] - end # exclude resource item if this in excluded namespaces - if !KubernetesApiClient.isExcludeResourceItem(podNameSpace, @excludeNameSpaces) + if !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) key = item["metadata"]["uid"] if !key.nil? && !key.empty? @@ -948,12 +936,8 @@ def watch_services if (serviceInventory.key?("items") && !serviceInventory["items"].nil? && !serviceInventory["items"].empty?) $log.info("in_kube_podinventory::watch_services:number of service items #{serviceInventory["items"].length} @ #{Time.now.utc.iso8601}") serviceInventory["items"].each do |item| - serviceNamespace = "" - if !item["metdata"].nil? && !item["metadata"]["namespace"].nil? - serviceNamespace = item["metadata"]["namespace"] - end # exclude resource item if this in excluded namespaces - next unless !KubernetesApiClient.isExcludeResourceItem(serviceNamespace, @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? serviceItem = KubernetesApiClient.getOptimizedItem("services", item) @@ -1004,12 +988,8 @@ def watch_services # We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! break end - serviceNamespace = "" - if !item["metdata"].nil? && !item["metadata"]["namespace"].nil? 
- serviceNamespace = item["metadata"]["namespace"] - end # exclude resource item if this in excluded namespaces - if !KubernetesApiClient.isExcludeResourceItem(serviceNamespace, @excludeNameSpaces) + if !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) key = item["metadata"]["uid"] if !key.nil? && !key.empty? diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 3253687ec..6bb06cbbe 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -137,7 +137,7 @@ def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) record["PVType"] = type record["PVTypeInfo"] = typeInfo - next unless !KubernetesApiClient.isExcludeResourceItem(pvcNamespace, @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(pvcName, pvcNamespace, @excludeNameSpaces) record["CollectionTime"] = batchTime record["ClusterId"] = KubernetesApiClient.getClusterId diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index c2126163b..43560a0c1 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -144,7 +144,7 @@ def parse_and_emit_records(deployments, batchTime = Time.utc.iso8601) begin metricInfo = deployments metricInfo["items"].each do |deployment| - next unless !KubernetesApiClient.isExcludeResourceItem(deployment["metadata"]["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(deployment["metadata"]["name"], deployment["metadata"]["namespace"], @excludeNameSpaces) deploymentName = deployment["metadata"]["name"] deploymentNameSpace = deployment["metadata"]["namespace"] deploymentCreatedTime = "" diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index f340dbcd2..0f069df44 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -133,7 +133,7 @@ def parse_and_emit_records(hpas, batchTime = Time.utc.iso8601) begin metricInfo = hpas metricInfo["items"].each do |hpa| - next unless !KubernetesApiClient.isExcludeResourceItem(hpa["metadata"]["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(hpa["metadata"]["name"], hpa["metadata"]["namespace"], @excludeNameSpaces) hpaName = hpa["metadata"]["name"] hpaNameSpace = hpa["metadata"]["namespace"] hpaCreatedTime = "" From 288f58e12618c06bbb32c0ae15b59a06303022a6 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sun, 31 Jul 2022 14:58:21 +0530 Subject: [PATCH 253/301] fix naming --- source/plugins/ruby/CAdvisorMetricsAPIClient.rb | 8 ++++---- source/plugins/ruby/extension_utils.rb | 12 ++++++------ source/plugins/ruby/in_cadvisor_perf.rb | 4 ++-- source/plugins/ruby/in_containerinventory.rb | 4 ++-- source/plugins/ruby/in_kube_events.rb | 2 +- source/plugins/ruby/in_kube_nodes.rb | 2 +- source/plugins/ruby/in_kube_perfinventory.rb | 4 ++-- source/plugins/ruby/in_kube_podinventory.rb | 4 ++-- source/plugins/ruby/in_kube_podmdminventory.rb | 2 +- source/plugins/ruby/in_kube_pvinventory.rb | 4 ++-- source/plugins/ruby/in_kubestate_deployments.rb | 4 ++-- source/plugins/ruby/in_kubestate_hpa.rb | 4 ++-- source/plugins/ruby/in_win_cadvisor_perf.rb | 4 ++-- 13 files changed, 29 insertions(+), 29 deletions(-) diff --git 
a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index c21d94512..f8d800efd 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -222,7 +222,7 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNameSpace, excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, excludeNameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| @@ -520,7 +520,7 @@ def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNameSpace, excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, excludeNameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| @@ -648,7 +648,7 @@ def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollec podUid = pod["podRef"]["uid"] podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNameSpace, excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, excludeNameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| containerName = container["name"] @@ -899,7 +899,7 @@ def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, m podUid = pod["podRef"]["uid"] podNamespace = pod["podRef"]["namespace"] podName = pod["podRef"]["name"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNameSpace, excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, excludeNameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| containerName = container["name"] diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index 41a92f772..d03f4c957 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -25,7 +25,7 @@ def isAADMSIAuthMode() return !ENV["AAD_MSI_AUTH_MODE"].nil? && !ENV["AAD_MSI_AUTH_MODE"].empty? 
&& ENV["AAD_MSI_AUTH_MODE"].downcase == "true" end - def getdataCollectionIntervalSeconds + def getDataCollectionIntervalSeconds collectionIntervalSeconds = 60 begin extensionSettings = Extension.instance.get_extension_settings() @@ -38,16 +38,16 @@ def getdataCollectionIntervalSeconds intervalMinutes.to_i <= Constants::DATA_COLLECTION_INTERVAL_MINUTES_MAX collectionIntervalSeconds = 60 * intervalMinutes.to_i else - $log.warn("ExtensionUtils::getdataCollectionIntervalSeconds: dataCollectionIntervalMinutes: #{intervalMinutes} not valid hence using default") + $log.warn("ExtensionUtils::getDataCollectionIntervalSeconds: dataCollectionIntervalMinutes: #{intervalMinutes} not valid hence using default") end end rescue => err - $log.warn("ExtensionUtils::getdataCollectionIntervalSeconds: failed with an exception: #{errorStr}") + $log.warn("ExtensionUtils::getDataCollectionIntervalSeconds: failed with an exception: #{errorStr}") end return collectionIntervalSeconds end - def getdataCollectionExcludeNameSpaces + def getDataCollectionExcludeNameSpaces excludeNameSpaces = [] begin extensionSettings = Extension.instance.get_extension_settings() @@ -58,11 +58,11 @@ def getdataCollectionExcludeNameSpaces if !namespacesToExclude.nil? && !namespacesToExclude.empty? && namespacesToExclude.kind_of?(Array) && namespacesToExclude.length > 0 excludeNameSpaces = namespacesToExclude.dup else - $log.warn("ExtensionUtils::getdataCollectionExcludeNameSpaces: excludeNameSpaces: #{namespacesToExclude} not valid hence using default") + $log.warn("ExtensionUtils::getDataCollectionExcludeNameSpaces: excludeNameSpaces: #{namespacesToExclude} not valid hence using default") end end rescue => err - $log.warn("ExtensionUtils::getdataCollectionExcludeNameSpaces: failed with an exception: #{errorStr}") + $log.warn("ExtensionUtils::getDataCollectionExcludeNameSpaces: failed with an exception: #{errorStr}") end return excludeNameSpaces end diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index b957e00b0..fefb5dac3 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -74,9 +74,9 @@ def enumerate() end $log.info("in_cadvisor_perf::enumerate: using perf tag -#{@tag} @ #{Time.now.utc.iso8601}") $log.info("in_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsmetricstag} @ #{Time.now.utc.iso8601}") - @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_cadvisor_perf::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() $log.info("in_cadvisor_perf::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index 395916da6..7a6a1bb0b 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -65,9 +65,9 @@ def enumerate @tag = ExtensionUtils.getOutputStreamId(Constants::CONTAINER_INVENTORY_DATA_TYPE) end $log.info("in_container_inventory::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") - @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() 
$log.info("in_container_inventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() $log.info("in_container_inventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end begin diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index fe1669050..2920babd4 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -93,7 +93,7 @@ def enumerate @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_EVENTS_DATA_TYPE) end $log.info("in_kube_events::enumerate: using kubeevents tag -#{@tag} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() $log.info("in_kube_events::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index 07b33f564..5f67faff8 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -150,7 +150,7 @@ def enumerate $log.info("in_kube_nodes::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_nodes::enumerate: using containernodeinventory tag -#{@ContainerNodeInventoryTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_nodes::enumerate: using kubenodeinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") - @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_nodes::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") end nodesAPIChunkStartTime = (Time.now.to_f * 1000).to_i diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index cb8f83e7f..2153fd2f2 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -100,9 +100,9 @@ def enumerate(podList = nil) end $log.info("in_kube_perfinventory::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_perfinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") - @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_perfinventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() $log.info("in_kube_perfinventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 7ce387df9..502c834cc 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -172,9 +172,9 @@ def enumerate(podList = nil) $log.info("in_kube_podinventory::enumerate: using containerinventory tag -#{@containerInventoryTag} @ 
#{Time.now.utc.iso8601}") $log.info("in_kube_podinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_podinventory::enumerate: using kubepodinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") - @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() $log.info("in_kube_podinventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end diff --git a/source/plugins/ruby/in_kube_podmdminventory.rb b/source/plugins/ruby/in_kube_podmdminventory.rb index f5c51d42d..345b689bc 100644 --- a/source/plugins/ruby/in_kube_podmdminventory.rb +++ b/source/plugins/ruby/in_kube_podmdminventory.rb @@ -64,7 +64,7 @@ def enumerate $log.warn "in_kube_podmdminventory::enumerate:skipping since custom metrics not available either for this cluster type or the region" else if ExtensionUtils.isAADMSIAuthMode() - @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") end parse_and_emit_records() diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 6bb06cbbe..16b52cde4 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -67,9 +67,9 @@ def enumerate if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_PV_INVENTORY_DATA_TYPE) end - @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_pvinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() $log.info("in_kube_pvinventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index 43560a0c1..8372701d8 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -88,9 +88,9 @@ def enumerate if @tag.nil? 
|| !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) @tag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) end - @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kubestate_deployments::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() $log.info("in_kubestate_deployments::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 0f069df44..1d5ec684c 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -86,9 +86,9 @@ def enumerate @tag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) end $log.info("in_kubestate_hpa::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") - @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kubestate_hpa::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() $log.info("in_kubestate_hpa::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 5aade7f93..f311eb2c6 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -71,10 +71,10 @@ def enumerate() $log.info("in_win_cadvisor_perf::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") $log.info("in_win_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") - @run_interval = ExtensionUtils.getdataCollectionIntervalSeconds() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_win_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getdataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() $log.info("in_win_cadvisor_perf::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end From d5df73c80997ff2b8573e79fffd823ffd4bd7a4b Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 2 Aug 2022 04:07:34 +0530 Subject: [PATCH 254/301] Gangams/rs vpa (#801) * add vpa sidecar container * add vpa sidecar container * add vpa sidecar container * add vpa sidecar container * use image which has support for only scaling limits * rename omsagent-rs-vpa to omsagent-vpa * add vpa configmap * use updated version of addon-resizer * collect omsagent-rs limits telemetry if VPA enabled * ignore new unfixed vulnerabilities * fix bug * fix bug * fix bug * bug fix * fix bug * fix bug * rename env var name * use the addon-resizer and collect requests and limits telemetry * fix bug * minor update --- 
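Context for the telemetry portion of this change: when the addon-resizer VPA sidecar is enabled via the RS_ADDON-RESIZER_VPA_ENABLED environment variable, the omsagent-rs container's CPU/memory requests and limits are reported as metric telemetry, throttled so each metric is sent at most once per flush interval. A minimal sketch of that throttling pattern, with simplified, hypothetical names (the shipped code keeps one class-level time tracker per metric and sends through ApplicationInsightsUtility.sendMetricTelemetry):

    # Hypothetical simplification: one hash of per-metric timestamps instead of the
    # four class-level trackers used in KubernetesApiClient.
    FLUSH_INTERVAL_IN_MINUTES = 10   # stand-in for Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES
    @last_sent = Hash.new(0)         # metric name => epoch seconds of last emission

    def emit_metric(name, value, props)
      # stand-in for ApplicationInsightsUtility.sendMetricTelemetry(name, value, props)
      puts "telemetry: #{name}=#{value} #{props}"
    end

    def send_throttled(name, value, props = {})
      elapsed_minutes = (Time.now.to_i - @last_sent[name]) / 60
      return if elapsed_minutes < FLUSH_INTERVAL_IN_MINUTES
      @last_sent[name] = Time.now.to_i
      emit_metric(name, value, props)
    end

    send_throttled("cpuLimitNanoCores", 1_000_000_000, { "PodName" => "omsagent-rs-abc", "ContainerName" => "omsagent" })

The guard in the patched code additionally checks that the pod name starts with "omsagent-rs-", the namespace is kube-system, and the container is omsagent before sending.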
kubernetes/container-azm-ms-vpaconfig.yaml | 13 ++++ kubernetes/omsagent.yaml | 50 +++++++++++++++ .../ruby/ApplicationInsightsUtility.rb | 5 ++ source/plugins/ruby/KubernetesApiClient.rb | 62 ++++++++++++++++++- 4 files changed, 128 insertions(+), 2 deletions(-) create mode 100644 kubernetes/container-azm-ms-vpaconfig.yaml diff --git a/kubernetes/container-azm-ms-vpaconfig.yaml b/kubernetes/container-azm-ms-vpaconfig.yaml new file mode 100644 index 000000000..9734a59f7 --- /dev/null +++ b/kubernetes/container-azm-ms-vpaconfig.yaml @@ -0,0 +1,13 @@ +kind: ConfigMap +apiVersion: v1 +data: + NannyConfiguration: |- + apiVersion: nannyconfig/v1alpha1 + kind: NannyConfiguration + baseCPU: 200m + cpuPerNode: 2m + baseMemory: 350Mi + memoryPerNode: 4Mi +metadata: + name: container-azm-ms-vpaconfig + namespace: kube-system diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index c11650b9e..42a96acaa 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -27,6 +27,11 @@ rules: - apiGroups: ["apps", "extensions", "autoscaling"] resources: ["replicasets", "deployments", "horizontalpodautoscalers"] verbs: ["list"] + # Uncomment below lines if AddonResizer VPA enabled + # - apiGroups: ["apps"] + # resources: ["deployments"] + # resourceNames: [ "omsagent-rs" ] + # verbs: ["get", "patch"] # Uncomment below lines for MSI Auth Mode testing # - apiGroups: [""] # resources: ["secrets"] @@ -617,6 +622,42 @@ spec: spec: serviceAccountName: omsagent containers: + # Uncomment below lines to enable VPA + # # Make sure this matching with version in AKS RP side + # - image: "mcr.microsoft.com/oss/kubernetes/autoscaler/addon-resizer:1.8.14" + # imagePullPolicy: IfNotPresent + # name: omsagent-vpa + # resources: + # limits: + # cpu: 100m + # memory: 300Mi + # requests: + # cpu: 5m + # memory: 30Mi + # env: + # - name: MY_POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: MY_POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: omsagent-rs-vpa-config-volume + # mountPath: /etc/config + # command: + # - /pod_nanny + # - --config-dir=/etc/config + # - --cpu=200m + # - --extra-cpu=2m + # - --memory=300Mi + # - --extra-memory=4Mi + # - --poll-period=180000 + # - --threshold=5 + # - --namespace=kube-system + # - --deployment=omsagent-rs + # - --container=omsagent # Uncomment below lines for MSI Auth Mode testing # - name: addon-token-adapter # command: @@ -655,6 +696,7 @@ spec: - name: omsagent image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022-hotfix" imagePullPolicy: IfNotPresent + # comment resources if VPA configured since the VPA will set these values resources: limits: cpu: 1 @@ -695,6 +737,9 @@ spec: # Uncomment below lines for MSI Auth Mode testing # - name: USING_AAD_MSI_AUTH # value: "true" + # Uncomment below lines when the Addon-resizer VPA enabled + # - name: RS_ADDON-RESIZER_VPA_ENABLED + # value: "true" securityContext: privileged: true ports: @@ -798,6 +843,11 @@ spec: configMap: name: container-azm-ms-osmconfig optional: true + # Uncomment below lines to enable VPA + # - name: omsagent-rs-vpa-config-volume + # configMap: + # name: omsagent-rs-vpa-config + # optional: true --- apiVersion: apps/v1 kind: DaemonSet diff --git a/source/plugins/ruby/ApplicationInsightsUtility.rb b/source/plugins/ruby/ApplicationInsightsUtility.rb index b34cb20ee..6f499e8bd 100644 --- a/source/plugins/ruby/ApplicationInsightsUtility.rb +++ b/source/plugins/ruby/ApplicationInsightsUtility.rb 
@@ -22,6 +22,7 @@ class ApplicationInsightsUtility @@EnvControllerType = "CONTROLLER_TYPE" @@EnvContainerRuntime = "CONTAINER_RUNTIME" @@EnvAADMSIAuthMode = "AAD_MSI_AUTH_MODE" + @@EnvAddonResizerVPAEnabled = "RS_ADDON-RESIZER_VPA_ENABLED" @@isWindows = false @@hostName = (OMS::Common.get_hostname) @@ -93,6 +94,10 @@ def initializeUtility() else @@CustomProperties["aadAuthMSIMode"] = "false" end + addonResizerVPAEnabled = ENV[@@EnvAddonResizerVPAEnabled] + if !addonResizerVPAEnabled.nil? && !addonResizerVPAEnabled.empty? && addonResizerVPAEnabled.downcase == "true".downcase + @@CustomProperties["addonResizerVPAEnabled"] = "true" + end #Check if telemetry is turned off telemetryOffSwitch = ENV["DISABLE_TELEMETRY"] if telemetryOffSwitch && !telemetryOffSwitch.nil? && !telemetryOffSwitch.empty? && telemetryOffSwitch.downcase == "true".downcase diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 6828109b3..9e1ea467c 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -37,7 +37,10 @@ class KubernetesApiClient @Log = Logger.new(@LogPath, 2, 10 * 1048576) #keep last 2 files, max log file size = 10M @@TokenFileName = "/var/run/secrets/kubernetes.io/serviceaccount/token" @@TokenStr = nil - @@telemetryTimeTracker = DateTime.now.to_time.to_i + @@cpuLimitsTelemetryTimeTracker = DateTime.now.to_time.to_i + @@cpuRequestsTelemetryTimeTracker = DateTime.now.to_time.to_i + @@memoryLimitsTelemetryTimeTracker = DateTime.now.to_time.to_i + @@memoryRequestsTelemetryTimeTracker = DateTime.now.to_time.to_i @@resourceLimitsTelemetryHash = {} def initialize @@ -470,6 +473,7 @@ def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToColle if podUid.nil? return metricItems end + podName = pod["metadata"]["name"] nodeName = "" #for unscheduled (non-started) pods nodeName does NOT exist @@ -514,8 +518,12 @@ def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToColle metricCollections.push(metricCollection) metricProps["json_Collections"] = metricCollections.to_json metricItems.push(metricProps) - #No container level limit for the given metric, so default to node level limit + + if isAddonResizerVPAEnabled() + sendReplicasetAgentRequestsAndLimitsTelemetry(podName, podNameSpace, containerName, metricNametoReturn, metricValue) + end else + #No container level limit for the given metric, so default to node level limit if (metricCategory == "limits" && !nodeAllocatableRecord.nil? && !nodeAllocatableRecord.empty? && nodeAllocatableRecord.has_key?(metricNameToCollect)) metricValue = getMetricNumericValue(metricNameToCollect, nodeAllocatableRecord[metricNameToCollect]) metricProps = {} @@ -1394,5 +1402,55 @@ def isEmitCacheTelemetry end return isEmitCacheTelemtryEnabled end + + def isAddonResizerVPAEnabled + isAddonResizerVPAEnabled = false + if !ENV["RS_ADDON-RESIZER_VPA_ENABLED"].nil? && !ENV["RS_ADDON-RESIZER_VPA_ENABLED"].empty? && ENV["RS_ADDON-RESIZER_VPA_ENABLED"].downcase == "true".downcase + isAddonResizerVPAEnabled = true + end + return isAddonResizerVPAEnabled + end + + def sendReplicasetAgentRequestsAndLimitsTelemetry(podName, podNameSpace, containerName, metricName, metricValue) + begin + if (!podName.nil? 
&& podName.downcase.start_with?("omsagent-rs-") && podNameSpace.eql?("kube-system") && containerName.eql?("omsagent")) + telemetryProps = {} + telemetryProps["PodName"] = podName + telemetryProps["ContainerName"] = containerName + case metricName + when "cpuLimitNanoCores" + timeDifference = (DateTime.now.to_time.to_i - @@cpuLimitsTelemetryTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + @@cpuLimitsTelemetryTimeTracker = DateTime.now.to_time.to_i + ApplicationInsightsUtility.sendMetricTelemetry(metricName, metricValue, telemetryProps) + end + when "memoryLimitBytes" + timeDifference = (DateTime.now.to_time.to_i - @@memoryLimitsTelemetryTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + @@memoryLimitsTelemetryTimeTracker = DateTime.now.to_time.to_i + ApplicationInsightsUtility.sendMetricTelemetry(metricName, metricValue, telemetryProps) + end + when "cpuRequestNanoCores" + timeDifference = (DateTime.now.to_time.to_i - @@cpuRequestsTelemetryTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + @@cpuRequestsTelemetryTimeTracker = DateTime.now.to_time.to_i + ApplicationInsightsUtility.sendMetricTelemetry(metricName, metricValue, telemetryProps) + end + when "memoryRequestBytes" + timeDifference = (DateTime.now.to_time.to_i - @@memoryRequestsTelemetryTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + @@memoryRequestsTelemetryTimeTracker = DateTime.now.to_time.to_i + ApplicationInsightsUtility.sendMetricTelemetry(metricName, metricValue, telemetryProps) + end + end + end + rescue => err + @Log.warn "KubernetesApiClient::sendReplicasetAgentRequestsAndLimitsTelemetry failed with an error: #{err}" + end + end end end From c39e64d86cd46cc7f94696f2b25133a064de9ff9 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 4 Aug 2022 12:30:15 +0530 Subject: [PATCH 255/301] rename variable names --- .../existingClusterOnboarding.json | 6 - .../existingClusterOnboarding.json | 197 ++++++++++++++++++ .../existingClusterParam.json | 31 +++ source/plugins/ruby/extension_utils.rb | 9 +- source/plugins/ruby/in_cadvisor_perf.rb | 2 +- source/plugins/ruby/in_containerinventory.rb | 2 +- source/plugins/ruby/in_kube_events.rb | 2 +- source/plugins/ruby/in_kube_perfinventory.rb | 2 +- source/plugins/ruby/in_kube_podinventory.rb | 2 +- source/plugins/ruby/in_kube_pvinventory.rb | 2 +- .../plugins/ruby/in_kubestate_deployments.rb | 2 +- source/plugins/ruby/in_kubestate_hpa.rb | 2 +- source/plugins/ruby/in_win_cadvisor_perf.rb | 2 +- 13 files changed, 242 insertions(+), 19 deletions(-) create mode 100644 scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterOnboarding.json create mode 100644 scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterParam.json diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index d19f9354e..2024e611a 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -74,12 +74,6 @@ "streams": [ 
"Microsoft-ContainerInsights-Group-Default" ], - "extensionSettings": { - "dataCollectionIntervalMinutes": 1, - "excludeNameSpaces": [ - "kube-system" - ] - }, "extensionName": "ContainerInsights" } ] diff --git a/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterOnboarding.json new file mode 100644 index 000000000..0a5faf8cc --- /dev/null +++ b/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterOnboarding.json @@ -0,0 +1,197 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "aksResourceId": { + "type": "string", + "metadata": { + "description": "AKS Cluster Resource ID" + } + }, + "aksResourceLocation": { + "type": "string", + "metadata": { + "description": "Location of the AKS resource e.g. \"East US\"" + } + }, + "resourceTagValues": { + "type": "object", + "metadata": { + "description": "Existing or new tags to use on AKS, ContainerInsights and DataCollectionRule Resources" + } + }, + "workspaceLocation": { + "type": "string", + "metadata": { + "description": "Worksapce Location for data collection rule" + } + }, + "workspaceResourceId": { + "type": "string", + "metadata": { + "description": "Full Resource ID of the log analitycs workspace that will be used for data destination. For example /subscriptions/00000000-0000-0000-0000-0000-00000000/resourceGroups/ResourceGroupName/providers/Microsoft.operationalinsights/workspaces/ws_xyz" + } + }, + "dataCollectionIntervalInMinutes": { + "type": "int", + "metadata": { + "description": "data collection interval for metrics and inventory in minutes" + } + }, + "excludeNamespacesForDataCollection": { + "type": "array", + "metadata": { + "description": "Kubernetes namespaces to exclude for the data collection" + } + } + }, + "variables": { + "clusterSubscriptionId": "[split(parameters('aksResourceId'),'/')[2]]", + "clusterResourceGroup": "[split(parameters('aksResourceId'),'/')[4]]", + "clusterName": "[split(parameters('aksResourceId'),'/')[8]]", + "clusterLocation": "[replace(parameters('aksResourceLocation'),' ', '')]", + "workspaceSubscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]", + "workspaceResourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]", + "dcrName": "[Concat('MSCI', '-', variables('clusterName'), '-', variables('clusterLocation'))]", + "associationName": "ContainerInsightsExtension", + "dataCollectionRuleId": "[resourceId(variables('workspaceSubscriptionId'), variables('workspaceResourceGroup'), 'Microsoft.Insights/dataCollectionRules', variables('dcrName'))]" + }, + "resources": [ + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('aks-monitoring-msi-dcr', '-', uniqueString(variables('dcrName')))]", + "apiVersion": "2017-05-10", + "subscriptionId": "[variables('workspaceSubscriptionId')]", + "resourceGroup": "[variables('workspaceResourceGroup')]", + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "type": "Microsoft.Insights/dataCollectionRules", + "apiVersion": "2021-04-01", + "name": "[variables('dcrName')]", + "location": "[parameters('workspaceLocation')]", + "tags": "[parameters('resourceTagValues')]", + "kind": "Linux", + 
"properties": { + "dataSources": { + "extensions": [ + { + "name": "ContainerInsightsExtension", + "streams": [ + "Microsoft-ContainerInsights-Group-Default" + ], + "extensionSettings": { + "dataCollectionIntervalMinutes": "[parameters('dataCollectionIntervalInMinutes')]", + "excludeNameSpaces": "[parameters('excludeNamespacesForDataCollection')]" + }, + "extensionName": "ContainerInsights" + } + ] + }, + "destinations": { + "logAnalytics": [ + { + "workspaceResourceId": "[parameters('workspaceResourceId')]", + "name": "ciworkspace" + } + ] + }, + "dataFlows": [ + { + "streams": [ + "Microsoft-ContainerInsights-Group-Default" + ], + "destinations": [ + "ciworkspace" + ] + } + ] + } + } + ] + }, + "parameters": {} + } + }, + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('aks-monitoring-msi-dcra', '-', uniqueString(parameters('aksResourceId')))]", + "apiVersion": "2017-05-10", + "subscriptionId": "[variables('clusterSubscriptionId')]", + "resourceGroup": "[variables('clusterResourceGroup')]", + "dependsOn": [ + "[Concat('aks-monitoring-msi-dcr', '-', uniqueString(variables('dcrName')))]" + ], + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "type": "Microsoft.ContainerService/managedClusters/providers/dataCollectionRuleAssociations", + "name": "[concat(variables('clusterName'),'/microsoft.insights/', variables('associationName'))]", + "apiVersion": "2021-04-01", + "properties": { + "description": "Association of data collection rule. Deleting this association will break the data collection for this AKS Cluster.", + "dataCollectionRuleId": "[variables('dataCollectionRuleId')]" + } + } + + ] + }, + "parameters": {} + } + }, + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('aks-monitoring-msi-addon', '-', uniqueString(parameters('aksResourceId')))]", + "apiVersion": "2017-05-10", + "subscriptionId": "[variables('clusterSubscriptionId')]", + "resourceGroup": "[variables('clusterResourceGroup')]", + "dependsOn": [ + "[Concat('aks-monitoring-msi-dcra', '-', uniqueString(parameters('aksResourceId')))]" + ], + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "name": "[variables('clusterName')]", + "type": "Microsoft.ContainerService/managedClusters", + "location": "[parameters('aksResourceLocation')]", + "tags": "[parameters('resourceTagValues')]", + "apiVersion": "2018-03-31", + "properties": { + "mode": "Incremental", + "id": "[parameters('aksResourceId')]", + "addonProfiles": { + "omsagent": { + "enabled": true, + "config": { + "logAnalyticsWorkspaceResourceID": "[parameters('workspaceResourceId')]", + "useAADAuth": "true" + } + } + } + } + } + ] + }, + "parameters": {} + } + } + ] +} diff --git a/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterParam.json b/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterParam.json new file mode 100644 index 000000000..3f313c754 --- /dev/null +++ b/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterParam.json @@ -0,0 +1,31 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", + "contentVersion": 
"1.0.0.0", + "parameters": { + "aksResourceId": { + "value": "/subscriptions//resourcegroups//providers/Microsoft.ContainerService/managedClusters/" + }, + "aksResourceLocation": { + "value": "" + }, + "workspaceResourceId": { + "value": "/subscriptions//resourceGroups//providers/Microsoft.OperationalInsights/workspaces/" + }, + "workspaceLocation": { + "value": "" + }, + "resourceTagValues": { + "value": { + "": "", + "": "", + "": "" + } + }, + "dataCollectionIntervalInMinutes": { + "value" : 1 + }, + "excludeNamespacesForDataCollection": { + "value": [ "kube-system"] + } + } + } diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index d03f4c957..70cf2343a 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -47,7 +47,7 @@ def getDataCollectionIntervalSeconds return collectionIntervalSeconds end - def getDataCollectionExcludeNameSpaces + def getExcludedNamespacesForDataCollection excludeNameSpaces = [] begin extensionSettings = Extension.instance.get_extension_settings() @@ -56,13 +56,14 @@ def getDataCollectionExcludeNameSpaces extensionSettings.has_key(Constants::EXTENSION_SETTING_EXCLUDE_NAMESPACES) namespacesToExclude = extensionSettings[Constants::EXTENSION_SETTING_EXCLUDE_NAMESPACES] if !namespacesToExclude.nil? && !namespacesToExclude.empty? && namespacesToExclude.kind_of?(Array) && namespacesToExclude.length > 0 - excludeNameSpaces = namespacesToExclude.dup + uniqNamespaces = namespacesToExclude.uniq + excludeNameSpaces = uniqNamespaces.map(&:downcase) else - $log.warn("ExtensionUtils::getDataCollectionExcludeNameSpaces: excludeNameSpaces: #{namespacesToExclude} not valid hence using default") + $log.warn("ExtensionUtils::getExcludedNamespacesForDataCollection: excludeNameSpaces: #{namespacesToExclude} not valid hence using default") end end rescue => err - $log.warn("ExtensionUtils::getDataCollectionExcludeNameSpaces: failed with an exception: #{errorStr}") + $log.warn("ExtensionUtils::getExcludedNamespacesForDataCollection: failed with an exception: #{errorStr}") end return excludeNameSpaces end diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index fefb5dac3..f5f255ded 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -76,7 +76,7 @@ def enumerate() $log.info("in_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsmetricstag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_cadvisor_perf::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() $log.info("in_cadvisor_perf::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index 7a6a1bb0b..65fab44cf 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -67,7 +67,7 @@ def enumerate $log.info("in_container_inventory::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_container_inventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") - 
@excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() $log.info("in_container_inventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end begin diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index 2920babd4..f39dbb24b 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -93,7 +93,7 @@ def enumerate @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_EVENTS_DATA_TYPE) end $log.info("in_kube_events::enumerate: using kubeevents tag -#{@tag} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() $log.info("in_kube_events::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index 2153fd2f2..74b1f0e70 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -102,7 +102,7 @@ def enumerate(podList = nil) $log.info("in_kube_perfinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_perfinventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() $log.info("in_kube_perfinventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 502c834cc..931d30bbc 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -174,7 +174,7 @@ def enumerate(podList = nil) $log.info("in_kube_podinventory::enumerate: using kubepodinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() $log.info("in_kube_podinventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 16b52cde4..0721eacc7 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -69,7 +69,7 @@ def enumerate end @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_pvinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() $log.info("in_kube_pvinventory::enumerate: using data collection excludeNameSpaces 
-#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index 8372701d8..f1ad0339f 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -90,7 +90,7 @@ def enumerate end @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kubestate_deployments::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() $log.info("in_kubestate_deployments::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 1d5ec684c..45c8f3d2d 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -88,7 +88,7 @@ def enumerate $log.info("in_kubestate_hpa::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kubestate_hpa::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() $log.info("in_kubestate_hpa::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index f311eb2c6..d25d4d649 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -74,7 +74,7 @@ def enumerate() @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_win_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getDataCollectionExcludeNameSpaces() + @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() $log.info("in_win_cadvisor_perf::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end From f430508805a9188ba96d384dd66755fcfa6612a8 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 4 Aug 2022 12:36:56 +0530 Subject: [PATCH 256/301] rename variable names --- source/plugins/ruby/extension_utils.rb | 6 +++--- source/plugins/ruby/in_cadvisor_perf.rb | 2 +- source/plugins/ruby/in_containerinventory.rb | 2 +- source/plugins/ruby/in_kube_events.rb | 2 +- source/plugins/ruby/in_kube_perfinventory.rb | 2 +- source/plugins/ruby/in_kube_podinventory.rb | 2 +- source/plugins/ruby/in_kube_pvinventory.rb | 2 +- source/plugins/ruby/in_kubestate_deployments.rb | 2 +- source/plugins/ruby/in_kubestate_hpa.rb | 2 +- source/plugins/ruby/in_win_cadvisor_perf.rb | 2 +- 10 files changed, 12 insertions(+), 12 deletions(-) diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index 70cf2343a..bc8bc96d2 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -47,7 +47,7 @@ def getDataCollectionIntervalSeconds return collectionIntervalSeconds end - 
def getExcludedNamespacesForDataCollection + def getNamespacesToExcludeForDataCollection excludeNameSpaces = [] begin extensionSettings = Extension.instance.get_extension_settings() @@ -59,11 +59,11 @@ def getExcludedNamespacesForDataCollection uniqNamespaces = namespacesToExclude.uniq excludeNameSpaces = uniqNamespaces.map(&:downcase) else - $log.warn("ExtensionUtils::getExcludedNamespacesForDataCollection: excludeNameSpaces: #{namespacesToExclude} not valid hence using default") + $log.warn("ExtensionUtils::getNamespacesToExcludeForDataCollection: excludeNameSpaces: #{namespacesToExclude} not valid hence using default") end end rescue => err - $log.warn("ExtensionUtils::getExcludedNamespacesForDataCollection: failed with an exception: #{errorStr}") + $log.warn("ExtensionUtils::getNamespacesToExcludeForDataCollection: failed with an exception: #{errorStr}") end return excludeNameSpaces end diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index f5f255ded..982cb5e11 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -76,7 +76,7 @@ def enumerate() $log.info("in_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsmetricstag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_cadvisor_perf::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() + @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() $log.info("in_cadvisor_perf::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index 65fab44cf..fea5efdef 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -67,7 +67,7 @@ def enumerate $log.info("in_container_inventory::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_container_inventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() + @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() $log.info("in_container_inventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end begin diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index f39dbb24b..50da47f4b 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -93,7 +93,7 @@ def enumerate @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_EVENTS_DATA_TYPE) end $log.info("in_kube_events::enumerate: using kubeevents tag -#{@tag} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() + @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() $log.info("in_kube_events::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index 74b1f0e70..c2069dd9b 100644 --- 
a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -102,7 +102,7 @@ def enumerate(podList = nil) $log.info("in_kube_perfinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_perfinventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() + @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() $log.info("in_kube_perfinventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 931d30bbc..a98b63b89 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -174,7 +174,7 @@ def enumerate(podList = nil) $log.info("in_kube_podinventory::enumerate: using kubepodinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() + @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() $log.info("in_kube_podinventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 0721eacc7..2232db073 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -69,7 +69,7 @@ def enumerate end @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_pvinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() + @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() $log.info("in_kube_pvinventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index f1ad0339f..ee74bfe7b 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -90,7 +90,7 @@ def enumerate end @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kubestate_deployments::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() + @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() $log.info("in_kubestate_deployments::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 45c8f3d2d..7cc2f3a75 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -88,7 +88,7 @@ def enumerate $log.info("in_kubestate_hpa::enumerate: using tag 
-#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kubestate_hpa::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() + @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() $log.info("in_kubestate_hpa::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index d25d4d649..3e0290137 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -74,7 +74,7 @@ def enumerate() @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_win_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getExcludedNamespacesForDataCollection() + @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() $log.info("in_win_cadvisor_perf::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end From 5cda9a124bc9529d6a9818d01c8fd107790334a1 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 4 Aug 2022 12:42:01 +0530 Subject: [PATCH 257/301] add telemetry --- source/plugins/ruby/in_kube_podinventory.rb | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index a98b63b89..b8eafb684 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -235,6 +235,12 @@ def enumerate(podList = nil) telemetryProperties["SERVICE_ITEMS_CACHE_SIZE_KB"] = serviceItemsCacheSizeKB telemetryProperties["WINDOWS_CONTAINER_RECORDS_CACHE_SIZE_KB"] = @windowsContainerRecordsCacheSizeBytes / 1024 end + if !@excludeNameSpaces.nil? && !@excludeNameSpaces.empty? 
&& @excludeNameSpaces.length > 0 + telemetry["DATACOLLECTION_EXCLUDED_NAMESPACES"] = @excludeNameSpaces + end + if @run_interval > 60 + telemetry["DATACOLLECTION_INTERVAL_MINUTES"] = @run_interval / 60 + end ApplicationInsightsUtility.sendCustomEvent("KubePodInventoryHeartBeatEvent", telemetryProperties) ApplicationInsightsUtility.sendMetricTelemetry("PodCount", @podCount, {}) ApplicationInsightsUtility.sendMetricTelemetry("ContainerCount", @containerCount, {}) From 5c27b85f51eda3180038e04556bba3528c4963b2 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 4 Aug 2022 15:41:32 +0530 Subject: [PATCH 258/301] fix bugs --- source/plugins/ruby/KubernetesApiClient.rb | 11 ++++------- source/plugins/ruby/extension_utils.rb | 2 +- source/plugins/ruby/in_cadvisor_perf.rb | 6 +++--- source/plugins/ruby/in_containerinventory.rb | 4 ++-- source/plugins/ruby/in_kube_events.rb | 2 +- source/plugins/ruby/in_kube_nodes.rb | 2 +- source/plugins/ruby/in_kube_perfinventory.rb | 6 +++--- source/plugins/ruby/in_kube_podinventory.rb | 4 ++-- source/plugins/ruby/in_kube_podmdminventory.rb | 2 +- source/plugins/ruby/in_kube_pvinventory.rb | 2 +- source/plugins/ruby/in_kubestate_deployments.rb | 2 +- source/plugins/ruby/in_kubestate_hpa.rb | 2 +- source/plugins/ruby/in_win_cadvisor_perf.rb | 4 ++-- 13 files changed, 23 insertions(+), 26 deletions(-) diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index fe8020e44..ef064eb3d 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -1406,14 +1406,11 @@ def isEmitCacheTelemetry def isExcludeResourceItem(resourceName, resourceNamespace, excludeNameSpaces) isExclude = false begin - if !resourceName.nil? && !resourceName.empty? - && resourceName.start_with("omsagent-") - && resourceNamespace.eql("kube-system") + # dont exclude agent related data + if !resourceName.nil? && !resourceName.empty? && resourceName.start_with?("omsagent") && resourceNamespace.eql?("kube-system") isExclude = false - elsif !resourceNamespace.nil? && !resourceNamespace.empty? - && !excludeNameSpaces.nil? && !excludeNameSpaces.empty? && excludeNameSpaces.length > 0 - && excludeNameSpaces.include?(resourceNamespace) - isExclude = true + elsif !resourceNamespace.nil? && !resourceNamespace.empty? && !excludeNameSpaces.nil? && !excludeNameSpaces.empty? 
&& excludeNameSpaces.length > 0 && excludeNameSpaces.includee?(resourceNamespace) + isExclude = true end rescue => errorStr @Log.warn "KubernetesApiClient::isExcludeResourceItem:Failed with an error : #{errorStr}" diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index bc8bc96d2..5d8ac3f47 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -3,7 +3,7 @@ # frozen_string_literal: true require_relative "extension" -require "constants" +require_relative "constants" class ExtensionUtils class << self diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index 982cb5e11..e0cfb3a37 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -75,12 +75,12 @@ def enumerate() $log.info("in_cadvisor_perf::enumerate: using perf tag -#{@tag} @ #{Time.now.utc.iso8601}") $log.info("in_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsmetricstag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() - $log.info("in_cadvisor_perf::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") + $log.info("in_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_cadvisor_perf::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + $log.info("in_cadvisor_perf::enumerate: using data collection excludeNameSpaces: #{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, @excludeNameSpaces, metricTime: batchTime) + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, excludeNameSpaces: @excludeNameSpaces, metricTime: batchTime) metricData.each do |record| eventStream.add(time, record) if record end diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index fea5efdef..320de34fa 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -66,9 +66,9 @@ def enumerate end $log.info("in_container_inventory::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() - $log.info("in_container_inventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") + $log.info("in_container_inventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_container_inventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + $log.info("in_container_inventory::enumerate: using data collection excludeNameSpaces: #{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end begin containerRuntimeEnv = ENV["CONTAINER_RUNTIME"] diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index 50da47f4b..43ab077ae 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -164,7 +164,7 @@ def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTim end # drop the events if the event of the excluded namespace - next unless 
!KubernetesApiClient.isExcludeResourceItem(items["involvedObject"]["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem("", items["involvedObject"]["namespace"], @excludeNameSpaces) record["ObjectKind"] = items["involvedObject"]["kind"] record["Namespace"] = items["involvedObject"]["namespace"] diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index 5f67faff8..75136fdf5 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -151,7 +151,7 @@ def enumerate $log.info("in_kube_nodes::enumerate: using containernodeinventory tag -#{@ContainerNodeInventoryTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_nodes::enumerate: using kubenodeinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() - $log.info("in_kube_nodes::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_nodes::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") end nodesAPIChunkStartTime = (Time.now.to_f * 1000).to_i diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index c2069dd9b..2b8a75d18 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -101,9 +101,9 @@ def enumerate(podList = nil) $log.info("in_kube_perfinventory::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_perfinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() - $log.info("in_kube_perfinventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_perfinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_kube_perfinventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_perfinventory::enumerate: using data collection excludeNameSpaces: #{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end nodeAllocatableRecords = getNodeAllocatableRecords() @@ -267,7 +267,7 @@ def watch_pods if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) $log.info("in_kube_perfinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"],item["metadata"]["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? 
podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index b8eafb684..e54cb5a69 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -173,9 +173,9 @@ def enumerate(podList = nil) $log.info("in_kube_podinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_podinventory::enumerate: using kubepodinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() - $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_kube_podinventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using data collection excludeNameSpaces: #{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end serviceInventory = {} diff --git a/source/plugins/ruby/in_kube_podmdminventory.rb b/source/plugins/ruby/in_kube_podmdminventory.rb index 345b689bc..3e2473634 100644 --- a/source/plugins/ruby/in_kube_podmdminventory.rb +++ b/source/plugins/ruby/in_kube_podmdminventory.rb @@ -65,7 +65,7 @@ def enumerate else if ExtensionUtils.isAADMSIAuthMode() @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() - $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds) -#{@run_interval} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") end parse_and_emit_records() end diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 2232db073..b3bf33c57 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -70,7 +70,7 @@ def enumerate @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_pvinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_kube_pvinventory::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_pvinventory::enumerate: using data collection excludeNameSpaces: #{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end continuationToken = nil diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index ee74bfe7b..2f7576d4a 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -91,7 +91,7 @@ def enumerate @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kubestate_deployments::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_kubestate_deployments::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + 
$log.info("in_kubestate_deployments::enumerate: using data collection excludeNameSpaces: #{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 7cc2f3a75..ac1691afb 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -89,7 +89,7 @@ def enumerate @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kubestate_hpa::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_kubestate_hpa::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + $log.info("in_kubestate_hpa::enumerate: using data collection excludeNameSpaces: #{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 3e0290137..cafcb5d5f 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -75,7 +75,7 @@ def enumerate() $log.info("in_win_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_win_cadvisor_perf::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + $log.info("in_win_cadvisor_perf::enumerate: using data collection excludeNameSpaces: #{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") end #Resetting this cache so that it is populated with the current set of containers with every call @@ -91,7 +91,7 @@ def enumerate() end @@winNodes.each do |winNode| eventStream = Fluent::MultiEventStream.new - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, @excludeNameSpaces, metricTime: Time.now.utc.iso8601) + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, excludeNameSpaces: @excludeNameSpaces, metricTime: Time.now.utc.iso8601) metricData.each do |record| if !record.empty? eventStream.add(time, record) if record From 4dcd416ce82df8295358e20cfda08e3f471a7335 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 4 Aug 2022 17:47:07 +0530 Subject: [PATCH 259/301] fix bugs --- source/plugins/ruby/extension.rb | 3 +-- source/plugins/ruby/extension_utils.rb | 10 +++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/source/plugins/ruby/extension.rb b/source/plugins/ruby/extension.rb index 5a6dcd6be..19f03f17d 100644 --- a/source/plugins/ruby/extension.rb +++ b/source/plugins/ruby/extension.rb @@ -34,8 +34,7 @@ def get_extension_settings() if !extensionConfigurations.nil? && !extensionConfigurations.empty? extensionConfigurations.each do |extensionConfig| extSettings = extensionConfig["extensionSettings"] - # TODO - can extensionsettings present in multiple extension configurations?? - if !extensionSettings.nil? && !extensionSettings.empty? + if !extSettings.nil? && !extSettings.empty? 
extensionSettings = extSettings end end diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index 5d8ac3f47..af50107dc 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -31,9 +31,9 @@ def getDataCollectionIntervalSeconds extensionSettings = Extension.instance.get_extension_settings() if !extensionSettings.nil? && !extensionSettings.empty? && - extensionSettings.has_key(Constants::EXTENSION_SETTING_DATA_COLLECTION_INTERVAL) + extensionSettings.has_key?(Constants::EXTENSION_SETTING_DATA_COLLECTION_INTERVAL) intervalMinutes = extensionSettings[Constants::EXTENSION_SETTING_DATA_COLLECTION_INTERVAL] - if is_number?(intervalMinutes) && + if intervalMinutes.kind_of?(Integer) && intervalMinutes.to_i >= Constants::DATA_COLLECTION_INTERVAL_MINUTES_MIN && intervalMinutes.to_i <= Constants::DATA_COLLECTION_INTERVAL_MINUTES_MAX collectionIntervalSeconds = 60 * intervalMinutes.to_i @@ -41,7 +41,7 @@ def getDataCollectionIntervalSeconds $log.warn("ExtensionUtils::getDataCollectionIntervalSeconds: dataCollectionIntervalMinutes: #{intervalMinutes} not valid hence using default") end end - rescue => err + rescue => errorStr $log.warn("ExtensionUtils::getDataCollectionIntervalSeconds: failed with an exception: #{errorStr}") end return collectionIntervalSeconds @@ -53,7 +53,7 @@ def getNamespacesToExcludeForDataCollection extensionSettings = Extension.instance.get_extension_settings() if !extensionSettings.nil? && !extensionSettings.empty? && - extensionSettings.has_key(Constants::EXTENSION_SETTING_EXCLUDE_NAMESPACES) + extensionSettings.has_key?(Constants::EXTENSION_SETTING_EXCLUDE_NAMESPACES) namespacesToExclude = extensionSettings[Constants::EXTENSION_SETTING_EXCLUDE_NAMESPACES] if !namespacesToExclude.nil? && !namespacesToExclude.empty? && namespacesToExclude.kind_of?(Array) && namespacesToExclude.length > 0 uniqNamespaces = namespacesToExclude.uniq @@ -62,7 +62,7 @@ def getNamespacesToExcludeForDataCollection $log.warn("ExtensionUtils::getNamespacesToExcludeForDataCollection: excludeNameSpaces: #{namespacesToExclude} not valid hence using default") end end - rescue => err + rescue => errorStr $log.warn("ExtensionUtils::getNamespacesToExcludeForDataCollection: failed with an exception: #{errorStr}") end return excludeNameSpaces From 6b8c858ab6edad815d94391d1571caa28a7200b1 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 4 Aug 2022 18:45:32 +0530 Subject: [PATCH 260/301] fix bugs --- source/plugins/ruby/in_kube_podinventory.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index e54cb5a69..b0b9b4c30 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -286,6 +286,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc begin #begin block start podInventory["items"].each do |item| #podInventory block start + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"],item["metadata"]["namespace"], @excludeNameSpaces ) # pod inventory records podInventoryRecords = getPodInventoryRecords(item, serviceRecords, batchTime) @containerCount += podInventoryRecords.length @@ -383,6 +384,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc if continuationToken.nil? 
# sending kube services inventory records kubeServicesEventStream = Fluent::MultiEventStream.new serviceRecords.each do |kubeServiceRecord| + next unless !KubernetesApiClient.isExcludeResourceItem(kubeServiceRecord["ServiceName"], kubeServiceRecord["namespace"], @excludeNameSpaces) if !kubeServiceRecord.nil? # adding before emit to reduce memory foot print kubeServiceRecord["ClusterId"] = KubernetesApiClient.getClusterId From 00ec4ed1b0c37085d90341be097b67022c2c463d Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 4 Aug 2022 18:53:27 +0530 Subject: [PATCH 261/301] fix bug --- source/plugins/ruby/in_kube_perfinventory.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index 2b8a75d18..524684604 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -139,6 +139,7 @@ def parse_and_emit_records(podInventory, nodeAllocatableRecords, continuationTok begin #begin block start podInventory["items"].each do |item| #podInventory block start + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) nodeName = "" if !item["spec"]["nodeName"].nil? nodeName = item["spec"]["nodeName"] From cabaf4b3f7dda12f0cbff35c38326d38d3c231d3 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 4 Aug 2022 19:06:45 +0530 Subject: [PATCH 262/301] fix bug --- source/plugins/ruby/CAdvisorMetricsAPIClient.rb | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index f8d800efd..b862b9810 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -155,8 +155,8 @@ def getMetrics(winNode: nil, excludeNameSpaces: [], metricTime: Time.now.utc.iso # Checking if we are in windows daemonset and sending only few metrics that are needed for MDM if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 # Container metrics - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem)) - containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, excludeNameSpaces)) + containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, excludeNameSpaces) if containerCpuUsageNanoSecondsRate && !containerCpuUsageNanoSecondsRate.empty? && !containerCpuUsageNanoSecondsRate.nil? 
metricDataItems.concat(containerCpuUsageNanoSecondsRate) end @@ -167,15 +167,15 @@ def getMetrics(winNode: nil, excludeNameSpaces: [], metricTime: Time.now.utc.iso end metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime)) else - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem)) - metricDataItems.concat(getContainerStartTimeMetricItems(metricInfo, hostName, "restartTimeEpoch", metricTime)) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, excludeNameSpaces)) + metricDataItems.concat(getContainerStartTimeMetricItems(metricInfo, hostName, "restartTimeEpoch", metricTime, excludeNameSpaces)) if operatingSystem == "Linux" - metricDataItems.concat(getContainerCpuMetricItems(metricInfo, hostName, "usageNanoCores", Constants::CPU_USAGE_NANO_CORES, metricTime)) - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime, operatingSystem)) + metricDataItems.concat(getContainerCpuMetricItems(metricInfo, hostName, "usageNanoCores", Constants::CPU_USAGE_NANO_CORES, metricTime, excludeNameSpaces)) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime, operatingSystem, excludeNameSpaces)) metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime)) elsif operatingSystem == "Windows" - containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime) + containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, excludeNameSpaces) if containerCpuUsageNanoSecondsRate && !containerCpuUsageNanoSecondsRate.empty? && !containerCpuUsageNanoSecondsRate.nil? metricDataItems.concat(containerCpuUsageNanoSecondsRate) end From dd94fa12099174d0b0ff11d7e13de209d4f4d3d7 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 4 Aug 2022 19:25:43 +0530 Subject: [PATCH 263/301] fix bugs --- source/plugins/ruby/CAdvisorMetricsAPIClient.rb | 16 +++++++++------- source/plugins/ruby/in_cadvisor_perf.rb | 2 +- source/plugins/ruby/in_win_cadvisor_perf.rb | 2 +- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index b862b9810..4182e40d5 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -313,7 +313,7 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met return metricItems end - def getInsightsMetrics(winNode: nil, metricTime: Time.now.utc.iso8601) + def getInsightsMetrics(winNode: nil, excludeNameSpaces: [], metricTime: Time.now.utc.iso8601) metricDataItems = [] begin cAdvisorStats = getSummaryStatsFromCAdvisor(winNode) @@ -332,11 +332,11 @@ def getInsightsMetrics(winNode: nil, metricTime: Time.now.utc.iso8601) operatingSystem = "Linux" end if !metricInfo.nil? 
- metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryTotal", "containerGpumemoryTotalBytes", metricTime)) - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryUsed", "containerGpumemoryUsedBytes", metricTime)) - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "dutyCycle", "containerGpuDutyCycle", metricTime)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryTotal", "containerGpumemoryTotalBytes", metricTime, excludeNameSpaces)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryUsed", "containerGpumemoryUsedBytes", metricTime, excludeNameSpaces)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "dutyCycle", "containerGpuDutyCycle", metricTime, excludeNameSpaces)) - metricDataItems.concat(getPersistentVolumeMetrics(metricInfo, hostName, "usedBytes", Constants::PV_USED_BYTES, metricTime)) + metricDataItems.concat(getPersistentVolumeMetrics(metricInfo, hostName, "usedBytes", Constants::PV_USED_BYTES, metricTime, excludeNameSpaces)) else @Log.warn("Couldn't get Insights metrics information for host: #{hostName} os:#{operatingSystem}") end @@ -347,7 +347,7 @@ def getInsightsMetrics(winNode: nil, metricTime: Time.now.utc.iso8601) return metricDataItems end - def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metricNameToReturn, metricPollTime) + def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metricNameToReturn, metricPollTime, excludeNameSpaces) telemetryTimeDifference = (DateTime.now.to_time.to_i - @@telemetryPVKubeSystemMetricsTimeTracker).abs telemetryTimeDifferenceInMinutes = telemetryTimeDifference / 60 @@ -358,6 +358,7 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric metricInfo = metricJSON metricInfo["pods"].each do |pod| podNamespace = pod["podRef"]["namespace"] + next unless !KubernetesApiClient.isExcludeResourceItem(pod["podRef"]["name"], podNamespace, excludeNameSpaces) excludeNamespace = false if (podNamespace.downcase == "kube-system") && @pvKubeSystemCollectionMetricsEnabled == "false" excludeNamespace = true @@ -419,7 +420,7 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric return metricItems end - def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCollect, metricNametoReturn, metricPollTime) + def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCollect, metricNametoReturn, metricPollTime, excludeNameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId clusterName = KubernetesApiClient.getClusterName @@ -429,6 +430,7 @@ def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCo podUid = pod["podRef"]["uid"] podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, excludeNameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index e0cfb3a37..3fb5816c0 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -96,7 +96,7 @@ def enumerate() begin if !@@isWindows.nil? 
&& @@isWindows == false containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, metricTime: batchTime)) + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, excludeNameSpaces: @excludeNameSpaces, metricTime: batchTime)) containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index cafcb5d5f..6288a5140 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -106,7 +106,7 @@ def enumerate() #start GPU InsightsMetrics items begin containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, metricTime: Time.now.utc.iso8601)) + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, excludeNameSpaces: @excludeNameSpaces, metricTime: Time.now.utc.iso8601)) insightsMetricsEventStream = Fluent::MultiEventStream.new containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| From 2e3b0549052c48eef1ce58e0019c916c74df5302 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 5 Aug 2022 15:25:29 +0530 Subject: [PATCH 264/301] fix bug --- source/plugins/ruby/KubernetesApiClient.rb | 2 +- source/plugins/ruby/in_kube_podinventory.rb | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index ef064eb3d..aea941a14 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -1409,7 +1409,7 @@ def isExcludeResourceItem(resourceName, resourceNamespace, excludeNameSpaces) # dont exclude agent related data if !resourceName.nil? && !resourceName.empty? && resourceName.start_with?("omsagent") && resourceNamespace.eql?("kube-system") isExclude = false - elsif !resourceNamespace.nil? && !resourceNamespace.empty? && !excludeNameSpaces.nil? && !excludeNameSpaces.empty? && excludeNameSpaces.length > 0 && excludeNameSpaces.includee?(resourceNamespace) + elsif !resourceNamespace.nil? && !resourceNamespace.empty? && !excludeNameSpaces.nil? && !excludeNameSpaces.empty? && excludeNameSpaces.length > 0 && excludeNameSpaces.include?(resourceNamespace) isExclude = true end rescue => errorStr diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index b0b9b4c30..16229485e 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -236,10 +236,10 @@ def enumerate(podList = nil) telemetryProperties["WINDOWS_CONTAINER_RECORDS_CACHE_SIZE_KB"] = @windowsContainerRecordsCacheSizeBytes / 1024 end if !@excludeNameSpaces.nil? && !@excludeNameSpaces.empty? 
&& @excludeNameSpaces.length > 0 - telemetry["DATACOLLECTION_EXCLUDED_NAMESPACES"] = @excludeNameSpaces + telemetryProperties["DATA_COLLECTION_EXCLUDED_NAMESPACES"] = @excludeNameSpaces end if @run_interval > 60 - telemetry["DATACOLLECTION_INTERVAL_MINUTES"] = @run_interval / 60 + telemetryProperties["DATA_COLLECTION_INTERVAL_MINUTES"] = @run_interval / 60 end ApplicationInsightsUtility.sendCustomEvent("KubePodInventoryHeartBeatEvent", telemetryProperties) ApplicationInsightsUtility.sendMetricTelemetry("PodCount", @podCount, {}) From a134c099eac3afcb91a317f5236ea2abd1361e5b Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 5 Aug 2022 15:57:16 +0530 Subject: [PATCH 265/301] add known cve to ignore list --- .trivyignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.trivyignore b/.trivyignore index 91ee2f5bb..b2bc7a04d 100644 --- a/.trivyignore +++ b/.trivyignore @@ -7,3 +7,4 @@ CVE-2022-1996 #dpkg vulnerability in ubuntu CVE-2022-1304 +CVE-2022-2509 \ No newline at end of file From 071ac725c9deb5247886bf6b9006e3cc2eed97af Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sat, 6 Aug 2022 14:52:29 +0530 Subject: [PATCH 266/301] more optimizations --- source/plugins/ruby/in_kube_perfinventory.rb | 24 ++++++++-------- source/plugins/ruby/in_kube_podinventory.rb | 29 +++++++++++--------- 2 files changed, 29 insertions(+), 24 deletions(-) diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index 524684604..1e0f0f331 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -248,6 +248,7 @@ def run_periodic def watch_pods $log.info("in_kube_perfinventory::watch_pods:Start @ #{Time.now.utc.iso8601}") podsResourceVersion = nil + excludeNameSpaces = [] loop do begin if podsResourceVersion.nil? @@ -256,6 +257,9 @@ def watch_pods @podItemsCache.clear() } continuationToken = nil + if ExtensionUtils.isAADMSIAuthMode() + excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() + end resourceUri = "pods?limit=#{@PODS_CHUNK_SIZE}" $log.info("in_kube_perfinventory::watch_pods:Getting pods from Kube API: #{resourceUri} @ #{Time.now.utc.iso8601}") continuationToken, podInventory, responseCode = KubernetesApiClient.getResourcesAndContinuationTokenV2(resourceUri) @@ -268,7 +272,7 @@ def watch_pods if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) $log.info("in_kube_perfinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) @@ -300,7 +304,7 @@ def watch_pods if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) 
$log.info("in_kube_perfinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) @@ -351,10 +355,9 @@ def watch_pods # We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! break end - if !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) - if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) - key = item["metadata"]["uid"] - if !key.nil? && !key.empty? + if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) if !podItem.nil? && !podItem.empty? @podCacheMutex.synchronize { @@ -363,18 +366,17 @@ def watch_pods else $log.warn "in_kube_perfinventory::watch_pods:Received podItem is empty or nil @ #{Time.now.utc.iso8601}" end - else + else $log.warn "in_kube_perfinventory::watch_pods:Received poduid either nil or empty @ #{Time.now.utc.iso8601}" - end - elsif notice["type"] == "DELETED" + end + elsif notice["type"] == "DELETED" key = item["metadata"]["uid"] if !key.nil? && !key.empty? @podCacheMutex.synchronize { @podItemsCache.delete(key) } end - end - end + end when "ERROR" podsResourceVersion = nil $log.warn("in_kube_perfinventory::watch_pods:ERROR event with :#{notice["object"]} @ #{Time.now.utc.iso8601}") diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 16229485e..f07ef6c4f 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -717,6 +717,7 @@ def getServiceNameFromLabels(namespace, labels, serviceRecords) def watch_pods $log.info("in_kube_podinventory::watch_pods:Start @ #{Time.now.utc.iso8601}") podsResourceVersion = nil + excludeNameSpaces = [] # invoke getWindowsNodes to handle scenario where windowsNodeNameCache not populated yet on containerstart winNodes = KubernetesApiClient.getWindowsNodesArray() if winNodes.length > 0 @@ -735,6 +736,9 @@ def watch_pods @windowsNodeNameCacheMutex.synchronize { currentWindowsNodeNameList = @windowsNodeNameListCache.dup } + if ExtensionUtils.isAADMSIAuthMode() + excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() + end continuationToken = nil resourceUri = "pods?limit=#{@PODS_CHUNK_SIZE}" $log.info("in_kube_podinventory::watch_pods:Getting pods from Kube API: #{resourceUri} @ #{Time.now.utc.iso8601}") @@ -749,7 +753,7 @@ def watch_pods $log.info("in_kube_podinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| # exclude resource item if this in excluded namespaces - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], excludeNameSpaces) key = 
item["metadata"]["uid"] if !key.nil? && !key.empty? nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? item["spec"]["nodeName"] : "" @@ -790,7 +794,7 @@ def watch_pods $log.info("in_kube_podinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| # exclude resource item if this in excluded namespaces - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? item["spec"]["nodeName"] : "" @@ -849,11 +853,9 @@ def watch_pods # We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! break end - # exclude resource item if this in excluded namespaces - if !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) - if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) - key = item["metadata"]["uid"] - if !key.nil? && !key.empty? + if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? currentWindowsNodeNameList = [] @windowsNodeNameCacheMutex.synchronize { currentWindowsNodeNameList = @windowsNodeNameListCache.dup @@ -883,7 +885,6 @@ def watch_pods @podCacheMutex.synchronize { @podItemsCache.delete(key) } - end end end when "ERROR" @@ -921,6 +922,7 @@ def watch_pods def watch_services $log.info("in_kube_podinventory::watch_services:Start @ #{Time.now.utc.iso8601}") servicesResourceVersion = nil + excludeNameSpaces = [] loop do begin if servicesResourceVersion.nil? @@ -928,6 +930,9 @@ def watch_services @serviceCacheMutex.synchronize { @serviceItemsCache.clear() } + if ExtensionUtils.isAADMSIAuthMode() + excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() + end $log.info("in_kube_podinventory::watch_services:Getting services from Kube API @ #{Time.now.utc.iso8601}") responseCode, serviceInfo = KubernetesApiClient.getKubeResourceInfoV2("services") if responseCode.nil? || responseCode != "200" @@ -945,7 +950,7 @@ def watch_services $log.info("in_kube_podinventory::watch_services:number of service items #{serviceInventory["items"].length} @ #{Time.now.utc.iso8601}") serviceInventory["items"].each do |item| # exclude resource item if this in excluded namespaces - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? serviceItem = KubernetesApiClient.getOptimizedItem("services", item) @@ -997,8 +1002,7 @@ def watch_services break end # exclude resource item if this in excluded namespaces - if !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) - if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) + if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) key = item["metadata"]["uid"] if !key.nil? && !key.empty? 
serviceItem = KubernetesApiClient.getOptimizedItem("services", item) @@ -1012,14 +1016,13 @@ def watch_services else $log.warn "in_kube_podinventory::watch_services:Received serviceuid either nil or empty @ #{Time.now.utc.iso8601}" end - elsif notice["type"] == "DELETED" + elsif notice["type"] == "DELETED" key = item["metadata"]["uid"] if !key.nil? && !key.empty? @serviceCacheMutex.synchronize { @serviceItemsCache.delete(key) } end - end end when "ERROR" servicesResourceVersion = nil From 8149677914bc7fdf5b835455ed0267ffb6bbd4c5 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sat, 6 Aug 2022 18:01:42 +0530 Subject: [PATCH 267/301] refactor extensionSettings --- .../existingClusterOnboarding.json | 14 ++++--- .../existingClusterParam.json | 4 +- source/plugins/ruby/constants.rb | 10 +++-- source/plugins/ruby/extension.rb | 20 +++++++++- source/plugins/ruby/extension_utils.rb | 38 ++++++++++--------- 5 files changed, 56 insertions(+), 30 deletions(-) diff --git a/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterOnboarding.json index 0a5faf8cc..6e792ebe2 100644 --- a/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterOnboarding.json @@ -32,16 +32,16 @@ "description": "Full Resource ID of the log analitycs workspace that will be used for data destination. For example /subscriptions/00000000-0000-0000-0000-0000-00000000/resourceGroups/ResourceGroupName/providers/Microsoft.operationalinsights/workspaces/ws_xyz" } }, - "dataCollectionIntervalInMinutes": { - "type": "int", + "dataCollectionInterval": { + "type": "string", "metadata": { - "description": "data collection interval for metrics and inventory in minutes" + "description": "Data collection interval e.g. \"5m\" for metrics and inventory. 
Supported value range from 1m to 30m" } }, "excludeNamespacesForDataCollection": { "type": "array", "metadata": { - "description": "Kubernetes namespaces to exclude for the data collection" + "description": "Kubernetes namespaces to exclude for the data collection of inventory and metrics" } } }, @@ -87,8 +87,10 @@ "Microsoft-ContainerInsights-Group-Default" ], "extensionSettings": { - "dataCollectionIntervalMinutes": "[parameters('dataCollectionIntervalInMinutes')]", - "excludeNameSpaces": "[parameters('excludeNamespacesForDataCollection')]" + "dataCollectionSettings" : { + "interval": "[parameters('dataCollectionInterval')]", + "excludeNameSpaces": "[parameters('excludeNamespacesForDataCollection')]" + } }, "extensionName": "ContainerInsights" } diff --git a/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterParam.json b/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterParam.json index 3f313c754..f73d38ecd 100644 --- a/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterParam.json +++ b/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterParam.json @@ -21,8 +21,8 @@ "": "" } }, - "dataCollectionIntervalInMinutes": { - "value" : 1 + "dataCollectionInterval": { + "value" : "1m" }, "excludeNamespacesForDataCollection": { "value": [ "kube-system"] diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index 07463fdc1..d77dd1794 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -146,10 +146,12 @@ class Constants IMDS_TOKEN_PATH_FOR_WINDOWS = "c:/etc/imds-access-token/token" # extension settings and these should match with DCR extension settings - EXTENSION_SETTING_DATA_COLLECTION_INTERVAL = "dataCollectionIntervalMinutes" - EXTENSION_SETTING_EXCLUDE_NAMESPACES = "excludeNameSpaces" + EXTENSION_SETTINGS = "extensionSettings" + EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS = "dataCollectionSettings" + EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL = "interval" + EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_EXCLUDE_NAMESPACES = "excludeNameSpaces" # min and max data collection interval minutes - DATA_COLLECTION_INTERVAL_MINUTES_MIN = 1 - DATA_COLLECTION_INTERVAL_MINUTES_MAX = 60 + EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL_MIN = 1 + EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL_MAX = 30 end diff --git a/source/plugins/ruby/extension.rb b/source/plugins/ruby/extension.rb index 19f03f17d..f73fcf1b7 100644 --- a/source/plugins/ruby/extension.rb +++ b/source/plugins/ruby/extension.rb @@ -33,7 +33,7 @@ def get_extension_settings() extensionConfigurations = get_extension_configs() if !extensionConfigurations.nil? && !extensionConfigurations.empty? extensionConfigurations.each do |extensionConfig| - extSettings = extensionConfig["extensionSettings"] + extSettings = extensionConfig[Constants::EXTENSION_SETTINGS] if !extSettings.nil? && !extSettings.empty? extensionSettings = extSettings end @@ -46,6 +46,24 @@ def get_extension_settings() return extensionSettings end + def get_extension_data_collection_settings() + dataCollectionSettings = Hash.new + begin + extensionSettings = get_extension_settings() + if !extensionSettings.nil? && !extensionSettings.empty? + dcSettings = extensionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS] + if !dcSettings.nil? && !dcSettings.empty? 
+          dataCollectionSettings = dcSettings
+        end
+      end
+    rescue =>errorStr
+      $log.warn("Extension::get_extension_data_collection_settings failed: #{errorStr}")
+      ApplicationInsightsUtility.sendExceptionTelemetry(errorStr)
+    end
+    return dataCollectionSettings
+  end
+
+
   def get_stream_mapping()
     dataTypeToStreamIdMap = Hash.new
     begin
diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb
index af50107dc..c1364e926 100644
--- a/source/plugins/ruby/extension_utils.rb
+++ b/source/plugins/ruby/extension_utils.rb
@@ -28,18 +28,22 @@ def isAADMSIAuthMode()
     def getDataCollectionIntervalSeconds
       collectionIntervalSeconds = 60
       begin
-        extensionSettings = Extension.instance.get_extension_settings()
-        if !extensionSettings.nil? &&
-           !extensionSettings.empty? &&
-           extensionSettings.has_key?(Constants::EXTENSION_SETTING_DATA_COLLECTION_INTERVAL)
-          intervalMinutes = extensionSettings[Constants::EXTENSION_SETTING_DATA_COLLECTION_INTERVAL]
-          if intervalMinutes.kind_of?(Integer) &&
-             intervalMinutes.to_i >= Constants::DATA_COLLECTION_INTERVAL_MINUTES_MIN &&
-             intervalMinutes.to_i <= Constants::DATA_COLLECTION_INTERVAL_MINUTES_MAX
-            collectionIntervalSeconds = 60 * intervalMinutes.to_i
-          else
-            $log.warn("ExtensionUtils::getDataCollectionIntervalSeconds: dataCollectionIntervalMinutes: #{intervalMinutes} not valid hence using default")
-          end
+        dataCollectionSettings = Extension.instance.get_extension_data_collection_settings()
+        if !dataCollectionSettings.nil? &&
+           !dataCollectionSettings.empty? &&
+           dataCollectionSettings.has_key?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL)
+          interval = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL]
+          re = /^[0-9]+[m]$/
+          if !re.match(interval).nil?
+            intervalMinutes = interval.dup.chomp!("m").to_i
+            if intervalMinutes.between?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL_MIN, Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL_MAX)
+              collectionIntervalSeconds = intervalMinutes * 60
+            else
+              $log.warn("ExtensionUtils::getDataCollectionIntervalSeconds: interval value not in the range 1m to 30m hence using default, 60s")
+            end
+          else
+            $log.warn("ExtensionUtils::getDataCollectionIntervalSeconds: interval value is invalid hence using default, 60s")
+          end
         end
       rescue => errorStr
         $log.warn("ExtensionUtils::getDataCollectionIntervalSeconds: failed with an exception: #{errorStr}")
@@ -50,11 +54,11 @@ def getDataCollectionIntervalSeconds
     def getNamespacesToExcludeForDataCollection
       excludeNameSpaces = []
       begin
-        extensionSettings = Extension.instance.get_extension_settings()
-        if !extensionSettings.nil? &&
-           !extensionSettings.empty? &&
-           extensionSettings.has_key?(Constants::EXTENSION_SETTING_EXCLUDE_NAMESPACES)
-          namespacesToExclude = extensionSettings[Constants::EXTENSION_SETTING_EXCLUDE_NAMESPACES]
+        dataCollectionSettings = Extension.instance.get_extension_data_collection_settings()
+        if !dataCollectionSettings.nil? &&
+           !dataCollectionSettings.empty? &&
+           dataCollectionSettings.has_key?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_EXCLUDE_NAMESPACES)
+          namespacesToExclude = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_EXCLUDE_NAMESPACES]
         if !namespacesToExclude.nil? && !namespacesToExclude.empty?
&& namespacesToExclude.kind_of?(Array) && namespacesToExclude.length > 0 uniqNamespaces = namespacesToExclude.uniq excludeNameSpaces = uniqNamespaces.map(&:downcase) From 29b3a30e5d5a95d6bdb737b59356c2dad97eac06 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sun, 7 Aug 2022 05:55:00 +0530 Subject: [PATCH 268/301] fix minor issue --- source/plugins/ruby/extension.rb | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/source/plugins/ruby/extension.rb b/source/plugins/ruby/extension.rb index f73fcf1b7..af8ebc339 100644 --- a/source/plugins/ruby/extension.rb +++ b/source/plugins/ruby/extension.rb @@ -33,9 +33,11 @@ def get_extension_settings() extensionConfigurations = get_extension_configs() if !extensionConfigurations.nil? && !extensionConfigurations.empty? extensionConfigurations.each do |extensionConfig| - extSettings = extensionConfig[Constants::EXTENSION_SETTINGS] - if !extSettings.nil? && !extSettings.empty? - extensionSettings = extSettings + if !extensionConfig.nil? && !extensionConfig.empty? + extSettings = extensionConfig[Constants::EXTENSION_SETTINGS] + if !extSettings.nil? && !extSettings.empty? + extensionSettings = extSettings + end end end end From d83696690aa96bd2e7b2adb8fed90d6391caa072 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sun, 7 Aug 2022 06:05:25 +0530 Subject: [PATCH 269/301] arm templates for arc k8s --- .../existingClusterParam.json | 2 +- .../existingClusterOnboarding.json | 220 ++++++++++++++++++ .../existingClusterParam.json | 34 +++ 3 files changed, 255 insertions(+), 1 deletion(-) create mode 100644 scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterOnboarding.json create mode 100644 scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterParam.json diff --git a/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterParam.json b/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterParam.json index f73d38ecd..8c3bb4989 100644 --- a/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterParam.json +++ b/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterParam.json @@ -22,7 +22,7 @@ } }, "dataCollectionInterval": { - "value" : "1m" + "value" : "5m" }, "excludeNamespacesForDataCollection": { "value": [ "kube-system"] diff --git a/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterOnboarding.json new file mode 100644 index 000000000..c0f18041c --- /dev/null +++ b/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterOnboarding.json @@ -0,0 +1,220 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "clusterResourceId": { + "type": "string", + "metadata": { + "description": "Resource Id of the Azure Arc Connected Cluster" + } + }, + "clusterRegion": { + "type": "string", + "metadata": { + "description": "Location of the Azure Arc Connected Cluster Resource e.g. \"eastus\"" + } + }, + "workspaceResourceId": { + "type": "string", + "metadata": { + "description": "Azure Monitor Log Analytics Resource ID" + } + }, + "workspaceRegion": { + "type": "string", + "metadata": { + "description": "Azure Monitor Log Analytics Workspace region e.g. 
\"eastus\"" + } + }, + "workspaceDomain": { + "type": "string", + "allowedValues": [ + "opinsights.azure.com", + "opinsights.azure.cn", + "opinsights.azure.us", + "opinsights.azure.eaglex.ic.gov", + "opinsights.azure.microsoft.scloud" + ], + "defaultValue": "opinsights.azure.com", + "metadata": { + "description": "Azure Monitor Log Analytics Workspace Domain e.g. opinsights.azure.com" + } + }, + "resourceTagValues": { + "type": "object", + "metadata": { + "description": "Existing or new tags to use on Arc K8s ContainerInsights extension resources" + } + }, + "dataCollectionInterval": { + "type": "string", + "metadata": { + "description": "Data collection interval e.g. \"5m\" for metrics and inventory. Supported value range from 1m to 30m and default is 1m" + } + }, + "excludeNamespacesForDataCollection": { + "type": "array", + "metadata": { + "description": "Kubernetes namespaces to exclude for the data collection of inventory and metrics" + } + } + }, + "variables": { + "clusterSubscriptionId": "[split(parameters('clusterResourceId'),'/')[2]]", + "clusterResourceGroup": "[split(parameters('clusterResourceId'),'/')[4]]", + "clusterName": "[split(parameters('clusterResourceId'),'/')[8]]", + "clusterLocation": "[replace(parameters('clusterRegion'),' ', '')]", + "workspaceSubscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]", + "workspaceResourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]", + "dcrName": "[Concat('MSCI', '-', variables('clusterName'), '-', variables('clusterLocation'))]", + "associationName": "ContainerInsightsExtension", + "dataCollectionRuleId": "[resourceId(variables('workspaceSubscriptionId'), variables('workspaceResourceGroup'), 'Microsoft.Insights/dataCollectionRules', variables('dcrName'))]" + }, + "resources": [ + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('arc-k8s-monitoring-msi-dcr', '-', uniqueString(variables('dcrName')))]", + "apiVersion": "2017-05-10", + "subscriptionId": "[variables('workspaceSubscriptionId')]", + "resourceGroup": "[variables('workspaceResourceGroup')]", + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "type": "Microsoft.Insights/dataCollectionRules", + "apiVersion": "2021-04-01", + "name": "[variables('dcrName')]", + "location": "[parameters('workspaceRegion')]", + "tags": "[parameters('resourceTagValues')]", + "kind": "Linux", + "properties": { + "dataSources": { + "extensions": [ + { + "name": "ContainerInsightsExtension", + "streams": [ + "Microsoft-ContainerInsights-Group-Default" + ], + "extensionSettings": { + "dataCollectionSettings" : { + "interval": "[parameters('dataCollectionInterval')]", + "excludeNameSpaces": "[parameters('excludeNamespacesForDataCollection')]" + } + }, + "extensionName": "ContainerInsights" + } + ] + }, + "destinations": { + "logAnalytics": [ + { + "workspaceResourceId": "[parameters('workspaceResourceId')]", + "name": "ciworkspace" + } + ] + }, + "dataFlows": [ + { + "streams": [ + "Microsoft-ContainerInsights-Group-Default" + ], + "destinations": [ + "ciworkspace" + ] + } + ] + } + } + ] + }, + "parameters": {} + } + }, + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('arc-k8s-monitoring-msi-dcra', '-', uniqueString(parameters('clusterResourceId')))]", + "apiVersion": "2017-05-10", + "subscriptionId": "[variables('clusterSubscriptionId')]", + 
"resourceGroup": "[variables('clusterResourceGroup')]", + "dependsOn": [ + "[Concat('arc-k8s-monitoring-msi-dcr', '-', uniqueString(variables('dcrName')))]" + ], + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "type": "Microsoft.Kubernetes/connectedClusters/providers/dataCollectionRuleAssociations", + "name": "[concat(variables('clusterName'),'/microsoft.insights/', variables('associationName'))]", + "apiVersion": "2021-04-01", + "properties": { + "description": "Association of data collection rule. Deleting this association will break the data collection for this AKS Cluster.", + "dataCollectionRuleId": "[variables('dataCollectionRuleId')]" + } + } + ] + }, + "parameters": {} + } + }, + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('arc-k8s-ci-extension', '-', uniqueString(parameters('clusterResourceId')))]", + "apiVersion": "2019-05-01", + "subscriptionId": "[split(parameters('clusterResourceId'),'/')[2]]", + "resourceGroup": "[split(parameters('clusterResourceId'),'/')[4]]", + "dependsOn": [ + "[Concat('arc-k8s-monitoring-msi-dcra', '-', uniqueString(parameters('clusterResourceId')))]" + ], + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "type": "Microsoft.KubernetesConfiguration/extensions", + "apiVersion": "2021-09-01", + "name": "azuremonitor-containers", + "location": "[parameters('clusterRegion')]", + "identity": { + "type": "systemassigned" + }, + "properties": { + "extensionType": "Microsoft.AzureMonitor.Containers", + "configurationSettings": { + "logAnalyticsWorkspaceResourceID": "[parameters('workspaceResourceId')]", + "omsagent.domain": "[parameters('workspaceDomain')]", + "omsagent.useAADAuth": "true" + }, + "configurationProtectedSettings": { + "omsagent.secret.wsid": "[reference(parameters('workspaceResourceId'), '2015-03-20').customerId]", + "omsagent.secret.key": "[listKeys(parameters('workspaceResourceId'), '2015-03-20').primarySharedKey]" + }, + "autoUpgradeMinorVersion": true, + "releaseTrain": "Stable", + "scope": { + "Cluster": { + "releaseNamespace": "azuremonitor-containers" + } + } + }, + "scope": "[concat('Microsoft.Kubernetes/connectedClusters/', split(parameters('clusterResourceId'),'/')[8])]" + } + ] + } + } + } + ] +} diff --git a/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterParam.json new file mode 100644 index 000000000..e4745d9f7 --- /dev/null +++ b/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterParam.json @@ -0,0 +1,34 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "clusterResourceId": { + "value": "/subscriptions//resourceGroups//providers/Microsoft.Kubernetes/connectedClusters/" + }, + "clusterRegion": { + "value": "" + }, + "workspaceResourceId": { + "value": "/subscriptions//resourcegroups//providers/microsoft.operationalinsights/workspaces/" + }, + "workspaceRegion": { + "value": "" + }, + "workspaceDomain": { + "value": "" + }, + "resourceTagValues": { 
+ "value": { + "": "", + "": "", + "": "" + } + }, + "dataCollectionInterval": { + "value" : "5m" + }, + "excludeNamespacesForDataCollection": { + "value": [ "kube-system"] + } + } +} From 177872b319fc39885a4c95b97387978f1374c582 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sun, 7 Aug 2022 10:25:02 +0530 Subject: [PATCH 270/301] use total cache for ns filtering --- source/plugins/ruby/in_kube_perfinventory.rb | 6 ------ source/plugins/ruby/in_kube_podinventory.rb | 13 ------------- 2 files changed, 19 deletions(-) diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index 1e0f0f331..c2d63b3cf 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -248,7 +248,6 @@ def run_periodic def watch_pods $log.info("in_kube_perfinventory::watch_pods:Start @ #{Time.now.utc.iso8601}") podsResourceVersion = nil - excludeNameSpaces = [] loop do begin if podsResourceVersion.nil? @@ -257,9 +256,6 @@ def watch_pods @podItemsCache.clear() } continuationToken = nil - if ExtensionUtils.isAADMSIAuthMode() - excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - end resourceUri = "pods?limit=#{@PODS_CHUNK_SIZE}" $log.info("in_kube_perfinventory::watch_pods:Getting pods from Kube API: #{resourceUri} @ #{Time.now.utc.iso8601}") continuationToken, podInventory, responseCode = KubernetesApiClient.getResourcesAndContinuationTokenV2(resourceUri) @@ -272,7 +268,6 @@ def watch_pods if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) $log.info("in_kube_perfinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) @@ -304,7 +299,6 @@ def watch_pods if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) $log.info("in_kube_perfinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index f07ef6c4f..42afd62dc 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -736,9 +736,6 @@ def watch_pods @windowsNodeNameCacheMutex.synchronize { currentWindowsNodeNameList = @windowsNodeNameListCache.dup } - if ExtensionUtils.isAADMSIAuthMode() - excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - end continuationToken = nil resourceUri = "pods?limit=#{@PODS_CHUNK_SIZE}" $log.info("in_kube_podinventory::watch_pods:Getting pods from Kube API: #{resourceUri} @ #{Time.now.utc.iso8601}") @@ -752,8 +749,6 @@ def watch_pods if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) 
$log.info("in_kube_podinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| - # exclude resource item if this in excluded namespaces - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? item["spec"]["nodeName"] : "" @@ -793,8 +788,6 @@ def watch_pods if (podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) $log.info("in_kube_podinventory::watch_pods:number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") podInventory["items"].each do |item| - # exclude resource item if this in excluded namespaces - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? item["spec"]["nodeName"] : "" @@ -922,7 +915,6 @@ def watch_pods def watch_services $log.info("in_kube_podinventory::watch_services:Start @ #{Time.now.utc.iso8601}") servicesResourceVersion = nil - excludeNameSpaces = [] loop do begin if servicesResourceVersion.nil? @@ -930,9 +922,6 @@ def watch_services @serviceCacheMutex.synchronize { @serviceItemsCache.clear() } - if ExtensionUtils.isAADMSIAuthMode() - excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - end $log.info("in_kube_podinventory::watch_services:Getting services from Kube API @ #{Time.now.utc.iso8601}") responseCode, serviceInfo = KubernetesApiClient.getKubeResourceInfoV2("services") if responseCode.nil? || responseCode != "200" @@ -949,8 +938,6 @@ def watch_services if (serviceInventory.key?("items") && !serviceInventory["items"].nil? && !serviceInventory["items"].empty?) $log.info("in_kube_podinventory::watch_services:number of service items #{serviceInventory["items"].length} @ #{Time.now.utc.iso8601}") serviceInventory["items"].each do |item| - # exclude resource item if this in excluded namespaces - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], excludeNameSpaces) key = item["metadata"]["uid"] if !key.nil? && !key.empty? serviceItem = KubernetesApiClient.getOptimizedItem("services", item) From 7f86f9cd93909969c3350062d39c8f89d4ebcf51 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sun, 11 Sep 2022 21:17:50 -0700 Subject: [PATCH 271/301] use the preview images for private preview release --- .../existingClusterOnboarding.json | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterOnboarding.json index c0f18041c..c2ea7853c 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterOnboarding.json @@ -45,19 +45,19 @@ "metadata": { "description": "Existing or new tags to use on Arc K8s ContainerInsights extension resources" } - }, + }, "dataCollectionInterval": { "type": "string", "metadata": { "description": "Data collection interval e.g. 
\"5m\" for metrics and inventory. Supported value range from 1m to 30m and default is 1m" } - }, + }, "excludeNamespacesForDataCollection": { "type": "array", "metadata": { "description": "Kubernetes namespaces to exclude for the data collection of inventory and metrics" } - } + } }, "variables": { "clusterSubscriptionId": "[split(parameters('clusterResourceId'),'/')[2]]", @@ -104,7 +104,7 @@ "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", "excludeNameSpaces": "[parameters('excludeNamespacesForDataCollection')]" - } + } }, "extensionName": "ContainerInsights" } @@ -196,7 +196,9 @@ "configurationSettings": { "logAnalyticsWorkspaceResourceID": "[parameters('workspaceResourceId')]", "omsagent.domain": "[parameters('workspaceDomain')]", - "omsagent.useAADAuth": "true" + "omsagent.useAADAuth": "true", + "omsagent.image.tag": "ciprodpreview08082022", + "omsagent.image.tagWindows": "win-ciprodpreview08082022" }, "configurationProtectedSettings": { "omsagent.secret.wsid": "[reference(parameters('workspaceResourceId'), '2015-03-20').customerId]", From e568766cff67c3d1e54c4b2e03343706ea52c636 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 21 Sep 2022 19:50:26 -0700 Subject: [PATCH 272/301] fix merge issues --- .trivyignore | 2 +- README.md | 17 ++--------------- test/e2e/src/core/Dockerfile | 5 ----- 3 files changed, 3 insertions(+), 21 deletions(-) diff --git a/.trivyignore b/.trivyignore index 8d760982a..c88fb765f 100644 --- a/.trivyignore +++ b/.trivyignore @@ -7,4 +7,4 @@ CVE-2022-1996 CVE-2022-27664 #dpkg vulnerability in ubuntu -CVE-2022-2526 +CVE-2022-2526 \ No newline at end of file diff --git a/README.md b/README.md index ca628680e..31f064f3d 100644 --- a/README.md +++ b/README.md @@ -104,7 +104,7 @@ The general directory structure is: # Branches -- We are using a single branch which has all the code in development and we will be releasing from this branch itself. +- We are using a single branch which has all the code in development and we will be releasing from this branch itself. - `ci_prod` branch contains codebase version in development. To contribute: create your private branch off of `ci_prod`, make changes and use pull request to merge back to `ci_prod`. @@ -340,7 +340,7 @@ Here are the instructions to onboard the feature branch to Azure Dev Ops pipelin Integrated to Azure DevOps release pipeline for the ci_prod branch. With this, for every commit to ci_prod branch, latest bits automatically deployed to DEV AKS clusters in Build subscription. -When releasing the agent, we have a separate Azure DevOps pipeline which needs to be run to publish the image to prod MCR and our PROD AKS clusters. +When releasing the agent, we have a separate Azure DevOps pipeline which needs to be run to publish the image to prod MCR and our PROD AKS clusters. For development, agent image will be in this format mcr.microsoft.com/azuremonitor/containerinsights/cidev:`
    `-. For releases, agent will be in this format mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod`
    -`. @@ -355,19 +355,6 @@ Navigate to Kubernetes directory and update the yamls with latest docker image o For our single branch ci_prod, automatically deployed latest yaml with latest agent image (which automatically built by the azure devops pipeline) onto CIDEV AKS clusters in build subscription. So, you can use CIDEV AKS cluster to validate E2E. Similarly, you can set up build and release pipelines for your feature branch. -# Testing MSI Auth Mode Using Yaml - - 1. Enable Monitoring addon with Managed Idenity Auth Mode either using Portal or CLI or Template - 2. Deploy [ARM template](./scripts/onboarding/aks/onboarding-using-msi-auth/) with enabled = false to create DCR, DCR-A and link the workspace to Portal - > Note - Make sure to update the parameter values in existingClusterParam.json file and have enabled = false in template file - `az deployment group create --resource-group --template-file ./existingClusterOnboarding.json --parameters @./existingClusterParam.json` - 3. Get the MSI token (which is valid for 24 hrs.) value via `kubectl get secrets -n kube-system omsagent-aad-msi-token -o=jsonpath='{.data.token}'` - 4. Disable Monitoring addon via `az aks disable-addons -a monitoring -g -n ` - 5. Uncomment MSI auth related yaml lines, replace all the placeholder values, MSI token value and image tag in the omsagent.yaml - 6. Deploy the omsagent.yaml via `kubectl apply -f omsagent.yaml` - > Note: use the image toggle for release E2E validation - 7. validate E2E for LA & Metrics data flows, and other scenarios - # Testing MSI Auth Mode Using Yaml 1. Enable Monitoring addon with Managed Idenity Auth Mode either using Portal or CLI or Template diff --git a/test/e2e/src/core/Dockerfile b/test/e2e/src/core/Dockerfile index ba73e74f7..499ec3edb 100644 --- a/test/e2e/src/core/Dockerfile +++ b/test/e2e/src/core/Dockerfile @@ -13,14 +13,9 @@ RUN apt-get update && apt-get -y upgrade && \ CLI_REPO=$(lsb_release -cs) && \ echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ ${CLI_REPO} main" \ > /etc/apt/sources.list.d/azure-cli.list && \ - curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg && \ - echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | tee /etc/apt/sources.list.d/kubernetes.list && \ - apt-get update && \ - apt-get install -y azure-cli kubectl && \ rm -rf /var/lib/apt/lists/* RUN python3 -m pip install junit_xml - COPY ./core/e2e_tests.sh / COPY ./core/setup_failure_handler.py / COPY ./core/pytest.ini /e2etests/ From eb6c689ac31dcb39a33ab21f50f0f60617568da7 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 21 Sep 2022 19:53:43 -0700 Subject: [PATCH 273/301] fix merge issues --- source/plugins/ruby/kubernetes_container_inventory.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/plugins/ruby/kubernetes_container_inventory.rb b/source/plugins/ruby/kubernetes_container_inventory.rb index 60aea5c78..24322b9e2 100644 --- a/source/plugins/ruby/kubernetes_container_inventory.rb +++ b/source/plugins/ruby/kubernetes_container_inventory.rb @@ -3,7 +3,7 @@ class KubernetesContainerInventory require "json" - require "time" + require "time" require_relative "omslog" require_relative "ApplicationInsightsUtility" From 6aa115ec7d2f9811b022e8377a14c75e23b76f3c Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 21 Sep 2022 20:02:26 -0700 Subject: [PATCH 274/301] create dcr in cluster rg --- 
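As an illustration of the dataCollectionSettings block that the DCR templates in this series emit and that extension_utils.rb consumes, here is a minimal standalone Ruby sketch; the settings hash, constant values, and output line are assumptions chosen for the example, mirroring the constants.rb and extension_utils.rb changes above rather than reproducing any single commit.

    # Illustrative sketch only: parse a dataCollectionSettings payload the way the agent does.
    # The hash literal below is an assumed example, not content from any patch in this series.
    INTERVAL_MINUTES_MIN = 1   # assumed to match Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL_MIN
    INTERVAL_MINUTES_MAX = 30  # assumed to match Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL_MAX

    settings = {
      "interval" => "5m",
      "excludeNameSpaces" => ["kube-system", "gatekeeper-system"],
    }

    # Interval: accept "<digits>m", keep it within the supported range, otherwise fall back to 60s.
    interval_seconds = 60
    if settings["interval"] =~ /^[0-9]+m$/
      minutes = settings["interval"].chomp("m").to_i
      interval_seconds = minutes * 60 if minutes.between?(INTERVAL_MINUTES_MIN, INTERVAL_MINUTES_MAX)
    end

    # Namespaces: de-duplicate and lowercase, as getNamespacesToExcludeForDataCollection does.
    exclude_namespaces = (settings["excludeNameSpaces"] || []).uniq.map(&:downcase)

    puts "interval=#{interval_seconds}s excludeNameSpaces=#{exclude_namespaces.inspect}"
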
.../existingClusterOnboarding.json | 10 ++++------ .../existingClusterOnboarding.json | 8 +++----- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterOnboarding.json index 6e792ebe2..8d4689da5 100644 --- a/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterOnboarding.json @@ -50,19 +50,17 @@ "clusterResourceGroup": "[split(parameters('aksResourceId'),'/')[4]]", "clusterName": "[split(parameters('aksResourceId'),'/')[8]]", "clusterLocation": "[replace(parameters('aksResourceLocation'),' ', '')]", - "workspaceSubscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]", - "workspaceResourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]", "dcrName": "[Concat('MSCI', '-', variables('clusterName'), '-', variables('clusterLocation'))]", "associationName": "ContainerInsightsExtension", - "dataCollectionRuleId": "[resourceId(variables('workspaceSubscriptionId'), variables('workspaceResourceGroup'), 'Microsoft.Insights/dataCollectionRules', variables('dcrName'))]" + "dataCollectionRuleId": "[resourceId(variables('clusterSubscriptionId'), variables('clusterResourceGroup'), 'Microsoft.Insights/dataCollectionRules', variables('dcrName'))]" }, "resources": [ { "type": "Microsoft.Resources/deployments", "name": "[Concat('aks-monitoring-msi-dcr', '-', uniqueString(variables('dcrName')))]", "apiVersion": "2017-05-10", - "subscriptionId": "[variables('workspaceSubscriptionId')]", - "resourceGroup": "[variables('workspaceResourceGroup')]", + "subscriptionId": "[variables('clusterSubscriptionId')]", + "resourceGroup": "[variables('clusterResourceGroup')]", "properties": { "mode": "Incremental", "template": { @@ -90,7 +88,7 @@ "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", "excludeNameSpaces": "[parameters('excludeNamespacesForDataCollection')]" - } + } }, "extensionName": "ContainerInsights" } diff --git a/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterOnboarding.json index c2ea7853c..bee0edd69 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterOnboarding.json @@ -64,19 +64,17 @@ "clusterResourceGroup": "[split(parameters('clusterResourceId'),'/')[4]]", "clusterName": "[split(parameters('clusterResourceId'),'/')[8]]", "clusterLocation": "[replace(parameters('clusterRegion'),' ', '')]", - "workspaceSubscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]", - "workspaceResourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]", "dcrName": "[Concat('MSCI', '-', variables('clusterName'), '-', variables('clusterLocation'))]", "associationName": "ContainerInsightsExtension", - "dataCollectionRuleId": "[resourceId(variables('workspaceSubscriptionId'), variables('workspaceResourceGroup'), 'Microsoft.Insights/dataCollectionRules', variables('dcrName'))]" + "dataCollectionRuleId": "[resourceId(variables('clusterSubscriptionId'), variables('clusterResourceGroup'), 'Microsoft.Insights/dataCollectionRules', variables('dcrName'))]" }, 
"resources": [ { "type": "Microsoft.Resources/deployments", "name": "[Concat('arc-k8s-monitoring-msi-dcr', '-', uniqueString(variables('dcrName')))]", "apiVersion": "2017-05-10", - "subscriptionId": "[variables('workspaceSubscriptionId')]", - "resourceGroup": "[variables('workspaceResourceGroup')]", + "subscriptionId": "[variables('clusterSubscriptionId')]", + "resourceGroup": "[variables('clusterResourceGroup')]", "properties": { "mode": "Incremental", "template": { From 9ba9fdb7729a2c12be78d46606092b1015f751f7 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 17 Nov 2022 21:08:34 -0800 Subject: [PATCH 275/301] update existing templates with data collection settings --- .../existingClusterOnboarding.json | 18 ++ .../existingClusterParam.json | 6 + .../existingClusterOnboarding.json | 197 ---------------- .../existingClusterParam.json | 31 --- .../existingClusterOnboarding.json | 220 ------------------ .../existingClusterParam.json | 34 --- .../existingClusterOnboarding.json | 20 +- .../existingClusterParam.json | 6 + .../existingClusterOnboarding.json | 20 +- .../existingClusterParam.json | 6 + 10 files changed, 74 insertions(+), 484 deletions(-) delete mode 100644 scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterOnboarding.json delete mode 100644 scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterParam.json delete mode 100644 scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterOnboarding.json delete mode 100644 scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterParam.json diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index 7b887c692..6d4f7d4a0 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -49,6 +49,18 @@ "metadata": { "description": "Array of allowed syslog facilities" } + }, + "dataCollectionInterval": { + "type": "string", + "metadata": { + "description": "Data collection interval e.g. \"5m\" for metrics and inventory. 
Supported value range from 1m to 30m" + } + }, + "excludeNamespacesForDataCollection": { + "type": "array", + "metadata": { + "description": "Kubernetes namespaces to exclude for the data collection of inventory and metrics" + } } }, "variables": { @@ -80,6 +92,12 @@ "streams": [ "Microsoft-ContainerInsights-Group-Default" ], + "extensionSettings": { + "dataCollectionSettings" : { + "interval": "[parameters('dataCollectionInterval')]", + "excludeNameSpaces": "[parameters('excludeNamespacesForDataCollection')]" + } + }, "extensionName": "ContainerInsights" } ] diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json index f173e10a9..11abb2e97 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json @@ -59,6 +59,12 @@ "": "", "": "" } + }, + "dataCollectionInterval": { + "value" : "1m" + }, + "excludeNamespacesForDataCollection": { + "value": [ "kube-system"] } } } diff --git a/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterOnboarding.json deleted file mode 100644 index 8d4689da5..000000000 --- a/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterOnboarding.json +++ /dev/null @@ -1,197 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - "aksResourceId": { - "type": "string", - "metadata": { - "description": "AKS Cluster Resource ID" - } - }, - "aksResourceLocation": { - "type": "string", - "metadata": { - "description": "Location of the AKS resource e.g. \"East US\"" - } - }, - "resourceTagValues": { - "type": "object", - "metadata": { - "description": "Existing or new tags to use on AKS, ContainerInsights and DataCollectionRule Resources" - } - }, - "workspaceLocation": { - "type": "string", - "metadata": { - "description": "Worksapce Location for data collection rule" - } - }, - "workspaceResourceId": { - "type": "string", - "metadata": { - "description": "Full Resource ID of the log analitycs workspace that will be used for data destination. For example /subscriptions/00000000-0000-0000-0000-0000-00000000/resourceGroups/ResourceGroupName/providers/Microsoft.operationalinsights/workspaces/ws_xyz" - } - }, - "dataCollectionInterval": { - "type": "string", - "metadata": { - "description": "Data collection interval e.g. \"5m\" for metrics and inventory. 
Supported value range from 1m to 30m" - } - }, - "excludeNamespacesForDataCollection": { - "type": "array", - "metadata": { - "description": "Kubernetes namespaces to exclude for the data collection of inventory and metrics" - } - } - }, - "variables": { - "clusterSubscriptionId": "[split(parameters('aksResourceId'),'/')[2]]", - "clusterResourceGroup": "[split(parameters('aksResourceId'),'/')[4]]", - "clusterName": "[split(parameters('aksResourceId'),'/')[8]]", - "clusterLocation": "[replace(parameters('aksResourceLocation'),' ', '')]", - "dcrName": "[Concat('MSCI', '-', variables('clusterName'), '-', variables('clusterLocation'))]", - "associationName": "ContainerInsightsExtension", - "dataCollectionRuleId": "[resourceId(variables('clusterSubscriptionId'), variables('clusterResourceGroup'), 'Microsoft.Insights/dataCollectionRules', variables('dcrName'))]" - }, - "resources": [ - { - "type": "Microsoft.Resources/deployments", - "name": "[Concat('aks-monitoring-msi-dcr', '-', uniqueString(variables('dcrName')))]", - "apiVersion": "2017-05-10", - "subscriptionId": "[variables('clusterSubscriptionId')]", - "resourceGroup": "[variables('clusterResourceGroup')]", - "properties": { - "mode": "Incremental", - "template": { - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": {}, - "variables": {}, - "resources": [ - { - "type": "Microsoft.Insights/dataCollectionRules", - "apiVersion": "2021-04-01", - "name": "[variables('dcrName')]", - "location": "[parameters('workspaceLocation')]", - "tags": "[parameters('resourceTagValues')]", - "kind": "Linux", - "properties": { - "dataSources": { - "extensions": [ - { - "name": "ContainerInsightsExtension", - "streams": [ - "Microsoft-ContainerInsights-Group-Default" - ], - "extensionSettings": { - "dataCollectionSettings" : { - "interval": "[parameters('dataCollectionInterval')]", - "excludeNameSpaces": "[parameters('excludeNamespacesForDataCollection')]" - } - }, - "extensionName": "ContainerInsights" - } - ] - }, - "destinations": { - "logAnalytics": [ - { - "workspaceResourceId": "[parameters('workspaceResourceId')]", - "name": "ciworkspace" - } - ] - }, - "dataFlows": [ - { - "streams": [ - "Microsoft-ContainerInsights-Group-Default" - ], - "destinations": [ - "ciworkspace" - ] - } - ] - } - } - ] - }, - "parameters": {} - } - }, - { - "type": "Microsoft.Resources/deployments", - "name": "[Concat('aks-monitoring-msi-dcra', '-', uniqueString(parameters('aksResourceId')))]", - "apiVersion": "2017-05-10", - "subscriptionId": "[variables('clusterSubscriptionId')]", - "resourceGroup": "[variables('clusterResourceGroup')]", - "dependsOn": [ - "[Concat('aks-monitoring-msi-dcr', '-', uniqueString(variables('dcrName')))]" - ], - "properties": { - "mode": "Incremental", - "template": { - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": {}, - "variables": {}, - "resources": [ - { - "type": "Microsoft.ContainerService/managedClusters/providers/dataCollectionRuleAssociations", - "name": "[concat(variables('clusterName'),'/microsoft.insights/', variables('associationName'))]", - "apiVersion": "2021-04-01", - "properties": { - "description": "Association of data collection rule. 
Deleting this association will break the data collection for this AKS Cluster.", - "dataCollectionRuleId": "[variables('dataCollectionRuleId')]" - } - } - - ] - }, - "parameters": {} - } - }, - { - "type": "Microsoft.Resources/deployments", - "name": "[Concat('aks-monitoring-msi-addon', '-', uniqueString(parameters('aksResourceId')))]", - "apiVersion": "2017-05-10", - "subscriptionId": "[variables('clusterSubscriptionId')]", - "resourceGroup": "[variables('clusterResourceGroup')]", - "dependsOn": [ - "[Concat('aks-monitoring-msi-dcra', '-', uniqueString(parameters('aksResourceId')))]" - ], - "properties": { - "mode": "Incremental", - "template": { - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": {}, - "variables": {}, - "resources": [ - { - "name": "[variables('clusterName')]", - "type": "Microsoft.ContainerService/managedClusters", - "location": "[parameters('aksResourceLocation')]", - "tags": "[parameters('resourceTagValues')]", - "apiVersion": "2018-03-31", - "properties": { - "mode": "Incremental", - "id": "[parameters('aksResourceId')]", - "addonProfiles": { - "omsagent": { - "enabled": true, - "config": { - "logAnalyticsWorkspaceResourceID": "[parameters('workspaceResourceId')]", - "useAADAuth": "true" - } - } - } - } - } - ] - }, - "parameters": {} - } - } - ] -} diff --git a/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterParam.json b/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterParam.json deleted file mode 100644 index 8c3bb4989..000000000 --- a/scripts/onboarding/aks/onboarding-with-data-collection-settings/existingClusterParam.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", - "contentVersion": "1.0.0.0", - "parameters": { - "aksResourceId": { - "value": "/subscriptions//resourcegroups//providers/Microsoft.ContainerService/managedClusters/" - }, - "aksResourceLocation": { - "value": "" - }, - "workspaceResourceId": { - "value": "/subscriptions//resourceGroups//providers/Microsoft.OperationalInsights/workspaces/" - }, - "workspaceLocation": { - "value": "" - }, - "resourceTagValues": { - "value": { - "": "", - "": "", - "": "" - } - }, - "dataCollectionInterval": { - "value" : "5m" - }, - "excludeNamespacesForDataCollection": { - "value": [ "kube-system"] - } - } - } diff --git a/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterOnboarding.json deleted file mode 100644 index bee0edd69..000000000 --- a/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterOnboarding.json +++ /dev/null @@ -1,220 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - "clusterResourceId": { - "type": "string", - "metadata": { - "description": "Resource Id of the Azure Arc Connected Cluster" - } - }, - "clusterRegion": { - "type": "string", - "metadata": { - "description": "Location of the Azure Arc Connected Cluster Resource e.g. 
\"eastus\"" - } - }, - "workspaceResourceId": { - "type": "string", - "metadata": { - "description": "Azure Monitor Log Analytics Resource ID" - } - }, - "workspaceRegion": { - "type": "string", - "metadata": { - "description": "Azure Monitor Log Analytics Workspace region e.g. \"eastus\"" - } - }, - "workspaceDomain": { - "type": "string", - "allowedValues": [ - "opinsights.azure.com", - "opinsights.azure.cn", - "opinsights.azure.us", - "opinsights.azure.eaglex.ic.gov", - "opinsights.azure.microsoft.scloud" - ], - "defaultValue": "opinsights.azure.com", - "metadata": { - "description": "Azure Monitor Log Analytics Workspace Domain e.g. opinsights.azure.com" - } - }, - "resourceTagValues": { - "type": "object", - "metadata": { - "description": "Existing or new tags to use on Arc K8s ContainerInsights extension resources" - } - }, - "dataCollectionInterval": { - "type": "string", - "metadata": { - "description": "Data collection interval e.g. \"5m\" for metrics and inventory. Supported value range from 1m to 30m and default is 1m" - } - }, - "excludeNamespacesForDataCollection": { - "type": "array", - "metadata": { - "description": "Kubernetes namespaces to exclude for the data collection of inventory and metrics" - } - } - }, - "variables": { - "clusterSubscriptionId": "[split(parameters('clusterResourceId'),'/')[2]]", - "clusterResourceGroup": "[split(parameters('clusterResourceId'),'/')[4]]", - "clusterName": "[split(parameters('clusterResourceId'),'/')[8]]", - "clusterLocation": "[replace(parameters('clusterRegion'),' ', '')]", - "dcrName": "[Concat('MSCI', '-', variables('clusterName'), '-', variables('clusterLocation'))]", - "associationName": "ContainerInsightsExtension", - "dataCollectionRuleId": "[resourceId(variables('clusterSubscriptionId'), variables('clusterResourceGroup'), 'Microsoft.Insights/dataCollectionRules', variables('dcrName'))]" - }, - "resources": [ - { - "type": "Microsoft.Resources/deployments", - "name": "[Concat('arc-k8s-monitoring-msi-dcr', '-', uniqueString(variables('dcrName')))]", - "apiVersion": "2017-05-10", - "subscriptionId": "[variables('clusterSubscriptionId')]", - "resourceGroup": "[variables('clusterResourceGroup')]", - "properties": { - "mode": "Incremental", - "template": { - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": {}, - "variables": {}, - "resources": [ - { - "type": "Microsoft.Insights/dataCollectionRules", - "apiVersion": "2021-04-01", - "name": "[variables('dcrName')]", - "location": "[parameters('workspaceRegion')]", - "tags": "[parameters('resourceTagValues')]", - "kind": "Linux", - "properties": { - "dataSources": { - "extensions": [ - { - "name": "ContainerInsightsExtension", - "streams": [ - "Microsoft-ContainerInsights-Group-Default" - ], - "extensionSettings": { - "dataCollectionSettings" : { - "interval": "[parameters('dataCollectionInterval')]", - "excludeNameSpaces": "[parameters('excludeNamespacesForDataCollection')]" - } - }, - "extensionName": "ContainerInsights" - } - ] - }, - "destinations": { - "logAnalytics": [ - { - "workspaceResourceId": "[parameters('workspaceResourceId')]", - "name": "ciworkspace" - } - ] - }, - "dataFlows": [ - { - "streams": [ - "Microsoft-ContainerInsights-Group-Default" - ], - "destinations": [ - "ciworkspace" - ] - } - ] - } - } - ] - }, - "parameters": {} - } - }, - { - "type": "Microsoft.Resources/deployments", - "name": "[Concat('arc-k8s-monitoring-msi-dcra', '-', 
uniqueString(parameters('clusterResourceId')))]", - "apiVersion": "2017-05-10", - "subscriptionId": "[variables('clusterSubscriptionId')]", - "resourceGroup": "[variables('clusterResourceGroup')]", - "dependsOn": [ - "[Concat('arc-k8s-monitoring-msi-dcr', '-', uniqueString(variables('dcrName')))]" - ], - "properties": { - "mode": "Incremental", - "template": { - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": {}, - "variables": {}, - "resources": [ - { - "type": "Microsoft.Kubernetes/connectedClusters/providers/dataCollectionRuleAssociations", - "name": "[concat(variables('clusterName'),'/microsoft.insights/', variables('associationName'))]", - "apiVersion": "2021-04-01", - "properties": { - "description": "Association of data collection rule. Deleting this association will break the data collection for this AKS Cluster.", - "dataCollectionRuleId": "[variables('dataCollectionRuleId')]" - } - } - ] - }, - "parameters": {} - } - }, - { - "type": "Microsoft.Resources/deployments", - "name": "[Concat('arc-k8s-ci-extension', '-', uniqueString(parameters('clusterResourceId')))]", - "apiVersion": "2019-05-01", - "subscriptionId": "[split(parameters('clusterResourceId'),'/')[2]]", - "resourceGroup": "[split(parameters('clusterResourceId'),'/')[4]]", - "dependsOn": [ - "[Concat('arc-k8s-monitoring-msi-dcra', '-', uniqueString(parameters('clusterResourceId')))]" - ], - "properties": { - "mode": "Incremental", - "template": { - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": {}, - "variables": {}, - "resources": [ - { - "type": "Microsoft.KubernetesConfiguration/extensions", - "apiVersion": "2021-09-01", - "name": "azuremonitor-containers", - "location": "[parameters('clusterRegion')]", - "identity": { - "type": "systemassigned" - }, - "properties": { - "extensionType": "Microsoft.AzureMonitor.Containers", - "configurationSettings": { - "logAnalyticsWorkspaceResourceID": "[parameters('workspaceResourceId')]", - "omsagent.domain": "[parameters('workspaceDomain')]", - "omsagent.useAADAuth": "true", - "omsagent.image.tag": "ciprodpreview08082022", - "omsagent.image.tagWindows": "win-ciprodpreview08082022" - }, - "configurationProtectedSettings": { - "omsagent.secret.wsid": "[reference(parameters('workspaceResourceId'), '2015-03-20').customerId]", - "omsagent.secret.key": "[listKeys(parameters('workspaceResourceId'), '2015-03-20').primarySharedKey]" - }, - "autoUpgradeMinorVersion": true, - "releaseTrain": "Stable", - "scope": { - "Cluster": { - "releaseNamespace": "azuremonitor-containers" - } - } - }, - "scope": "[concat('Microsoft.Kubernetes/connectedClusters/', split(parameters('clusterResourceId'),'/')[8])]" - } - ] - } - } - } - ] -} diff --git a/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterParam.json deleted file mode 100644 index e4745d9f7..000000000 --- a/scripts/onboarding/templates/arc-k8s-extension-data-collection-settings/existingClusterParam.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", - "contentVersion": "1.0.0.0", - "parameters": { - "clusterResourceId": { - "value": "/subscriptions//resourceGroups//providers/Microsoft.Kubernetes/connectedClusters/" - }, - "clusterRegion": { - 
"value": "" - }, - "workspaceResourceId": { - "value": "/subscriptions//resourcegroups//providers/microsoft.operationalinsights/workspaces/" - }, - "workspaceRegion": { - "value": "" - }, - "workspaceDomain": { - "value": "" - }, - "resourceTagValues": { - "value": { - "": "", - "": "", - "": "" - } - }, - "dataCollectionInterval": { - "value" : "5m" - }, - "excludeNamespacesForDataCollection": { - "value": [ "kube-system"] - } - } -} diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json index 2c721f867..fa79712c1 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json @@ -63,7 +63,19 @@ "metadata": { "description": "Array of allowed syslog facilities" } - } + }, + "dataCollectionInterval": { + "type": "string", + "metadata": { + "description": "Data collection interval e.g. \"5m\" for metrics and inventory. Supported value range from 1m to 30m and default is 1m" + } + }, + "excludeNamespacesForDataCollection": { + "type": "array", + "metadata": { + "description": "Kubernetes namespaces to exclude for the data collection of inventory and metrics" + } + } }, "variables": { "clusterSubscriptionId": "[split(parameters('clusterResourceId'),'/')[2]]", @@ -94,6 +106,12 @@ "streams": [ "Microsoft-ContainerInsights-Group-Default" ], + "extensionSettings": { + "dataCollectionSettings" : { + "interval": "[parameters('dataCollectionInterval')]", + "excludeNameSpaces": "[parameters('excludeNamespacesForDataCollection')]" + } + }, "extensionName": "ContainerInsights" } ] diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json index ab98a31d6..afc91dad7 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json @@ -62,6 +62,12 @@ "": "", "": "" } + }, + "dataCollectionInterval": { + "value" : "1m" + }, + "excludeNamespacesForDataCollection": { + "value": [ "kube-system"] } } } diff --git a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json index e453a7377..cb8d3e16c 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json @@ -63,7 +63,19 @@ "metadata": { "description": "Array of allowed syslog facilities" } - } + }, + "dataCollectionInterval": { + "type": "string", + "metadata": { + "description": "Data collection interval e.g. \"5m\" for metrics and inventory. 
Supported value range from 1m to 30m and default is 1m" + } + }, + "excludeNamespacesForDataCollection": { + "type": "array", + "metadata": { + "description": "Kubernetes namespaces to exclude for the data collection of inventory and metrics" + } + } }, "variables": { "clusterSubscriptionId": "[split(parameters('clusterResourceId'),'/')[2]]", @@ -94,6 +106,12 @@ "streams": [ "Microsoft-ContainerInsights-Group-Default" ], + "extensionSettings": { + "dataCollectionSettings" : { + "interval": "[parameters('dataCollectionInterval')]", + "excludeNameSpaces": "[parameters('excludeNamespacesForDataCollection')]" + } + }, "extensionName": "ContainerInsights" } ] diff --git a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json index 3d95dcf20..3fd0146e1 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json @@ -62,6 +62,12 @@ "": "", "": "" } + }, + "dataCollectionInterval": { + "value" : "1m" + }, + "excludeNamespacesForDataCollection": { + "value": [ "kube-system"] } } } From fa68c37d387006feaa288100d8766cfda35c30f7 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 17 Nov 2022 23:13:04 -0800 Subject: [PATCH 276/301] implement namespaces filtering mode --- .../existingClusterOnboarding.json | 19 ++- .../existingClusterParam.json | 5 +- .../existingClusterOnboarding.json | 27 +++- .../existingClusterParam.json | 5 +- .../existingClusterOnboarding.json | 27 +++- .../existingClusterParam.json | 5 +- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 54 ++++--- source/plugins/ruby/KubernetesApiClient.rb | 25 ++-- source/plugins/ruby/constants.rb | 4 +- source/plugins/ruby/extension_utils.rb | 132 ++++++++++-------- source/plugins/ruby/in_cadvisor_perf.rb | 13 +- source/plugins/ruby/in_containerinventory.rb | 54 +++---- source/plugins/ruby/in_kube_events.rb | 11 +- source/plugins/ruby/in_kube_perfinventory.rb | 41 +++--- source/plugins/ruby/in_kube_podinventory.rb | 106 +++++++------- source/plugins/ruby/in_kube_pvinventory.rb | 19 ++- .../plugins/ruby/in_kubestate_deployments.rb | 12 +- source/plugins/ruby/in_kubestate_hpa.rb | 15 +- source/plugins/ruby/in_win_cadvisor_perf.rb | 16 ++- 19 files changed, 343 insertions(+), 247 deletions(-) diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index 6d4f7d4a0..44885f1c4 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -56,10 +56,22 @@ "description": "Data collection interval e.g. \"5m\" for metrics and inventory. 
Supported value range from 1m to 30m" } }, - "excludeNamespacesForDataCollection": { + "nameSpacesFilteringModeForDataCollection": { + "type": "string", + "metadata": { + "description": "Data collection Mode for the namespaces" + }, + "allowedValues": [ + "Off", + "Include", + "Exclude" + ], + "defaultValue": "Off" + }, + "namespacesForDataCollection": { "type": "array", "metadata": { - "description": "Kubernetes namespaces to exclude for the data collection of inventory and metrics" + "description": "Kubernetes namespaces for the data collection of inventory and metrics" } } }, @@ -95,7 +107,8 @@ "extensionSettings": { "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", - "excludeNameSpaces": "[parameters('excludeNamespacesForDataCollection')]" + "mode" : "[parameters('nameSpacesFilteringModeForDataCollection')]", + "nameSpaces": "[parameters('namespacesForDataCollection')]" } }, "extensionName": "ContainerInsights" diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json index 11abb2e97..ead21476d 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json @@ -63,7 +63,10 @@ "dataCollectionInterval": { "value" : "1m" }, - "excludeNamespacesForDataCollection": { + "nameSpacesFilteringModeForDataCollection": { + "value": "Off" + }, + "namespacesForDataCollection": { "value": [ "kube-system"] } } diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json index fa79712c1..f120762cb 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json @@ -67,15 +67,27 @@ "dataCollectionInterval": { "type": "string", "metadata": { - "description": "Data collection interval e.g. \"5m\" for metrics and inventory. Supported value range from 1m to 30m and default is 1m" + "description": "Data collection interval e.g. \"5m\" for metrics and inventory. 
Supported value range from 1m to 30m" } - }, - "excludeNamespacesForDataCollection": { + }, + "nameSpacesFilteringModeForDataCollection": { + "type": "string", + "metadata": { + "description": "Data collection Mode for the namespaces" + }, + "allowedValues": [ + "Off", + "Include", + "Exclude" + ], + "defaultValue": "Off" + }, + "namespacesForDataCollection": { "type": "array", "metadata": { - "description": "Kubernetes namespaces to exclude for the data collection of inventory and metrics" + "description": "Kubernetes namespaces for the data collection of inventory and metrics" } - } + } }, "variables": { "clusterSubscriptionId": "[split(parameters('clusterResourceId'),'/')[2]]", @@ -108,8 +120,9 @@ ], "extensionSettings": { "dataCollectionSettings" : { - "interval": "[parameters('dataCollectionInterval')]", - "excludeNameSpaces": "[parameters('excludeNamespacesForDataCollection')]" + "interval": "[parameters('dataCollectionInterval')]", + "mode" : "[parameters('nameSpacesFilteringModeForDataCollection')]", + "nameSpaces": "[parameters('namespacesForDataCollection')]" } }, "extensionName": "ContainerInsights" diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json index afc91dad7..77a94caa4 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json @@ -66,7 +66,10 @@ "dataCollectionInterval": { "value" : "1m" }, - "excludeNamespacesForDataCollection": { + "nameSpacesFilteringModeForDataCollection": { + "value": "Off" + }, + "namespacesForDataCollection": { "value": [ "kube-system"] } } diff --git a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json index cb8d3e16c..f3a4221d3 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json @@ -67,15 +67,27 @@ "dataCollectionInterval": { "type": "string", "metadata": { - "description": "Data collection interval e.g. \"5m\" for metrics and inventory. Supported value range from 1m to 30m and default is 1m" + "description": "Data collection interval e.g. \"5m\" for metrics and inventory. 
Supported value range from 1m to 30m" } - }, - "excludeNamespacesForDataCollection": { + }, + "nameSpacesFilteringModeForDataCollection": { + "type": "string", + "metadata": { + "description": "Data collection Mode for the namespaces" + }, + "allowedValues": [ + "Off", + "Include", + "Exclude" + ], + "defaultValue": "Off" + }, + "namespacesForDataCollection": { "type": "array", "metadata": { - "description": "Kubernetes namespaces to exclude for the data collection of inventory and metrics" + "description": "Kubernetes namespaces for the data collection of inventory and metrics" } - } + } }, "variables": { "clusterSubscriptionId": "[split(parameters('clusterResourceId'),'/')[2]]", @@ -108,8 +120,9 @@ ], "extensionSettings": { "dataCollectionSettings" : { - "interval": "[parameters('dataCollectionInterval')]", - "excludeNameSpaces": "[parameters('excludeNamespacesForDataCollection')]" + "interval": "[parameters('dataCollectionInterval')]", + "mode" : "[parameters('nameSpacesFilteringModeForDataCollection')]", + "nameSpaces": "[parameters('namespacesForDataCollection')]" } }, "extensionName": "ContainerInsights" diff --git a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json index 3fd0146e1..051338c9f 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json @@ -66,7 +66,10 @@ "dataCollectionInterval": { "value" : "1m" }, - "excludeNamespacesForDataCollection": { + "nameSpacesFilteringModeForDataCollection": { + "value": "Off" + }, + "namespacesForDataCollection": { "value": [ "kube-system"] } } diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index aac3c9d1c..04b5fccac 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -133,7 +133,7 @@ def getCAdvisorUri(winNode, relativeUri) return baseUri + relativeUri end - def getMetrics(winNode: nil, excludeNameSpaces: [], metricTime: Time.now.utc.iso8601) + def getMetrics(winNode: nil, mode: "Off", nameSpaces: [], metricTime: Time.now.utc.iso8601) metricDataItems = [] begin cAdvisorStats = getSummaryStatsFromCAdvisor(winNode) @@ -155,8 +155,8 @@ def getMetrics(winNode: nil, excludeNameSpaces: [], metricTime: Time.now.utc.iso # Checking if we are in windows daemonset and sending only few metrics that are needed for MDM if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 # Container metrics - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, excludeNameSpaces)) - containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, excludeNameSpaces) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, mode, nameSpaces)) + containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, mode, nameSpaces) if containerCpuUsageNanoSecondsRate && !containerCpuUsageNanoSecondsRate.empty? 
&& !containerCpuUsageNanoSecondsRate.nil? metricDataItems.concat(containerCpuUsageNanoSecondsRate) end @@ -167,15 +167,15 @@ def getMetrics(winNode: nil, excludeNameSpaces: [], metricTime: Time.now.utc.iso end metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime)) else - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, excludeNameSpaces)) - metricDataItems.concat(getContainerStartTimeMetricItems(metricInfo, hostName, "restartTimeEpoch", metricTime, excludeNameSpaces)) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, mode, nameSpaces)) + metricDataItems.concat(getContainerStartTimeMetricItems(metricInfo, hostName, "restartTimeEpoch", metricTime, nameSpaces)) if operatingSystem == "Linux" - metricDataItems.concat(getContainerCpuMetricItems(metricInfo, hostName, "usageNanoCores", Constants::CPU_USAGE_NANO_CORES, metricTime, excludeNameSpaces)) - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime, operatingSystem, excludeNameSpaces)) + metricDataItems.concat(getContainerCpuMetricItems(metricInfo, hostName, "usageNanoCores", Constants::CPU_USAGE_NANO_CORES, metricTime, nameSpaces)) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime, operatingSystem, nameSpaces)) metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime)) elsif operatingSystem == "Windows" - containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, excludeNameSpaces) + containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, mode, nameSpaces) if containerCpuUsageNanoSecondsRate && !containerCpuUsageNanoSecondsRate.empty? && !containerCpuUsageNanoSecondsRate.nil? metricDataItems.concat(containerCpuUsageNanoSecondsRate) end @@ -210,7 +210,7 @@ def getMetrics(winNode: nil, excludeNameSpaces: [], metricTime: Time.now.utc.iso return metricDataItems end - def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, excludeNameSpaces) + def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, mode, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId timeDifference = (DateTime.now.to_time.to_i - @@telemetryCpuMetricTimeTracker).abs @@ -222,7 +222,7 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, mode, nameSpaces) if (!pod["containers"].nil?) 
pod["containers"].each do |container| @@ -237,7 +237,6 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_CONTAINER metricItem["InstanceName"] = clusterId + "/" + podUid + "/" + containerName - metricCollection = {} metricCollection["CounterName"] = metricNametoReturn metricCollection["Value"] = metricValue @@ -313,7 +312,7 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met return metricItems end - def getInsightsMetrics(winNode: nil, excludeNameSpaces: [], metricTime: Time.now.utc.iso8601) + def getInsightsMetrics(winNode: nil, mode: "Off", nameSpaces: [], metricTime: Time.now.utc.iso8601) metricDataItems = [] begin cAdvisorStats = getSummaryStatsFromCAdvisor(winNode) @@ -332,11 +331,11 @@ def getInsightsMetrics(winNode: nil, excludeNameSpaces: [], metricTime: Time.now operatingSystem = "Linux" end if !metricInfo.nil? - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryTotal", "containerGpumemoryTotalBytes", metricTime, excludeNameSpaces)) - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryUsed", "containerGpumemoryUsedBytes", metricTime, excludeNameSpaces)) - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "dutyCycle", "containerGpuDutyCycle", metricTime, excludeNameSpaces)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryTotal", "containerGpumemoryTotalBytes", metricTime, mode, nameSpaces)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryUsed", "containerGpumemoryUsedBytes", metricTime, mode, nameSpaces)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "dutyCycle", "containerGpuDutyCycle", metricTime, mode, nameSpaces)) - metricDataItems.concat(getPersistentVolumeMetrics(metricInfo, hostName, "usedBytes", Constants::PV_USED_BYTES, metricTime, excludeNameSpaces)) + metricDataItems.concat(getPersistentVolumeMetrics(metricInfo, hostName, "usedBytes", Constants::PV_USED_BYTES, metricTime, mode, nameSpaces)) else @Log.warn("Couldn't get Insights metrics information for host: #{hostName} os:#{operatingSystem}") end @@ -347,7 +346,7 @@ def getInsightsMetrics(winNode: nil, excludeNameSpaces: [], metricTime: Time.now return metricDataItems end - def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metricNameToReturn, metricPollTime, excludeNameSpaces) + def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metricNameToReturn, metricPollTime, mode, nameSpaces) telemetryTimeDifference = (DateTime.now.to_time.to_i - @@telemetryPVKubeSystemMetricsTimeTracker).abs telemetryTimeDifferenceInMinutes = telemetryTimeDifference / 60 @@ -358,7 +357,7 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric metricInfo = metricJSON metricInfo["pods"].each do |pod| podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(pod["podRef"]["name"], podNamespace, excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(pod["podRef"]["name"], podNamespace, mode, nameSpaces) excludeNamespace = false if (podNamespace.downcase == "kube-system") && @pvKubeSystemCollectionMetricsEnabled == "false" excludeNamespace = true @@ -420,7 +419,7 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric return 
metricItems end - def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCollect, metricNametoReturn, metricPollTime, excludeNameSpaces) + def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCollect, metricNametoReturn, metricPollTime, mode, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId clusterName = KubernetesApiClient.getClusterName @@ -430,7 +429,7 @@ def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCo podUid = pod["podRef"]["uid"] podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, mode, nameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| @@ -508,7 +507,7 @@ def resetWinContainerIdCache end # usageNanoCores doesnt exist for windows nodes. Hence need to compute this from usageCoreNanoSeconds - def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, excludeNameSpaces) + def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, mode, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId timeDifference = (DateTime.now.to_time.to_i - @@telemetryCpuMetricTimeTracker).abs @@ -522,7 +521,7 @@ def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, mode, nameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| @@ -639,7 +638,7 @@ def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, return metricItems end - def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollect, metricNametoReturn, metricPollTime, operatingSystem, excludeNameSpaces) + def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollect, metricNametoReturn, metricPollTime, operatingSystem, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId timeDifference = (DateTime.now.to_time.to_i - @@telemetryMemoryMetricTimeTracker).abs @@ -650,7 +649,7 @@ def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollec podUid = pod["podRef"]["uid"] podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, mode, nameSpaces) if (!pod["containers"].nil?) 
pod["containers"].each do |container| containerName = container["name"] @@ -721,7 +720,6 @@ def getNodeMetricItem(metricJSON, hostName, metricCategory, metricNameToCollect, metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_NODE metricItem["InstanceName"] = clusterId + "/" + nodeName - metricCollection = {} metricCollection["CounterName"] = metricNametoReturn metricCollection["Value"] = metricValue @@ -867,13 +865,11 @@ def getNodeLastRebootTimeMetric(metricJSON, hostName, metricNametoReturn, metric metricValue = node["startTime"] metricTime = metricPollTime #Time.now.utc.iso8601 #2018-01-30T19:36:14Z - metricItem["Timestamp"] = metricTime metricItem["Host"] = hostName metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_NODE metricItem["InstanceName"] = clusterId + "/" + nodeName - metricCollection = {} metricCollection["CounterName"] = metricNametoReturn #Read it from /proc/uptime @@ -891,7 +887,7 @@ def getNodeLastRebootTimeMetric(metricJSON, hostName, metricNametoReturn, metric return metricItem end - def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, metricPollTime, excludeNameSpaces) + def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, metricPollTime, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId #currentTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z @@ -901,7 +897,7 @@ def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, m podUid = pod["podRef"]["uid"] podNamespace = pod["podRef"]["namespace"] podName = pod["podRef"]["name"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, mode, nameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| containerName = container["name"] diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index df90a4fa7..716870fce 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -1409,19 +1409,20 @@ def isEmitCacheTelemetry return isEmitCacheTelemtryEnabled end - def isExcludeResourceItem(resourceName, resourceNamespace, excludeNameSpaces) - isExclude = false - begin - # dont exclude agent related data - if !resourceName.nil? && !resourceName.empty? && resourceName.start_with?("omsagent") && resourceNamespace.eql?("kube-system") - isExclude = false - elsif !resourceNamespace.nil? && !resourceNamespace.empty? && !excludeNameSpaces.nil? && !excludeNameSpaces.empty? && excludeNameSpaces.length > 0 && excludeNameSpaces.include?(resourceNamespace) - isExclude = true - end - rescue => errorStr + def isExcludeResourceItem(resourceName, resourceNamespace, mode, nameSpaces) + isExclude = false + begin + if mode == "Exclude" + if !resourceName.nil? && !resourceName.empty? && resourceName.start_with?("ama-logs") && resourceNamespace.eql?("kube-system") + isExclude = false + elsif !resourceNamespace.nil? && !resourceNamespace.empty? && !nameSpaces.nil? && !nameSpaces.empty? 
&& nameSpaces.length > 0 && nameSpaces.include?(resourceNamespace) +          isExclude = true +        end +      end +    rescue => errorStr         @Log.warn "KubernetesApiClient::isExcludeResourceItem:Failed with an error : #{errorStr}" -      end -      return isExclude +    end +    return isExclude     end      def isAddonResizerVPAEnabled diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index eef983e15..d66024f20 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -149,7 +149,9 @@ class Constants    EXTENSION_SETTINGS = "extensionSettings"    EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS = "dataCollectionSettings"    EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL = "interval" -  EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_EXCLUDE_NAMESPACES = "excludeNameSpaces" +  EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_MODE = "mode" +  EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES = ["Off", "Include", "Exclude"] +  EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES = "nameSpaces"    # min and max data collection interval minutes    EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL_MIN = 1 diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index 96f873be4..43248aac4 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -6,71 +6,91 @@ require_relative "constants"  class ExtensionUtils -  class << self -    def getOutputStreamId(dataType) -      outputStreamId = "" -      begin -        if !dataType.nil? && !dataType.empty? -          outputStreamId = Extension.instance.get_output_stream_id(dataType) -          $log.info("ExtensionUtils::getOutputStreamId: got streamid: #{outputStreamId} for datatype: #{dataType}") +  class << self +    def getOutputStreamId(dataType) +      outputStreamId = "" +      begin +        if !dataType.nil? && !dataType.empty? +          outputStreamId = Extension.instance.get_output_stream_id(dataType) +          $log.info("ExtensionUtils::getOutputStreamId: got streamid: #{outputStreamId} for datatype: #{dataType}") +        else +          $log.warn("ExtensionUtils::getOutputStreamId: dataType shouldnt be nil or empty") +        end +      rescue => errorStr +        $log.warn("ExtensionUtils::getOutputStreamId: failed with an exception: #{errorStr}") +      end +      return outputStreamId +    end + +    def isAADMSIAuthMode() +      return !ENV["AAD_MSI_AUTH_MODE"].nil? && !ENV["AAD_MSI_AUTH_MODE"].empty? && ENV["AAD_MSI_AUTH_MODE"].downcase == "true" +    end + +    def getDataCollectionIntervalSeconds +      collectionIntervalSeconds = 60 +      begin +        dataCollectionSettings = Extension.instance.get_extension_data_collection_settings() +        if !dataCollectionSettings.nil? && +          !dataCollectionSettings.empty? && +          dataCollectionSettings.has_key?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL) +          interval = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL] +          re = /^[0-9]+[m]$/ +          if !re.match(interval).nil? 
+ intervalMinutes = interval.dup.chomp!("m").to_i + if intervalMinutes.between?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL_MIN, Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL_MAX) + collectionIntervalSeconds = intervalMinutes * 60 else - $log.warn("ExtensionUtils::getOutputStreamId: dataType shouldnt be nil or empty") + $log.warn("ExtensionUtils::getDataCollectionIntervalSeconds: interval value not in the range 1m to 30m hence using default, 60s: #{errorStr}") end - rescue => errorStr - $log.warn("ExtensionUtils::getOutputStreamId: failed with an exception: #{errorStr}") + else + $log.warn("ExtensionUtils::getDataCollectionIntervalSeconds: interval value is invalid hence using default, 60s: #{errorStr}") end - return outputStreamId - end - def isAADMSIAuthMode() - return !ENV["AAD_MSI_AUTH_MODE"].nil? && !ENV["AAD_MSI_AUTH_MODE"].empty? && ENV["AAD_MSI_AUTH_MODE"].downcase == "true" end + rescue => errorStr + $log.warn("ExtensionUtils::getDataCollectionIntervalSeconds: failed with an exception: #{errorStr}") + end + return collectionIntervalSeconds + end - def getDataCollectionIntervalSeconds - collectionIntervalSeconds = 60 - begin - dataCollectionSettings = Extension.instance.get_extension_data_collection_settings() - if !dataCollectionSettings.nil? && - !dataCollectionSettings.empty? && - dataCollectionSettings.has_key?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL) - interval = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL] - re = /^[0-9]+[m]$/ - if !re.match(interval).nil? - intervalMinutes = interval.dup.chomp!("m").to_i - if intervalMinutes.between?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL_MIN, Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL_MAX) - collectionIntervalSeconds = intervalMinutes * 60 - else - $log.warn("ExtensionUtils::getDataCollectionIntervalSeconds: interval value not in the range 1m to 30m hence using default, 60s: #{errorStr}") - end - else - $log.warn("ExtensionUtils::getDataCollectionIntervalSeconds: interval value is invalid hence using default, 60s: #{errorStr}") - end - end - rescue => errorStr - $log.warn("ExtensionUtils::getDataCollectionIntervalSeconds: failed with an exception: #{errorStr}") + def getNamespacesForDataCollection + nameSpaces = [] + begin + dataCollectionSettings = Extension.instance.get_extension_data_collection_settings() + if !dataCollectionSettings.nil? && + !dataCollectionSettings.empty? && + dataCollectionSettings.has_key?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES) + nameSpacesSetting = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES] + if !nameSpacesSetting.nil? && !nameSpacesSetting.empty? && nameSpacesSetting.kind_of?(Array) && nameSpacesSetting.length > 0 + uniqNamespaces = nameSpacesSetting.uniq + nameSpaces = uniqNamespaces.map(&:downcase) + else + $log.warn("ExtensionUtils::getNamespacesForDataCollection: nameSpaces: #{nameSpacesSetting} not valid hence using default") end - return collectionIntervalSeconds end + rescue => errorStr + $log.warn("ExtensionUtils::getNamespacesForDataCollection: failed with an exception: #{errorStr}") + end + return nameSpaces + end - def getNamespacesToExcludeForDataCollection - excludeNameSpaces = [] - begin - dataCollectionSettings = Extension.instance.get_extension_data_collection_settings() - if !dataCollectionSettings.nil? && - !dataCollectionSettings.empty? 
&& - dataCollectionSettings.has_key?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_EXCLUDE_NAMESPACES) - namespacesToExclude = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_EXCLUDE_NAMESPACES] - if !namespacesToExclude.nil? && !namespacesToExclude.empty? && namespacesToExclude.kind_of?(Array) && namespacesToExclude.length > 0 - uniqNamespaces = namespacesToExclude.uniq - excludeNameSpaces = uniqNamespaces.map(&:downcase) - else - $log.warn("ExtensionUtils::getNamespacesToExcludeForDataCollection: excludeNameSpaces: #{namespacesToExclude} not valid hence using default") - end - end - rescue => errorStr - $log.warn("ExtensionUtils::getNamespacesToExcludeForDataCollection: failed with an exception: #{errorStr}") + def getNamespacesModeForDataCollection + nameSpaceMode = "Off" + begin + dataCollectionSettings = Extension.instance.get_extension_data_collection_settings() + if !dataCollectionSettings.nil? && + !dataCollectionSettings.empty? && + dataCollectionSettings.has_key?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_MODE) + mode = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_MODE] + if !mode.nil? && !mode.empty? && Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES.include?(mode) + nameSpaceMode = mode + else + $log.warn("ExtensionUtils::getNamespacesModeForDataCollection: nameSpaceMode: #{mode} not valid hence using default") end - return excludeNameSpaces end - + rescue => errorStr + $log.warn("ExtensionUtils::getNamespacesModeForDataCollection: failed with an exception: #{errorStr}") + end + return nameSpaceMode end + end end diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index a39df2898..5e98fc335 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -22,7 +22,8 @@ def initialize require_relative "omslog" require_relative "constants" require_relative "extension_utils" - @excludeNameSpaces = [] + @nameSpaces = [] + @mode = "Off" end config_param :run_interval, :time, :default => 60 @@ -76,11 +77,13 @@ def enumerate() $log.info("in_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsmetricstag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_cadvisor_perf::enumerate: using data collection excludeNameSpaces: #{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @mode = ExtensionUtils.getNamespacesModeForDataCollection() + $log.info("in_cadvisor_perf::enumerate: using data collection mode for nameSpaces: #{@mode} @ #{Time.now.utc.iso8601}") end - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, excludeNameSpaces: @excludeNameSpaces, metricTime: batchTime) + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, mode: @mode, nameSpaces: @nameSpaces, metricTime: batchTime) metricData.each do |record| eventStream.add(time, record) if record end @@ -96,7 +99,7 @@ def enumerate() begin if !@@isWindows.nil? 
&& @@isWindows == false containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, excludeNameSpaces: @excludeNameSpaces, metricTime: batchTime)) + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, nameSpaces: @nameSpaces, metricTime: batchTime)) containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index 7d2202955..9f4162d0a 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -1,7 +1,7 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -require 'fluent/plugin/input' +require "fluent/plugin/input" module Fluent::Plugin class Container_Inventory_Input < Input @@ -58,6 +58,8 @@ def enumerate containerInventory = Array.new eventStream = Fluent::MultiEventStream.new hostName = "" + @mode = "Off" + @nameSpaces = [] $log.info("in_container_inventory::enumerate : Begin processing @ #{Time.now.utc.iso8601}") if ExtensionUtils.isAADMSIAuthMode() $log.info("in_container_inventory::enumerate: AAD AUTH MSI MODE") @@ -67,8 +69,10 @@ def enumerate $log.info("in_container_inventory::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_container_inventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_container_inventory::enumerate: using data collection excludeNameSpaces: #{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_container_inventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @mode = ExtensionUtils.getNamespacesModeForDataCollection() + $log.info("in_container_inventory::enumerate: using data collection mode for nameSpaces: #{@mode} @ #{Time.now.utc.iso8601}") end begin containerRuntimeEnv = ENV["CONTAINER_RUNTIME"] @@ -78,30 +82,30 @@ def enumerate containerIds = Array.new response = CAdvisorMetricsAPIClient.getPodsFromCAdvisor(winNode: nil) if !response.nil? && !response.body.nil? - podList = JSON.parse(response.body) - if !podList.nil? && !podList.empty? && podList.key?("items") && !podList["items"].nil? && !podList["items"].empty? - podList["items"].each do |item| - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) - containerInventoryRecords = KubernetesContainerInventory.getContainerInventoryRecords(item, batchTime, clusterCollectEnvironmentVar) - containerInventoryRecords.each do |containerRecord| - ContainerInventoryState.writeContainerState(containerRecord) - if hostName.empty? && !containerRecord["Computer"].empty? - hostName = containerRecord["Computer"] - end - if @addonTokenAdapterImageTag.empty? && ExtensionUtils.isAADMSIAuthMode() - if !containerRecord["ElementName"].nil? && !containerRecord["ElementName"].empty? && - containerRecord["ElementName"].include?("_kube-system_") && - containerRecord["ElementName"].include?("addon-token-adapter_ama-logs") - if !containerRecord["ImageTag"].nil? && !containerRecord["ImageTag"].empty? 
- @addonTokenAdapterImageTag = containerRecord["ImageTag"] - end - end + podList = JSON.parse(response.body) + if !podList.nil? && !podList.empty? && podList.key?("items") && !podList["items"].nil? && !podList["items"].empty? + podList["items"].each do |item| + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @mode, @nameSpaces) + containerInventoryRecords = KubernetesContainerInventory.getContainerInventoryRecords(item, batchTime, clusterCollectEnvironmentVar) + containerInventoryRecords.each do |containerRecord| + ContainerInventoryState.writeContainerState(containerRecord) + if hostName.empty? && !containerRecord["Computer"].empty? + hostName = containerRecord["Computer"] + end + if @addonTokenAdapterImageTag.empty? && ExtensionUtils.isAADMSIAuthMode() + if !containerRecord["ElementName"].nil? && !containerRecord["ElementName"].empty? && + containerRecord["ElementName"].include?("_kube-system_") && + containerRecord["ElementName"].include?("addon-token-adapter_ama-logs") + if !containerRecord["ImageTag"].nil? && !containerRecord["ImageTag"].empty? + @addonTokenAdapterImageTag = containerRecord["ImageTag"] + end end - containerIds.push containerRecord["InstanceID"] - containerInventory.push containerRecord end + containerIds.push containerRecord["InstanceID"] + containerInventory.push containerRecord end end + end end # Update the state for deleted containers deletedContainers = ContainerInventoryState.getDeletedContainers(containerIds) @@ -113,8 +117,8 @@ def enumerate container["State"] = "Deleted" KubernetesContainerInventory.deleteCGroupCacheEntryForDeletedContainer(container["InstanceID"]) containerInventory.push container - end - end + end + end end containerInventory.each do |record| eventStream.add(emitTime, record) if record diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index f84062656..3905f2b6c 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -28,7 +28,8 @@ def initialize # Initilize enable/disable normal event collection @collectAllKubeEvents = false - @excludeNameSpaces = [] + @nameSpaces = [] + @mode = "Off" end config_param :run_interval, :time, :default => 60 @@ -93,8 +94,10 @@ def enumerate @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_EVENTS_DATA_TYPE) end $log.info("in_kube_events::enumerate: using kubeevents tag -#{@tag} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_kube_events::enumerate: using data collection excludeNameSpaces -#{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kube_events::enumerate: using data collection nameSpaces -#{@nameSpaces} @ #{Time.now.utc.iso8601}") + @mode = ExtensionUtils.getNamespacesModeForDataCollection() + $log.info("in_kube_events::enumerate: using data collection mode for nameSpaces -#{@mode} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil @@ -164,7 +167,7 @@ def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTim end # drop the events if the event of the excluded namespace - next unless !KubernetesApiClient.isExcludeResourceItem("", items["involvedObject"]["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem("", items["involvedObject"]["namespace"], @mode, @nameSpaces) record["ObjectKind"] = 
items["involvedObject"]["kind"] record["Namespace"] = items["involvedObject"]["namespace"] diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index c2d63b3cf..8e40a1208 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -32,7 +32,8 @@ def initialize @kubeperfTag = "oneagent.containerInsights.LINUX_PERF_BLOB" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" - @excludeNameSpaces = [] + @nameSpaces = [] + @mode = "Off" end config_param :run_interval, :time, :default => 60 @@ -102,8 +103,10 @@ def enumerate(podList = nil) $log.info("in_kube_perfinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_perfinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_kube_perfinventory::enumerate: using data collection excludeNameSpaces: #{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kube_perfinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @mode = ExtensionUtils.getNamespacesModeForDataCollection() + $log.info("in_kube_perfinventory::enumerate: using data collection mode for nameSpaces: #{@mode} @ #{Time.now.utc.iso8601}") end nodeAllocatableRecords = getNodeAllocatableRecords() @@ -139,7 +142,7 @@ def parse_and_emit_records(podInventory, nodeAllocatableRecords, continuationTok begin #begin block start podInventory["items"].each do |item| #podInventory block start - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @mode, @nameSpaces) nodeName = "" if !item["spec"]["nodeName"].nil? nodeName = item["spec"]["nodeName"] @@ -352,24 +355,24 @@ def watch_pods if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) key = item["metadata"]["uid"] if !key.nil? && !key.empty? - podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) - if !podItem.nil? && !podItem.empty? - @podCacheMutex.synchronize { - @podItemsCache[key] = podItem - } - else - $log.warn "in_kube_perfinventory::watch_pods:Received podItem is empty or nil @ #{Time.now.utc.iso8601}" - end - else - $log.warn "in_kube_perfinventory::watch_pods:Received poduid either nil or empty @ #{Time.now.utc.iso8601}" - end - elsif notice["type"] == "DELETED" - key = item["metadata"]["uid"] - if !key.nil? && !key.empty? + podItem = KubernetesApiClient.getOptimizedItem("pods-perf", item) + if !podItem.nil? && !podItem.empty? @podCacheMutex.synchronize { - @podItemsCache.delete(key) + @podItemsCache[key] = podItem } + else + $log.warn "in_kube_perfinventory::watch_pods:Received podItem is empty or nil @ #{Time.now.utc.iso8601}" end + else + $log.warn "in_kube_perfinventory::watch_pods:Received poduid either nil or empty @ #{Time.now.utc.iso8601}" + end + elsif notice["type"] == "DELETED" + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? 
+ @podCacheMutex.synchronize { + @podItemsCache.delete(key) + } + end end when "ERROR" podsResourceVersion = nil diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index fa847ea9e..0ab181759 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -59,7 +59,8 @@ def initialize @kubeservicesTag = "oneagent.containerInsights.KUBE_SERVICES_BLOB" @containerInventoryTag = "oneagent.containerInsights.CONTAINER_INVENTORY_BLOB" - @excludeNameSpaces = [] + @nameSpaces = [] + @mode = "Off" end config_param :run_interval, :time, :default => 60 @@ -174,8 +175,10 @@ def enumerate(podList = nil) $log.info("in_kube_podinventory::enumerate: using kubepodinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_kube_podinventory::enumerate: using data collection excludeNameSpaces: #{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kube_podinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @mode = ExtensionUtils.getNamespacesModeForDataCollection() + $log.info("in_kube_podinventory::enumerate: using data collection mode for nameSpaces: #{@mode} @ #{Time.now.utc.iso8601}") end serviceInventory = {} @@ -235,8 +238,11 @@ def enumerate(podList = nil) telemetryProperties["SERVICE_ITEMS_CACHE_SIZE_KB"] = serviceItemsCacheSizeKB telemetryProperties["WINDOWS_CONTAINER_RECORDS_CACHE_SIZE_KB"] = @windowsContainerRecordsCacheSizeBytes / 1024 end - if !@excludeNameSpaces.nil? && !@excludeNameSpaces.empty? && @excludeNameSpaces.length > 0 - telemetryProperties["DATA_COLLECTION_EXCLUDED_NAMESPACES"] = @excludeNameSpaces + if !@nameSpaces.nil? && !@nameSpaces.empty? && @nameSpaces.length > 0 + telemetryProperties["DATA_COLLECTION_NAMESPACES"] = @nameSpaces + end + if !@mode.nil? && !@mode.empty? + telemetryProperties["DATA_COLLECTION_NAMESPACES_MODE"] = @mode end if @run_interval > 60 telemetryProperties["DATA_COLLECTION_INTERVAL_MINUTES"] = @run_interval / 60 @@ -286,7 +292,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc begin #begin block start podInventory["items"].each do |item| #podInventory block start - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"],item["metadata"]["namespace"], @excludeNameSpaces ) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @mode, @nameSpaces) # pod inventory records podInventoryRecords = getPodInventoryRecords(item, serviceRecords, batchTime) @containerCount += podInventoryRecords.length @@ -384,7 +390,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc if continuationToken.nil? 
# sending kube services inventory records kubeServicesEventStream = Fluent::MultiEventStream.new serviceRecords.each do |kubeServiceRecord| - next unless !KubernetesApiClient.isExcludeResourceItem(kubeServiceRecord["ServiceName"], kubeServiceRecord["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(kubeServiceRecord["ServiceName"], kubeServiceRecord["namespace"], @mode, @nameSpaces) if !kubeServiceRecord.nil? # adding before emit to reduce memory foot print kubeServiceRecord["ClusterId"] = KubernetesApiClient.getClusterId @@ -849,35 +855,35 @@ def watch_pods if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) key = item["metadata"]["uid"] if !key.nil? && !key.empty? - currentWindowsNodeNameList = [] - @windowsNodeNameCacheMutex.synchronize { - currentWindowsNodeNameList = @windowsNodeNameListCache.dup - } - isWindowsPodItem = false - nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? item["spec"]["nodeName"] : "" - if !nodeName.empty? && - !currentWindowsNodeNameList.nil? && - !currentWindowsNodeNameList.empty? && - currentWindowsNodeNameList.include?(nodeName) - isWindowsPodItem = true - end - podItem = KubernetesApiClient.getOptimizedItem("pods", item, isWindowsPodItem) - if !podItem.nil? && !podItem.empty? - @podCacheMutex.synchronize { - @podItemsCache[key] = podItem - } - else - $log.warn "in_kube_podinventory::watch_pods:Received podItem is empty or nil @ #{Time.now.utc.iso8601}" - end - else - $log.warn "in_kube_podinventory::watch_pods:Received poduid either nil or empty @ #{Time.now.utc.iso8601}" + currentWindowsNodeNameList = [] + @windowsNodeNameCacheMutex.synchronize { + currentWindowsNodeNameList = @windowsNodeNameListCache.dup + } + isWindowsPodItem = false + nodeName = (!item["spec"].nil? && !item["spec"]["nodeName"].nil?) ? item["spec"]["nodeName"] : "" + if !nodeName.empty? && + !currentWindowsNodeNameList.nil? && + !currentWindowsNodeNameList.empty? && + currentWindowsNodeNameList.include?(nodeName) + isWindowsPodItem = true end - elsif notice["type"] == "DELETED" - key = item["metadata"]["uid"] - if !key.nil? && !key.empty? + podItem = KubernetesApiClient.getOptimizedItem("pods", item, isWindowsPodItem) + if !podItem.nil? && !podItem.empty? @podCacheMutex.synchronize { - @podItemsCache.delete(key) + @podItemsCache[key] = podItem } + else + $log.warn "in_kube_podinventory::watch_pods:Received podItem is empty or nil @ #{Time.now.utc.iso8601}" + end + else + $log.warn "in_kube_podinventory::watch_pods:Received poduid either nil or empty @ #{Time.now.utc.iso8601}" + end + elsif notice["type"] == "DELETED" + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + @podCacheMutex.synchronize { + @podItemsCache.delete(key) + } end end when "ERROR" @@ -990,26 +996,26 @@ def watch_services end # exclude resource item if this in excluded namespaces if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) - key = item["metadata"]["uid"] - if !key.nil? && !key.empty? - serviceItem = KubernetesApiClient.getOptimizedItem("services", item) - if !serviceItem.nil? && !serviceItem.empty? - @serviceCacheMutex.synchronize { - @serviceItemsCache[key] = serviceItem - } - else - $log.warn "in_kube_podinventory::watch_services:Received serviceItem either nil or empty @ #{Time.now.utc.iso8601}" - end - else - $log.warn "in_kube_podinventory::watch_services:Received serviceuid either nil or empty @ #{Time.now.utc.iso8601}" - end - elsif notice["type"] == "DELETED" - key = item["metadata"]["uid"] - if !key.nil? 
&& !key.empty? + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + serviceItem = KubernetesApiClient.getOptimizedItem("services", item) + if !serviceItem.nil? && !serviceItem.empty? @serviceCacheMutex.synchronize { - @serviceItemsCache.delete(key) + @serviceItemsCache[key] = serviceItem } + else + $log.warn "in_kube_podinventory::watch_services:Received serviceItem either nil or empty @ #{Time.now.utc.iso8601}" end + else + $log.warn "in_kube_podinventory::watch_services:Received serviceuid either nil or empty @ #{Time.now.utc.iso8601}" + end + elsif notice["type"] == "DELETED" + key = item["metadata"]["uid"] + if !key.nil? && !key.empty? + @serviceCacheMutex.synchronize { + @serviceItemsCache.delete(key) + } + end end when "ERROR" servicesResourceVersion = nil diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 37417ea43..c4cf70d95 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -1,7 +1,7 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -require 'fluent/plugin/input' +require "fluent/plugin/input" module Fluent::Plugin class Kube_PVInventory_Input < Input @@ -24,6 +24,8 @@ def initialize # Response size is around 1500 bytes per PV @PV_CHUNK_SIZE = "5000" @pvTypeToCountHash = {} + @nameSpaces = [] + @mode = "Off" end config_param :run_interval, :time, :default => 60 @@ -69,8 +71,10 @@ def enumerate end @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_pvinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_kube_pvinventory::enumerate: using data collection excludeNameSpaces: #{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kube_pvinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @mode = ExtensionUtils.getNamespacesModeForDataCollection() + $log.info("in_kube_pvinventory::enumerate: using data collection mode for nameSpaces: #{@mode} @ #{Time.now.utc.iso8601}") end continuationToken = nil @@ -111,7 +115,6 @@ def enumerate ApplicationInsightsUtility.sendCustomEvent(Constants::PV_INVENTORY_HEART_BEAT_EVENT, telemetryProperties) @@pvTelemetryTimeTracker = DateTime.now.to_time.to_i end - rescue => errorStr $log.warn "in_kube_pvinventory::enumerate:Failed in enumerate: #{errorStr}" $log.debug_backtrace(errorStr.backtrace) @@ -137,19 +140,18 @@ def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) record["PVType"] = type record["PVTypeInfo"] = typeInfo - next unless !KubernetesApiClient.isExcludeResourceItem(pvcName, pvcNamespace, @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(pvcName, pvcNamespace, @mode, @nameSpaces) record["CollectionTime"] = batchTime record["ClusterId"] = KubernetesApiClient.getClusterId record["ClusterName"] = KubernetesApiClient.getClusterName record["PVName"] = item["metadata"]["name"] record["PVStatus"] = item["status"]["phase"] - record["PVAccessModes"] = item["spec"]["accessModes"].join(', ') + record["PVAccessModes"] = item["spec"]["accessModes"].join(", ") record["PVStorageClassName"] = item["spec"]["storageClassName"] record["PVCapacityBytes"] = KubernetesApiClient.getMetricNumericValue("memory", item["spec"]["capacity"]["storage"]) record["PVCreationTimeStamp"] = 
item["metadata"]["creationTimestamp"] - records.push(record) # Record telemetry @@ -173,7 +175,6 @@ def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) $log.info("kubePVInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") end - rescue => errorStr $log.warn "Failed in parse_and_emit_record for in_kube_pvinventory: #{errorStr}" $log.debug_backtrace(errorStr.backtrace) @@ -219,7 +220,6 @@ def getTypeInfo(item) # Can only have one type: return right away when found return pvType, typeInfo - end end end @@ -233,7 +233,6 @@ def getTypeInfo(item) return nil, {} end - def run_periodic @mutex.lock done = @finished diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index e4ac1f11b..50c412be9 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -1,7 +1,7 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -require 'fluent/plugin/input' +require "fluent/plugin/input" module Fluent::Plugin class Kube_Kubestate_Deployments_Input < Input @@ -35,6 +35,8 @@ def initialize @NodeName = OMS::Common.get_hostname @ClusterId = KubernetesApiClient.getClusterId @ClusterName = KubernetesApiClient.getClusterName + @nameSpaces = [] + @mode = "Off" end config_param :run_interval, :time, :default => 60 @@ -90,8 +92,10 @@ def enumerate end @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kubestate_deployments::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_kubestate_deployments::enumerate: using data collection excludeNameSpaces: #{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kubestate_deployments::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @mode = ExtensionUtils.getNamespacesModeForDataCollection() + $log.info("in_kubestate_deployments::enumerate: using data collection mode for nameSpaces: #{@mode} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil @@ -144,7 +148,7 @@ def parse_and_emit_records(deployments, batchTime = Time.utc.iso8601) begin metricInfo = deployments metricInfo["items"].each do |deployment| - next unless !KubernetesApiClient.isExcludeResourceItem(deployment["metadata"]["name"], deployment["metadata"]["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(deployment["metadata"]["name"], deployment["metadata"]["namespace"], @mode, @nameSpaces) deploymentName = deployment["metadata"]["name"] deploymentNameSpace = deployment["metadata"]["namespace"] deploymentCreatedTime = "" diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 6498517c2..3fd11b03b 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -1,7 +1,7 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -require 'fluent/plugin/input' +require "fluent/plugin/input" module Fluent::Plugin class Kube_Kubestate_HPA_Input < Input @@ -32,7 +32,8 @@ def initialize @NodeName = OMS::Common.get_hostname @ClusterId = KubernetesApiClient.getClusterId @ClusterName = KubernetesApiClient.getClusterName - @excludeNameSpaces = [] + @nameSpaces = [] + @mode = 
"Off" end config_param :run_interval, :time, :default => 60 @@ -85,11 +86,13 @@ def enumerate if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) @tag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) end - $log.info("in_kubestate_hpa::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") + $log.info("in_kubestate_hpa::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kubestate_hpa::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_kubestate_hpa::enumerate: using data collection excludeNameSpaces: #{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kubestate_hpa::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @mode = ExtensionUtils.getNamespacesModeForDataCollection() + $log.info("in_kubestate_hpa::enumerate: using data collection mode for nameSpaces: #{@mode} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil @@ -133,7 +136,7 @@ def parse_and_emit_records(hpas, batchTime = Time.utc.iso8601) begin metricInfo = hpas metricInfo["items"].each do |hpa| - next unless !KubernetesApiClient.isExcludeResourceItem(hpa["metadata"]["name"], hpa["metadata"]["namespace"], @excludeNameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(hpa["metadata"]["name"], hpa["metadata"]["namespace"], @mode, @nameSpaces) hpaName = hpa["metadata"]["name"] hpaNameSpace = hpa["metadata"]["namespace"] hpaCreatedTime = "" diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 34aa08054..b3cd0547c 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -22,7 +22,8 @@ def initialize require_relative "constants" require_relative "extension_utils" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" - @excludeNameSpaces = [] + @nameSpaces = [] + @mode = "Off" end config_param :run_interval, :time, :default => 60 @@ -68,14 +69,17 @@ def enumerate() if @insightsMetricsTag.nil? 
|| !@insightsMetricsTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) @insightsMetricsTag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) end - $log.info("in_win_cadvisor_perf::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") + $log.info("in_win_cadvisor_perf::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") $log.info("in_win_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_win_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @excludeNameSpaces = ExtensionUtils.getNamespacesToExcludeForDataCollection() - $log.info("in_win_cadvisor_perf::enumerate: using data collection excludeNameSpaces: #{@excludeNameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_win_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + + @mode = ExtensionUtils.getNamespacesModeForDataCollection() + $log.info("in_cadvisor_perf::enumerate: using data collection mode for nameSpaces: #{@mode} @ #{Time.now.utc.iso8601}") end #Resetting this cache so that it is populated with the current set of containers with every call @@ -91,7 +95,7 @@ def enumerate() end @@winNodes.each do |winNode| eventStream = Fluent::MultiEventStream.new - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, excludeNameSpaces: @excludeNameSpaces, metricTime: Time.now.utc.iso8601) + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, mode: @mode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601) metricData.each do |record| if !record.empty? eventStream.add(time, record) if record @@ -106,7 +110,7 @@ def enumerate() #start GPU InsightsMetrics items begin containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, excludeNameSpaces: @excludeNameSpaces, metricTime: Time.now.utc.iso8601)) + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601)) insightsMetricsEventStream = Fluent::MultiEventStream.new containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| From f2865b5bb041f07c44aef64d024c927a673978f4 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 17 Nov 2022 23:20:25 -0800 Subject: [PATCH 277/301] implement namespaces filtering mode --- source/plugins/ruby/in_cadvisor_perf.rb | 2 +- source/plugins/ruby/in_win_cadvisor_perf.rb | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index 5e98fc335..841efa911 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -99,7 +99,7 @@ def enumerate() begin if !@@isWindows.nil? 
&& @@isWindows == false containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, nameSpaces: @nameSpaces, metricTime: batchTime)) + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, mode: @mode, nameSpaces: @nameSpaces, metricTime: batchTime)) containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index b3cd0547c..32dca236a 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -110,7 +110,7 @@ def enumerate() #start GPU InsightsMetrics items begin containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601)) + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, mode: @mode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601)) insightsMetricsEventStream = Fluent::MultiEventStream.new containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| From b705bb47849ce28e7f736cb8608b860effe73ac3 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 17 Nov 2022 23:25:34 -0800 Subject: [PATCH 278/301] implement namespaces filtering mode --- source/plugins/ruby/CAdvisorMetricsAPIClient.rb | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 04b5fccac..eda019237 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -168,11 +168,11 @@ def getMetrics(winNode: nil, mode: "Off", nameSpaces: [], metricTime: Time.now.u metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime)) else metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, mode, nameSpaces)) - metricDataItems.concat(getContainerStartTimeMetricItems(metricInfo, hostName, "restartTimeEpoch", metricTime, nameSpaces)) + metricDataItems.concat(getContainerStartTimeMetricItems(metricInfo, hostName, "restartTimeEpoch", metricTime, mode, nameSpaces)) if operatingSystem == "Linux" - metricDataItems.concat(getContainerCpuMetricItems(metricInfo, hostName, "usageNanoCores", Constants::CPU_USAGE_NANO_CORES, metricTime, nameSpaces)) - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime, operatingSystem, nameSpaces)) + metricDataItems.concat(getContainerCpuMetricItems(metricInfo, hostName, "usageNanoCores", Constants::CPU_USAGE_NANO_CORES, metricTime, mode, nameSpaces)) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime, operatingSystem, mode, nameSpaces)) metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime)) elsif operatingSystem == "Windows" containerCpuUsageNanoSecondsRate = 
getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, mode, nameSpaces) @@ -638,7 +638,7 @@ def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, return metricItems end - def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollect, metricNametoReturn, metricPollTime, operatingSystem, nameSpaces) + def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollect, metricNametoReturn, metricPollTime, operatingSystem, mode, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId timeDifference = (DateTime.now.to_time.to_i - @@telemetryMemoryMetricTimeTracker).abs @@ -887,7 +887,7 @@ def getNodeLastRebootTimeMetric(metricJSON, hostName, metricNametoReturn, metric return metricItem end - def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, metricPollTime, nameSpaces) + def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, metricPollTime, mode, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId #currentTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z From dac996db02bb58c89bacea33357d2623c783ed4c Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Thu, 17 Nov 2022 23:34:45 -0800 Subject: [PATCH 279/301] implement namespaces filtering mode --- source/plugins/ruby/in_kube_podinventory.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 0ab181759..2c544feba 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -242,7 +242,7 @@ def enumerate(podList = nil) telemetryProperties["DATA_COLLECTION_NAMESPACES"] = @nameSpaces end if !@mode.nil? && !@mode.empty? 
- telemetryProperties["DATA_COLLECTION_NAMESPACES_MODE"] = @mode + telemetryProperties["DATA_COLLECTION_NAMESPACES_FILTERING_MODE"] = @mode end if @run_interval > 60 telemetryProperties["DATA_COLLECTION_INTERVAL_MINUTES"] = @run_interval / 60 From 295b7d6fdd57fa320cd56beeb8e67c326b0217dc Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 18 Nov 2022 09:00:02 -0800 Subject: [PATCH 280/301] implement namespaces filtering mode --- source/plugins/ruby/CAdvisorMetricsAPIClient.rb | 4 ++-- source/plugins/ruby/KubernetesApiClient.rb | 11 +++++++---- source/plugins/ruby/constants.rb | 2 +- source/plugins/ruby/extension_utils.rb | 9 ++++++--- source/plugins/ruby/in_cadvisor_perf.rb | 2 +- source/plugins/ruby/in_containerinventory.rb | 2 +- source/plugins/ruby/in_kube_events.rb | 2 +- source/plugins/ruby/in_kube_perfinventory.rb | 2 +- source/plugins/ruby/in_kube_podinventory.rb | 2 +- source/plugins/ruby/in_kube_pvinventory.rb | 2 +- source/plugins/ruby/in_kubestate_deployments.rb | 2 +- source/plugins/ruby/in_kubestate_hpa.rb | 2 +- source/plugins/ruby/in_win_cadvisor_perf.rb | 2 +- 13 files changed, 25 insertions(+), 19 deletions(-) diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index eda019237..7fa65d09f 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -133,7 +133,7 @@ def getCAdvisorUri(winNode, relativeUri) return baseUri + relativeUri end - def getMetrics(winNode: nil, mode: "Off", nameSpaces: [], metricTime: Time.now.utc.iso8601) + def getMetrics(winNode: nil, mode: "off", nameSpaces: [], metricTime: Time.now.utc.iso8601) metricDataItems = [] begin cAdvisorStats = getSummaryStatsFromCAdvisor(winNode) @@ -312,7 +312,7 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met return metricItems end - def getInsightsMetrics(winNode: nil, mode: "Off", nameSpaces: [], metricTime: Time.now.utc.iso8601) + def getInsightsMetrics(winNode: nil, mode: "off", nameSpaces: [], metricTime: Time.now.utc.iso8601) metricDataItems = [] begin cAdvisorStats = getSummaryStatsFromCAdvisor(winNode) diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 716870fce..befaa8e1f 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -1412,10 +1412,13 @@ def isEmitCacheTelemetry def isExcludeResourceItem(resourceName, resourceNamespace, mode, nameSpaces) isExclude = false begin - if mode == "Exclude" - if !resourceName.nil? && !resourceName.empty? && resourceName.start_with?("ama-logs") && resourceNamespace.eql?("kube-system") - isExclude = false - elsif !resourceNamespace.nil? && !resourceNamespace.empty? && !nameSpaces.nil? && !nameSpaces.empty? && nameSpaces.length > 0 && nameSpaces.include?(resourceNamespace) + # include or exclude doesnt applicable for ama-logs agent as customer needs to monitor the agent + if !resourceName.nil? && !resourceName.empty? && resourceName.start_with?("ama-logs") && resourceNamespace.eql?("kube-system") + isExclude = false + elsif !resourceNamespace.nil? && !resourceNamespace.empty? && !nameSpaces.nil? && !nameSpaces.empty? 
&& nameSpaces.length > 0 + if mode == "exclude" && nameSpaces.include?(resourceNamespace) + isExclude = true + elsif mode == "include" && !nameSpaces.include?(resourceNamespace) isExclude = true end end diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index d66024f20..f2bcaf928 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -150,7 +150,7 @@ class Constants EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS = "dataCollectionSettings" EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL = "interval" EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_MODE = "mode" - EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES = ["Off", "Include", "Exlcude"] + EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES = ["off", "include", "exlcude"] EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES = "nameSpaces" # min and max data collection interval minutes diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index 43248aac4..29125d8da 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -74,15 +74,18 @@ def getNamespacesForDataCollection end def getNamespacesModeForDataCollection - nameSpaceMode = "Off" + nameSpaceMode = "off" begin dataCollectionSettings = Extension.instance.get_extension_data_collection_settings() if !dataCollectionSettings.nil? && !dataCollectionSettings.empty? && dataCollectionSettings.has_key?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_MODE) mode = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_MODE] - if !mode.nil? && !mode.empty? && Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES.include?(mode) - nameSpaceMode = mode + if !mode.nil? && !mode.empty? 
+ nameSpaceMode = mode.downcase + if !Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES.include?(nameSpaceMode) + $log.warn("ExtensionUtils::getNamespacesModeForDataCollection: nameSpaceMode: #{mode} not supported hence using default") + end else $log.warn("ExtensionUtils::getNamespacesModeForDataCollection: nameSpaceMode: #{mode} not valid hence using default") end diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index 841efa911..fbdedf132 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -23,7 +23,7 @@ def initialize require_relative "constants" require_relative "extension_utils" @nameSpaces = [] - @mode = "Off" + @mode = "off" end config_param :run_interval, :time, :default => 60 diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index 9f4162d0a..206a95be8 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -58,7 +58,7 @@ def enumerate containerInventory = Array.new eventStream = Fluent::MultiEventStream.new hostName = "" - @mode = "Off" + @mode = "off" @nameSpaces = [] $log.info("in_container_inventory::enumerate : Begin processing @ #{Time.now.utc.iso8601}") if ExtensionUtils.isAADMSIAuthMode() diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index 3905f2b6c..460646014 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -29,7 +29,7 @@ def initialize # Initilize enable/disable normal event collection @collectAllKubeEvents = false @nameSpaces = [] - @mode = "Off" + @mode = "off" end config_param :run_interval, :time, :default => 60 diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index 8e40a1208..9175e32bc 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -33,7 +33,7 @@ def initialize @kubeperfTag = "oneagent.containerInsights.LINUX_PERF_BLOB" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" @nameSpaces = [] - @mode = "Off" + @mode = "off" end config_param :run_interval, :time, :default => 60 diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 2c544feba..fd078cfbc 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -60,7 +60,7 @@ def initialize @kubeservicesTag = "oneagent.containerInsights.KUBE_SERVICES_BLOB" @containerInventoryTag = "oneagent.containerInsights.CONTAINER_INVENTORY_BLOB" @nameSpaces = [] - @mode = "Off" + @mode = "off" end config_param :run_interval, :time, :default => 60 diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index c4cf70d95..5e5ec4282 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -25,7 +25,7 @@ def initialize @PV_CHUNK_SIZE = "5000" @pvTypeToCountHash = {} @nameSpaces = [] - @mode = "Off" + @mode = "off" end config_param :run_interval, :time, :default => 60 diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index 50c412be9..9bf3f16cc 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -36,7 +36,7 @@ def initialize @ClusterId = 
KubernetesApiClient.getClusterId @ClusterName = KubernetesApiClient.getClusterName @nameSpaces = [] - @mode = "Off" + @mode = "off" end config_param :run_interval, :time, :default => 60 diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 3fd11b03b..f4da6771b 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -33,7 +33,7 @@ def initialize @ClusterId = KubernetesApiClient.getClusterId @ClusterName = KubernetesApiClient.getClusterName @nameSpaces = [] - @mode = "Off" + @mode = "off" end config_param :run_interval, :time, :default => 60 diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 32dca236a..e1f2535ba 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -23,7 +23,7 @@ def initialize require_relative "extension_utils" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" @nameSpaces = [] - @mode = "Off" + @mode = "off" end config_param :run_interval, :time, :default => 60 From 83c183ad1c5e0477cf8747fc219ea3c0fd6848bc Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sat, 19 Nov 2022 09:16:52 -0800 Subject: [PATCH 281/301] clean up --- .../existingClusterOnboarding.json | 4 ++-- .../existingClusterOnboarding.json | 6 +++--- .../existingClusterOnboarding.json | 4 ++-- source/plugins/ruby/KubernetesApiClient.rb | 18 ++++++++++-------- source/plugins/ruby/in_kube_podinventory.rb | 2 -- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index 44885f1c4..6bb53b51d 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -59,7 +59,7 @@ "nameSpacesFilteringModeForDataCollection": { "type": "string", "metadata": { - "description": "Data collection Mode for the namespaces" + "description": "Data collection Filtering Mode for the namespaces" }, "allowedValues": [ "Off", @@ -71,7 +71,7 @@ "namespacesForDataCollection": { "type": "array", "metadata": { - "description": "Kubernetes namespaces for the data collection of inventory and metrics" + "description": "An array of Kubernetes namespaces for the data collection of inventory, events and metrics" } } }, diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json index f120762cb..689efae4d 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json @@ -73,7 +73,7 @@ "nameSpacesFilteringModeForDataCollection": { "type": "string", "metadata": { - "description": "Data collection Mode for the namespaces" + "description": "Data collection Filtering Mode for the namespaces" }, "allowedValues": [ "Off", @@ -85,8 +85,8 @@ "namespacesForDataCollection": { "type": "array", "metadata": { - "description": "Kubernetes namespaces for the data collection of inventory and metrics" - } + "description": "An array of Kubernetes namespaces for the data collection of inventory, events and metrics" + } } }, "variables": { diff --git 
a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json index 59fd6be6d..542781af1 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json @@ -73,7 +73,7 @@ "nameSpacesFilteringModeForDataCollection": { "type": "string", "metadata": { - "description": "Data collection Mode for the namespaces" + "description": "Data collection Filtering Mode for the namespaces" }, "allowedValues": [ "Off", @@ -85,7 +85,7 @@ "namespacesForDataCollection": { "type": "array", "metadata": { - "description": "Kubernetes namespaces for the data collection of inventory and metrics" + "description": "An array of Kubernetes namespaces for the data collection of inventory, events and metrics" } } }, diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index befaa8e1f..122cba0f5 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -1412,14 +1412,16 @@ def isEmitCacheTelemetry def isExcludeResourceItem(resourceName, resourceNamespace, mode, nameSpaces) isExclude = false begin - # include or exclude doesnt applicable for ama-logs agent as customer needs to monitor the agent - if !resourceName.nil? && !resourceName.empty? && resourceName.start_with?("ama-logs") && resourceNamespace.eql?("kube-system") - isExclude = false - elsif !resourceNamespace.nil? && !resourceNamespace.empty? && !nameSpaces.nil? && !nameSpaces.empty? && nameSpaces.length > 0 - if mode == "exclude" && nameSpaces.include?(resourceNamespace) - isExclude = true - elsif mode == "include" && !nameSpaces.include?(resourceNamespace) - isExclude = true + if !resourceName.nil? && !resourceName.empty? && !resourceNamespace.nil? && !resourceNamespace.empty? + # data collection namespace filtering not applicable for ama-logs agent as customer needs to monitor the agent + if resourceName.start_with?("ama-logs") && resourceNamespace.eql?("kube-system") + isExclude = false + elsif !nameSpaces.nil? && !nameSpaces.empty? && nameSpaces.length > 0 + if mode == "exclude" && nameSpaces.include?(resourceNamespace) + isExclude = true + elsif mode == "include" && !nameSpaces.include?(resourceNamespace) + isExclude = true + end end end rescue => errorStr diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index fd078cfbc..c9fa17dd8 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -723,7 +723,6 @@ def getServiceNameFromLabels(namespace, labels, serviceRecords) def watch_pods $log.info("in_kube_podinventory::watch_pods:Start @ #{Time.now.utc.iso8601}") podsResourceVersion = nil - excludeNameSpaces = [] # invoke getWindowsNodes to handle scenario where windowsNodeNameCache not populated yet on containerstart winNodes = KubernetesApiClient.getWindowsNodesArray() if winNodes.length > 0 @@ -994,7 +993,6 @@ def watch_services # We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! break end - # exclude resource item if this in excluded namespaces if ((notice["type"] == "ADDED") || (notice["type"] == "MODIFIED")) key = item["metadata"]["uid"] if !key.nil? && !key.empty? 
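
Taken together, patches 277-281 thread two values from the extension dataCollectionSettings — a filtering mode ("Off", "Include" or "Exclude", lowercased by the agent before use) and a list of namespaces — into every input plugin, which then gates each resource through KubernetesApiClient.isExcludeResourceItem. Below is a minimal, self-contained Ruby sketch of that decision for reference; the method name, the sample settings hash and the pod names are illustrative stand-ins, not the agent's actual configuration.

    #!/usr/local/bin/ruby
    # Minimal sketch of the namespace filtering decision introduced above.
    # Sample values only; in the agent the mode and namespace list arrive via the
    # "mode" and "nameSpaces" keys under dataCollectionSettings.
    settings = { mode: "exclude", nameSpaces: ["kube-system", "gatekeeper-system"] }

    def exclude_resource?(resource_name, resource_namespace, mode, name_spaces)
      # An empty name or namespace is never excluded (mirrors the guard in the patch).
      return false if resource_name.to_s.empty? || resource_namespace.to_s.empty?
      # The ama-logs agent itself is always collected so the agent stays monitorable.
      return false if resource_name.start_with?("ama-logs") && resource_namespace == "kube-system"
      return false if name_spaces.nil? || name_spaces.empty?
      case mode
      when "exclude" then name_spaces.include?(resource_namespace)   # drop listed namespaces
      when "include" then !name_spaces.include?(resource_namespace)  # drop everything else
      else false                                                     # "off": collect everything
      end
    end

    # With mode "exclude", a pod in kube-system is dropped while the ama-logs
    # pod in the same namespace is still collected.
    puts exclude_resource?("nginx-ingress-abc", "kube-system", settings[:mode], settings[:nameSpaces]) # => true
    puts exclude_resource?("ama-logs-xyz", "kube-system", settings[:mode], settings[:nameSpaces])      # => false

Patch 282 that follows only renames the @mode instance variable to @nameSpacesFilteringMode across the plugins; the filtering decision itself is unchanged.
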
From 627b6e5f460665fdbeabb023703bf5fa4b93b586 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sat, 19 Nov 2022 09:31:36 -0800 Subject: [PATCH 282/301] better naming --- source/plugins/ruby/extension_utils.rb | 16 ++++++++-------- source/plugins/ruby/in_cadvisor_perf.rb | 10 +++++----- source/plugins/ruby/in_containerinventory.rb | 8 ++++---- source/plugins/ruby/in_kube_events.rb | 8 ++++---- source/plugins/ruby/in_kube_perfinventory.rb | 8 ++++---- source/plugins/ruby/in_kube_podinventory.rb | 14 +++++++------- source/plugins/ruby/in_kube_pvinventory.rb | 8 ++++---- source/plugins/ruby/in_kubestate_deployments.rb | 8 ++++---- source/plugins/ruby/in_kubestate_hpa.rb | 8 ++++---- source/plugins/ruby/in_win_cadvisor_perf.rb | 10 +++++----- 10 files changed, 49 insertions(+), 49 deletions(-) diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index 29125d8da..1b53cdc1f 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -73,8 +73,8 @@ def getNamespacesForDataCollection return nameSpaces end - def getNamespacesModeForDataCollection - nameSpaceMode = "off" + def getNamespacesFilteringModeForDataCollection + nameSpacesFilteringMode = "off" begin dataCollectionSettings = Extension.instance.get_extension_data_collection_settings() if !dataCollectionSettings.nil? && @@ -82,18 +82,18 @@ def getNamespacesModeForDataCollection dataCollectionSettings.has_key?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_MODE) mode = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_MODE] if !mode.nil? && !mode.empty? - nameSpaceMode = mode.downcase - if !Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES.include?(nameSpaceMode) - $log.warn("ExtensionUtils::getNamespacesModeForDataCollection: nameSpaceMode: #{mode} not supported hence using default") + nameSpacesFilteringMode = mode.downcase + if !Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES.include?(nameSpacesFilteringMode) + $log.warn("ExtensionUtils::getNamespacesFilteringModeForDataCollection: nameSpacesFilteringMode: #{mode} not supported hence using default") end else - $log.warn("ExtensionUtils::getNamespacesModeForDataCollection: nameSpaceMode: #{mode} not valid hence using default") + $log.warn("ExtensionUtils::getNamespacesFilteringModeForDataCollection: nameSpacesFilteringMode: #{mode} not valid hence using default") end end rescue => errorStr - $log.warn("ExtensionUtils::getNamespacesModeForDataCollection: failed with an exception: #{errorStr}") + $log.warn("ExtensionUtils::getNamespacesFilteringModeForDataCollection: failed with an exception: #{errorStr}") end - return nameSpaceMode + return nameSpacesFilteringMode end end end diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index fbdedf132..a67caae6e 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -23,7 +23,7 @@ def initialize require_relative "constants" require_relative "extension_utils" @nameSpaces = [] - @mode = "off" + @nameSpacesFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -79,11 +79,11 @@ def enumerate() $log.info("in_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_cadvisor_perf::enumerate: using data 
collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @mode = ExtensionUtils.getNamespacesModeForDataCollection() - $log.info("in_cadvisor_perf::enumerate: using data collection mode for nameSpaces: #{@mode} @ #{Time.now.utc.iso8601}") + @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_cadvisor_perf::enumerate: using data collection mode for nameSpaces: #{@nameSpacesFilteringMode} @ #{Time.now.utc.iso8601}") end - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, mode: @mode, nameSpaces: @nameSpaces, metricTime: batchTime) + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, mode: @nameSpacesFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime) metricData.each do |record| eventStream.add(time, record) if record end @@ -99,7 +99,7 @@ def enumerate() begin if !@@isWindows.nil? && @@isWindows == false containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, mode: @mode, nameSpaces: @nameSpaces, metricTime: batchTime)) + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, mode: @nameSpacesFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime)) containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index 206a95be8..bda6820e7 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -58,7 +58,7 @@ def enumerate containerInventory = Array.new eventStream = Fluent::MultiEventStream.new hostName = "" - @mode = "off" + @nameSpacesFilteringMode = "off" @nameSpaces = [] $log.info("in_container_inventory::enumerate : Begin processing @ #{Time.now.utc.iso8601}") if ExtensionUtils.isAADMSIAuthMode() @@ -71,8 +71,8 @@ def enumerate $log.info("in_container_inventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_container_inventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @mode = ExtensionUtils.getNamespacesModeForDataCollection() - $log.info("in_container_inventory::enumerate: using data collection mode for nameSpaces: #{@mode} @ #{Time.now.utc.iso8601}") + @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_container_inventory::enumerate: using data collection mode for nameSpaces: #{@nameSpacesFilteringMode} @ #{Time.now.utc.iso8601}") end begin containerRuntimeEnv = ENV["CONTAINER_RUNTIME"] @@ -85,7 +85,7 @@ def enumerate podList = JSON.parse(response.body) if !podList.nil? && !podList.empty? && podList.key?("items") && !podList["items"].nil? && !podList["items"].empty? 
podList["items"].each do |item| - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @mode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @nameSpacesFilteringMode, @nameSpaces) containerInventoryRecords = KubernetesContainerInventory.getContainerInventoryRecords(item, batchTime, clusterCollectEnvironmentVar) containerInventoryRecords.each do |containerRecord| ContainerInventoryState.writeContainerState(containerRecord) diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index 7f4a393ef..dbd33c83b 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -29,7 +29,7 @@ def initialize # Initilize enable/disable normal event collection @collectAllKubeEvents = false @nameSpaces = [] - @mode = "off" + @nameSpacesFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -96,8 +96,8 @@ def enumerate $log.info("in_kube_events::enumerate: using kubeevents tag -#{@tag} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_events::enumerate: using data collection nameSpaces -#{@nameSpaces} @ #{Time.now.utc.iso8601}") - @mode = ExtensionUtils.getNamespacesModeForDataCollection() - $log.info("in_kube_events::enumerate: using data collection mode for nameSpaces -#{@mode} @ #{Time.now.utc.iso8601}") + @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kube_events::enumerate: using data collection mode for nameSpaces -#{@nameSpacesFilteringMode} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil @@ -170,7 +170,7 @@ def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTim end # drop the events if the event of the excluded namespace - next unless !KubernetesApiClient.isExcludeResourceItem("", items["involvedObject"]["namespace"], @mode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem("", items["involvedObject"]["namespace"], @nameSpacesFilteringMode, @nameSpaces) record["ObjectKind"] = items["involvedObject"]["kind"] record["Namespace"] = items["involvedObject"]["namespace"] diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index 9175e32bc..b9d37970b 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -33,7 +33,7 @@ def initialize @kubeperfTag = "oneagent.containerInsights.LINUX_PERF_BLOB" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" @nameSpaces = [] - @mode = "off" + @nameSpacesFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -105,8 +105,8 @@ def enumerate(podList = nil) $log.info("in_kube_perfinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_perfinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @mode = ExtensionUtils.getNamespacesModeForDataCollection() - $log.info("in_kube_perfinventory::enumerate: using data collection mode for nameSpaces: #{@mode} @ #{Time.now.utc.iso8601}") + @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kube_perfinventory::enumerate: 
using data collection mode for nameSpaces: #{@nameSpacesFilteringMode} @ #{Time.now.utc.iso8601}") end nodeAllocatableRecords = getNodeAllocatableRecords() @@ -142,7 +142,7 @@ def parse_and_emit_records(podInventory, nodeAllocatableRecords, continuationTok begin #begin block start podInventory["items"].each do |item| #podInventory block start - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @mode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @nameSpacesFilteringMode, @nameSpaces) nodeName = "" if !item["spec"]["nodeName"].nil? nodeName = item["spec"]["nodeName"] diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index c9fa17dd8..0df3a6855 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -60,7 +60,7 @@ def initialize @kubeservicesTag = "oneagent.containerInsights.KUBE_SERVICES_BLOB" @containerInventoryTag = "oneagent.containerInsights.CONTAINER_INVENTORY_BLOB" @nameSpaces = [] - @mode = "off" + @nameSpacesFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -177,8 +177,8 @@ def enumerate(podList = nil) $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_podinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @mode = ExtensionUtils.getNamespacesModeForDataCollection() - $log.info("in_kube_podinventory::enumerate: using data collection mode for nameSpaces: #{@mode} @ #{Time.now.utc.iso8601}") + @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kube_podinventory::enumerate: using data collection mode for nameSpaces: #{@nameSpacesFilteringMode} @ #{Time.now.utc.iso8601}") end serviceInventory = {} @@ -241,8 +241,8 @@ def enumerate(podList = nil) if !@nameSpaces.nil? && !@nameSpaces.empty? && @nameSpaces.length > 0 telemetryProperties["DATA_COLLECTION_NAMESPACES"] = @nameSpaces end - if !@mode.nil? && !@mode.empty? - telemetryProperties["DATA_COLLECTION_NAMESPACES_FILTERING_MODE"] = @mode + if !@nameSpacesFilteringMode.nil? && !@nameSpacesFilteringMode.empty? + telemetryProperties["DATA_COLLECTION_NAMESPACES_FILTERING_MODE"] = @nameSpacesFilteringMode end if @run_interval > 60 telemetryProperties["DATA_COLLECTION_INTERVAL_MINUTES"] = @run_interval / 60 @@ -292,7 +292,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc begin #begin block start podInventory["items"].each do |item| #podInventory block start - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @mode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @nameSpacesFilteringMode, @nameSpaces) # pod inventory records podInventoryRecords = getPodInventoryRecords(item, serviceRecords, batchTime) @containerCount += podInventoryRecords.length @@ -390,7 +390,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc if continuationToken.nil? 
# sending kube services inventory records kubeServicesEventStream = Fluent::MultiEventStream.new serviceRecords.each do |kubeServiceRecord| - next unless !KubernetesApiClient.isExcludeResourceItem(kubeServiceRecord["ServiceName"], kubeServiceRecord["namespace"], @mode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(kubeServiceRecord["ServiceName"], kubeServiceRecord["namespace"], @nameSpacesFilteringMode, @nameSpaces) if !kubeServiceRecord.nil? # adding before emit to reduce memory foot print kubeServiceRecord["ClusterId"] = KubernetesApiClient.getClusterId diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 5e5ec4282..b20d13a82 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -25,7 +25,7 @@ def initialize @PV_CHUNK_SIZE = "5000" @pvTypeToCountHash = {} @nameSpaces = [] - @mode = "off" + @nameSpacesFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -73,8 +73,8 @@ def enumerate $log.info("in_kube_pvinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_pvinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @mode = ExtensionUtils.getNamespacesModeForDataCollection() - $log.info("in_kube_pvinventory::enumerate: using data collection mode for nameSpaces: #{@mode} @ #{Time.now.utc.iso8601}") + @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kube_pvinventory::enumerate: using data collection mode for nameSpaces: #{@nameSpacesFilteringMode} @ #{Time.now.utc.iso8601}") end continuationToken = nil @@ -140,7 +140,7 @@ def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) record["PVType"] = type record["PVTypeInfo"] = typeInfo - next unless !KubernetesApiClient.isExcludeResourceItem(pvcName, pvcNamespace, @mode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(pvcName, pvcNamespace, @nameSpacesFilteringMode, @nameSpaces) record["CollectionTime"] = batchTime record["ClusterId"] = KubernetesApiClient.getClusterId diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index 9bf3f16cc..63290e9fa 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -36,7 +36,7 @@ def initialize @ClusterId = KubernetesApiClient.getClusterId @ClusterName = KubernetesApiClient.getClusterName @nameSpaces = [] - @mode = "off" + @nameSpacesFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -94,8 +94,8 @@ def enumerate $log.info("in_kubestate_deployments::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kubestate_deployments::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @mode = ExtensionUtils.getNamespacesModeForDataCollection() - $log.info("in_kubestate_deployments::enumerate: using data collection mode for nameSpaces: #{@mode} @ #{Time.now.utc.iso8601}") + @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kubestate_deployments::enumerate: using data collection mode for nameSpaces: #{@nameSpacesFilteringMode} @ 
#{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil @@ -148,7 +148,7 @@ def parse_and_emit_records(deployments, batchTime = Time.utc.iso8601) begin metricInfo = deployments metricInfo["items"].each do |deployment| - next unless !KubernetesApiClient.isExcludeResourceItem(deployment["metadata"]["name"], deployment["metadata"]["namespace"], @mode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(deployment["metadata"]["name"], deployment["metadata"]["namespace"], @nameSpacesFilteringMode, @nameSpaces) deploymentName = deployment["metadata"]["name"] deploymentNameSpace = deployment["metadata"]["namespace"] deploymentCreatedTime = "" diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index f4da6771b..dd54732e8 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -33,7 +33,7 @@ def initialize @ClusterId = KubernetesApiClient.getClusterId @ClusterName = KubernetesApiClient.getClusterName @nameSpaces = [] - @mode = "off" + @nameSpacesFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -91,8 +91,8 @@ def enumerate $log.info("in_kubestate_hpa::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kubestate_hpa::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @mode = ExtensionUtils.getNamespacesModeForDataCollection() - $log.info("in_kubestate_hpa::enumerate: using data collection mode for nameSpaces: #{@mode} @ #{Time.now.utc.iso8601}") + @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kubestate_hpa::enumerate: using data collection mode for nameSpaces: #{@nameSpacesFilteringMode} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil @@ -136,7 +136,7 @@ def parse_and_emit_records(hpas, batchTime = Time.utc.iso8601) begin metricInfo = hpas metricInfo["items"].each do |hpa| - next unless !KubernetesApiClient.isExcludeResourceItem(hpa["metadata"]["name"], hpa["metadata"]["namespace"], @mode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(hpa["metadata"]["name"], hpa["metadata"]["namespace"], @nameSpacesFilteringMode, @nameSpaces) hpaName = hpa["metadata"]["name"] hpaNameSpace = hpa["metadata"]["namespace"] hpaCreatedTime = "" diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index e1f2535ba..025e23e23 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -23,7 +23,7 @@ def initialize require_relative "extension_utils" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" @nameSpaces = [] - @mode = "off" + @nameSpacesFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -78,8 +78,8 @@ def enumerate() @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_win_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @mode = ExtensionUtils.getNamespacesModeForDataCollection() - $log.info("in_cadvisor_perf::enumerate: using data collection mode for nameSpaces: #{@mode} @ #{Time.now.utc.iso8601}") + @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_cadvisor_perf::enumerate: using 
data collection mode for nameSpaces: #{@nameSpacesFilteringMode} @ #{Time.now.utc.iso8601}") end #Resetting this cache so that it is populated with the current set of containers with every call @@ -95,7 +95,7 @@ def enumerate() end @@winNodes.each do |winNode| eventStream = Fluent::MultiEventStream.new - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, mode: @mode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601) + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, mode: @nameSpacesFilteringMode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601) metricData.each do |record| if !record.empty? eventStream.add(time, record) if record @@ -110,7 +110,7 @@ def enumerate() #start GPU InsightsMetrics items begin containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, mode: @mode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601)) + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, mode: @nameSpacesFilteringMode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601)) insightsMetricsEventStream = Fluent::MultiEventStream.new containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| From 4417ef4153c61c5235fcac900fed0a2d0938fc36 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sat, 19 Nov 2022 10:26:57 -0800 Subject: [PATCH 283/301] better naming --- source/plugins/ruby/extension_utils.rb | 13 +++++++------ source/plugins/ruby/in_cadvisor_perf.rb | 10 +++++----- source/plugins/ruby/in_containerinventory.rb | 8 ++++---- source/plugins/ruby/in_kube_events.rb | 8 ++++---- source/plugins/ruby/in_kube_perfinventory.rb | 8 ++++---- source/plugins/ruby/in_kube_podinventory.rb | 14 +++++++------- source/plugins/ruby/in_kube_pvinventory.rb | 8 ++++---- source/plugins/ruby/in_kubestate_deployments.rb | 8 ++++---- source/plugins/ruby/in_kubestate_hpa.rb | 8 ++++---- source/plugins/ruby/in_win_cadvisor_perf.rb | 10 +++++----- 10 files changed, 48 insertions(+), 47 deletions(-) diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index 1b53cdc1f..b0f2c89b1 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -74,7 +74,7 @@ def getNamespacesForDataCollection end def getNamespacesFilteringModeForDataCollection - nameSpacesFilteringMode = "off" + nameSpaceFilteringMode = "off" begin dataCollectionSettings = Extension.instance.get_extension_data_collection_settings() if !dataCollectionSettings.nil? && @@ -82,18 +82,19 @@ def getNamespacesFilteringModeForDataCollection dataCollectionSettings.has_key?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_MODE) mode = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_MODE] if !mode.nil? && !mode.empty? 
- nameSpacesFilteringMode = mode.downcase - if !Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES.include?(nameSpacesFilteringMode) - $log.warn("ExtensionUtils::getNamespacesFilteringModeForDataCollection: nameSpacesFilteringMode: #{mode} not supported hence using default") + if Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES.include?(mode.downcase) + nameSpaceFilteringMode = mode.downcase + else + $log.warn("ExtensionUtils::getNamespacesFilteringModeForDataCollection: nameSpaceFilteringMode: #{mode} not supported hence using default") end else - $log.warn("ExtensionUtils::getNamespacesFilteringModeForDataCollection: nameSpacesFilteringMode: #{mode} not valid hence using default") + $log.warn("ExtensionUtils::getNamespacesFilteringModeForDataCollection: nameSpaceFilteringMode: #{mode} not valid hence using default") end end rescue => errorStr $log.warn("ExtensionUtils::getNamespacesFilteringModeForDataCollection: failed with an exception: #{errorStr}") end - return nameSpacesFilteringMode + return nameSpaceFilteringMode end end end diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index a67caae6e..2854cefea 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -23,7 +23,7 @@ def initialize require_relative "constants" require_relative "extension_utils" @nameSpaces = [] - @nameSpacesFilteringMode = "off" + @nameSpaceFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -79,11 +79,11 @@ def enumerate() $log.info("in_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_cadvisor_perf::enumerate: using data collection mode for nameSpaces: #{@nameSpacesFilteringMode} @ #{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_cadvisor_perf::enumerate: using data collection mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, mode: @nameSpacesFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime) + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, mode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime) metricData.each do |record| eventStream.add(time, record) if record end @@ -99,7 +99,7 @@ def enumerate() begin if !@@isWindows.nil? 
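The refactored getter above only accepts values from the supported filtering-mode list and otherwise logs a warning and keeps the default. Illustrative outcomes, where the input is the mode value carried in the extension data collection settings:

    #   "Include"            -> "include"   (downcased, supported)
    #   "foo"                -> "off"       (unsupported, warning logged)
    #   setting not present  -> "off"       (default)
    nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection()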
&& @@isWindows == false containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, mode: @nameSpacesFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime)) + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, mode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime)) containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index bda6820e7..5b7a093f8 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -58,7 +58,7 @@ def enumerate containerInventory = Array.new eventStream = Fluent::MultiEventStream.new hostName = "" - @nameSpacesFilteringMode = "off" + @nameSpaceFilteringMode = "off" @nameSpaces = [] $log.info("in_container_inventory::enumerate : Begin processing @ #{Time.now.utc.iso8601}") if ExtensionUtils.isAADMSIAuthMode() @@ -71,8 +71,8 @@ def enumerate $log.info("in_container_inventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_container_inventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_container_inventory::enumerate: using data collection mode for nameSpaces: #{@nameSpacesFilteringMode} @ #{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_container_inventory::enumerate: using data collection mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end begin containerRuntimeEnv = ENV["CONTAINER_RUNTIME"] @@ -85,7 +85,7 @@ def enumerate podList = JSON.parse(response.body) if !podList.nil? && !podList.empty? && podList.key?("items") && !podList["items"].nil? && !podList["items"].empty? 
podList["items"].each do |item| - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @nameSpacesFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @nameSpaceFilteringMode, @nameSpaces) containerInventoryRecords = KubernetesContainerInventory.getContainerInventoryRecords(item, batchTime, clusterCollectEnvironmentVar) containerInventoryRecords.each do |containerRecord| ContainerInventoryState.writeContainerState(containerRecord) diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index dbd33c83b..34014724f 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -29,7 +29,7 @@ def initialize # Initilize enable/disable normal event collection @collectAllKubeEvents = false @nameSpaces = [] - @nameSpacesFilteringMode = "off" + @nameSpaceFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -96,8 +96,8 @@ def enumerate $log.info("in_kube_events::enumerate: using kubeevents tag -#{@tag} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_events::enumerate: using data collection nameSpaces -#{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kube_events::enumerate: using data collection mode for nameSpaces -#{@nameSpacesFilteringMode} @ #{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kube_events::enumerate: using data collection mode for nameSpaces -#{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil @@ -170,7 +170,7 @@ def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTim end # drop the events if the event of the excluded namespace - next unless !KubernetesApiClient.isExcludeResourceItem("", items["involvedObject"]["namespace"], @nameSpacesFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem("", items["involvedObject"]["namespace"], @nameSpaceFilteringMode, @nameSpaces) record["ObjectKind"] = items["involvedObject"]["kind"] record["Namespace"] = items["involvedObject"]["namespace"] diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index b9d37970b..7caa705a2 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -33,7 +33,7 @@ def initialize @kubeperfTag = "oneagent.containerInsights.LINUX_PERF_BLOB" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" @nameSpaces = [] - @nameSpacesFilteringMode = "off" + @nameSpaceFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -105,8 +105,8 @@ def enumerate(podList = nil) $log.info("in_kube_perfinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_perfinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kube_perfinventory::enumerate: using data collection mode for nameSpaces: #{@nameSpacesFilteringMode} @ 
#{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kube_perfinventory::enumerate: using data collection mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end nodeAllocatableRecords = getNodeAllocatableRecords() @@ -142,7 +142,7 @@ def parse_and_emit_records(podInventory, nodeAllocatableRecords, continuationTok begin #begin block start podInventory["items"].each do |item| #podInventory block start - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @nameSpacesFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @nameSpaceFilteringMode, @nameSpaces) nodeName = "" if !item["spec"]["nodeName"].nil? nodeName = item["spec"]["nodeName"] diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 0df3a6855..202ea0d60 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -60,7 +60,7 @@ def initialize @kubeservicesTag = "oneagent.containerInsights.KUBE_SERVICES_BLOB" @containerInventoryTag = "oneagent.containerInsights.CONTAINER_INVENTORY_BLOB" @nameSpaces = [] - @nameSpacesFilteringMode = "off" + @nameSpaceFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -177,8 +177,8 @@ def enumerate(podList = nil) $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_podinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kube_podinventory::enumerate: using data collection mode for nameSpaces: #{@nameSpacesFilteringMode} @ #{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kube_podinventory::enumerate: using data collection mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end serviceInventory = {} @@ -241,8 +241,8 @@ def enumerate(podList = nil) if !@nameSpaces.nil? && !@nameSpaces.empty? && @nameSpaces.length > 0 telemetryProperties["DATA_COLLECTION_NAMESPACES"] = @nameSpaces end - if !@nameSpacesFilteringMode.nil? && !@nameSpacesFilteringMode.empty? - telemetryProperties["DATA_COLLECTION_NAMESPACES_FILTERING_MODE"] = @nameSpacesFilteringMode + if !@nameSpaceFilteringMode.nil? && !@nameSpaceFilteringMode.empty? 
+ telemetryProperties["DATA_COLLECTION_NAMESPACES_FILTERING_MODE"] = @nameSpaceFilteringMode end if @run_interval > 60 telemetryProperties["DATA_COLLECTION_INTERVAL_MINUTES"] = @run_interval / 60 @@ -292,7 +292,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc begin #begin block start podInventory["items"].each do |item| #podInventory block start - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @nameSpacesFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @nameSpaceFilteringMode, @nameSpaces) # pod inventory records podInventoryRecords = getPodInventoryRecords(item, serviceRecords, batchTime) @containerCount += podInventoryRecords.length @@ -390,7 +390,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc if continuationToken.nil? # sending kube services inventory records kubeServicesEventStream = Fluent::MultiEventStream.new serviceRecords.each do |kubeServiceRecord| - next unless !KubernetesApiClient.isExcludeResourceItem(kubeServiceRecord["ServiceName"], kubeServiceRecord["namespace"], @nameSpacesFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(kubeServiceRecord["ServiceName"], kubeServiceRecord["namespace"], @nameSpaceFilteringMode, @nameSpaces) if !kubeServiceRecord.nil? # adding before emit to reduce memory foot print kubeServiceRecord["ClusterId"] = KubernetesApiClient.getClusterId diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index b20d13a82..d10b82aa3 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -25,7 +25,7 @@ def initialize @PV_CHUNK_SIZE = "5000" @pvTypeToCountHash = {} @nameSpaces = [] - @nameSpacesFilteringMode = "off" + @nameSpaceFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -73,8 +73,8 @@ def enumerate $log.info("in_kube_pvinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_pvinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kube_pvinventory::enumerate: using data collection mode for nameSpaces: #{@nameSpacesFilteringMode} @ #{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kube_pvinventory::enumerate: using data collection mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end continuationToken = nil @@ -140,7 +140,7 @@ def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) record["PVType"] = type record["PVTypeInfo"] = typeInfo - next unless !KubernetesApiClient.isExcludeResourceItem(pvcName, pvcNamespace, @nameSpacesFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(pvcName, pvcNamespace, @nameSpaceFilteringMode, @nameSpaces) record["CollectionTime"] = batchTime record["ClusterId"] = KubernetesApiClient.getClusterId diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index 63290e9fa..e3810d34f 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ 
b/source/plugins/ruby/in_kubestate_deployments.rb @@ -36,7 +36,7 @@ def initialize @ClusterId = KubernetesApiClient.getClusterId @ClusterName = KubernetesApiClient.getClusterName @nameSpaces = [] - @nameSpacesFilteringMode = "off" + @nameSpaceFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -94,8 +94,8 @@ def enumerate $log.info("in_kubestate_deployments::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kubestate_deployments::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kubestate_deployments::enumerate: using data collection mode for nameSpaces: #{@nameSpacesFilteringMode} @ #{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kubestate_deployments::enumerate: using data collection mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil @@ -148,7 +148,7 @@ def parse_and_emit_records(deployments, batchTime = Time.utc.iso8601) begin metricInfo = deployments metricInfo["items"].each do |deployment| - next unless !KubernetesApiClient.isExcludeResourceItem(deployment["metadata"]["name"], deployment["metadata"]["namespace"], @nameSpacesFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(deployment["metadata"]["name"], deployment["metadata"]["namespace"], @nameSpaceFilteringMode, @nameSpaces) deploymentName = deployment["metadata"]["name"] deploymentNameSpace = deployment["metadata"]["namespace"] deploymentCreatedTime = "" diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index dd54732e8..a1975486e 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -33,7 +33,7 @@ def initialize @ClusterId = KubernetesApiClient.getClusterId @ClusterName = KubernetesApiClient.getClusterName @nameSpaces = [] - @nameSpacesFilteringMode = "off" + @nameSpaceFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -91,8 +91,8 @@ def enumerate $log.info("in_kubestate_hpa::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kubestate_hpa::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kubestate_hpa::enumerate: using data collection mode for nameSpaces: #{@nameSpacesFilteringMode} @ #{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kubestate_hpa::enumerate: using data collection mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil @@ -136,7 +136,7 @@ def parse_and_emit_records(hpas, batchTime = Time.utc.iso8601) begin metricInfo = hpas metricInfo["items"].each do |hpa| - next unless !KubernetesApiClient.isExcludeResourceItem(hpa["metadata"]["name"], hpa["metadata"]["namespace"], @nameSpacesFilteringMode, @nameSpaces) + next unless 
!KubernetesApiClient.isExcludeResourceItem(hpa["metadata"]["name"], hpa["metadata"]["namespace"], @nameSpaceFilteringMode, @nameSpaces) hpaName = hpa["metadata"]["name"] hpaNameSpace = hpa["metadata"]["namespace"] hpaCreatedTime = "" diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 025e23e23..ca67f653a 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -23,7 +23,7 @@ def initialize require_relative "extension_utils" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" @nameSpaces = [] - @nameSpacesFilteringMode = "off" + @nameSpaceFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -78,8 +78,8 @@ def enumerate() @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_win_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpacesFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_cadvisor_perf::enumerate: using data collection mode for nameSpaces: #{@nameSpacesFilteringMode} @ #{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_cadvisor_perf::enumerate: using data collection mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end #Resetting this cache so that it is populated with the current set of containers with every call @@ -95,7 +95,7 @@ def enumerate() end @@winNodes.each do |winNode| eventStream = Fluent::MultiEventStream.new - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, mode: @nameSpacesFilteringMode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601) + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, mode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601) metricData.each do |record| if !record.empty? 
eventStream.add(time, record) if record @@ -110,7 +110,7 @@ def enumerate() #start GPU InsightsMetrics items begin containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, mode: @nameSpacesFilteringMode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601)) + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, mode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601)) insightsMetricsEventStream = Fluent::MultiEventStream.new containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| From 9ac14c3d740f9c25e1ca66b656478e3a700e18e3 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sat, 19 Nov 2022 10:37:23 -0800 Subject: [PATCH 284/301] better naming --- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 54 +++++++++---------- source/plugins/ruby/KubernetesApiClient.rb | 6 +-- source/plugins/ruby/in_cadvisor_perf.rb | 4 +- source/plugins/ruby/in_win_cadvisor_perf.rb | 4 +- 4 files changed, 34 insertions(+), 34 deletions(-) diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 7fa65d09f..d2f581f3c 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -133,7 +133,7 @@ def getCAdvisorUri(winNode, relativeUri) return baseUri + relativeUri end - def getMetrics(winNode: nil, mode: "off", nameSpaces: [], metricTime: Time.now.utc.iso8601) + def getMetrics(winNode: nil, nameSpaceFilteringMode: "off", nameSpaces: [], metricTime: Time.now.utc.iso8601) metricDataItems = [] begin cAdvisorStats = getSummaryStatsFromCAdvisor(winNode) @@ -155,8 +155,8 @@ def getMetrics(winNode: nil, mode: "off", nameSpaces: [], metricTime: Time.now.u # Checking if we are in windows daemonset and sending only few metrics that are needed for MDM if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 # Container metrics - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, mode, nameSpaces)) - containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, mode, nameSpaces) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, nameSpaceFilteringMode, nameSpaces)) + containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, nameSpaceFilteringMode, nameSpaces) if containerCpuUsageNanoSecondsRate && !containerCpuUsageNanoSecondsRate.empty? && !containerCpuUsageNanoSecondsRate.nil? 
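Because getMetrics and getInsightsMetrics take Ruby keyword arguments, renaming the mode: parameter to nameSpaceFilteringMode: also forces every call site to change in the same commit; an old-style call against the new signature would raise ArgumentError (unknown keyword: :mode). A before/after sketch of one call site:

    # before the rename
    CAdvisorMetricsAPIClient.getMetrics(winNode: nil, mode: @nameSpaceFilteringMode,
                                        nameSpaces: @nameSpaces, metricTime: batchTime)
    # after the rename
    CAdvisorMetricsAPIClient.getMetrics(winNode: nil, nameSpaceFilteringMode: @nameSpaceFilteringMode,
                                        nameSpaces: @nameSpaces, metricTime: batchTime)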
metricDataItems.concat(containerCpuUsageNanoSecondsRate) end @@ -167,15 +167,15 @@ def getMetrics(winNode: nil, mode: "off", nameSpaces: [], metricTime: Time.now.u end metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime)) else - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, mode, nameSpaces)) - metricDataItems.concat(getContainerStartTimeMetricItems(metricInfo, hostName, "restartTimeEpoch", metricTime, mode, nameSpaces)) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, nameSpaceFilteringMode, nameSpaces)) + metricDataItems.concat(getContainerStartTimeMetricItems(metricInfo, hostName, "restartTimeEpoch", metricTime, nameSpaceFilteringMode, nameSpaces)) if operatingSystem == "Linux" - metricDataItems.concat(getContainerCpuMetricItems(metricInfo, hostName, "usageNanoCores", Constants::CPU_USAGE_NANO_CORES, metricTime, mode, nameSpaces)) - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime, operatingSystem, mode, nameSpaces)) + metricDataItems.concat(getContainerCpuMetricItems(metricInfo, hostName, "usageNanoCores", Constants::CPU_USAGE_NANO_CORES, metricTime, nameSpaceFilteringMode, nameSpaces)) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime, operatingSystem, nameSpaceFilteringMode, nameSpaces)) metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime)) elsif operatingSystem == "Windows" - containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, mode, nameSpaces) + containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, nameSpaceFilteringMode, nameSpaces) if containerCpuUsageNanoSecondsRate && !containerCpuUsageNanoSecondsRate.empty? && !containerCpuUsageNanoSecondsRate.nil? metricDataItems.concat(containerCpuUsageNanoSecondsRate) end @@ -210,7 +210,7 @@ def getMetrics(winNode: nil, mode: "off", nameSpaces: [], metricTime: Time.now.u return metricDataItems end - def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, mode, nameSpaces) + def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, nameSpaceFilteringMode, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId timeDifference = (DateTime.now.to_time.to_i - @@telemetryCpuMetricTimeTracker).abs @@ -222,7 +222,7 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, mode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, nameSpaceFilteringMode, nameSpaces) if (!pod["containers"].nil?) 
pod["containers"].each do |container| @@ -312,7 +312,7 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met return metricItems end - def getInsightsMetrics(winNode: nil, mode: "off", nameSpaces: [], metricTime: Time.now.utc.iso8601) + def getInsightsMetrics(winNode: nil, nameSpaceFilteringMode: "off", nameSpaces: [], metricTime: Time.now.utc.iso8601) metricDataItems = [] begin cAdvisorStats = getSummaryStatsFromCAdvisor(winNode) @@ -331,11 +331,11 @@ def getInsightsMetrics(winNode: nil, mode: "off", nameSpaces: [], metricTime: Ti operatingSystem = "Linux" end if !metricInfo.nil? - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryTotal", "containerGpumemoryTotalBytes", metricTime, mode, nameSpaces)) - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryUsed", "containerGpumemoryUsedBytes", metricTime, mode, nameSpaces)) - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "dutyCycle", "containerGpuDutyCycle", metricTime, mode, nameSpaces)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryTotal", "containerGpumemoryTotalBytes", metricTime, nameSpaceFilteringMode, nameSpaces)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryUsed", "containerGpumemoryUsedBytes", metricTime, nameSpaceFilteringMode, nameSpaces)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "dutyCycle", "containerGpuDutyCycle", metricTime, nameSpaceFilteringMode, nameSpaces)) - metricDataItems.concat(getPersistentVolumeMetrics(metricInfo, hostName, "usedBytes", Constants::PV_USED_BYTES, metricTime, mode, nameSpaces)) + metricDataItems.concat(getPersistentVolumeMetrics(metricInfo, hostName, "usedBytes", Constants::PV_USED_BYTES, metricTime, nameSpaceFilteringMode, nameSpaces)) else @Log.warn("Couldn't get Insights metrics information for host: #{hostName} os:#{operatingSystem}") end @@ -346,7 +346,7 @@ def getInsightsMetrics(winNode: nil, mode: "off", nameSpaces: [], metricTime: Ti return metricDataItems end - def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metricNameToReturn, metricPollTime, mode, nameSpaces) + def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metricNameToReturn, metricPollTime, nameSpaceFilteringMode, nameSpaces) telemetryTimeDifference = (DateTime.now.to_time.to_i - @@telemetryPVKubeSystemMetricsTimeTracker).abs telemetryTimeDifferenceInMinutes = telemetryTimeDifference / 60 @@ -357,7 +357,7 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric metricInfo = metricJSON metricInfo["pods"].each do |pod| podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(pod["podRef"]["name"], podNamespace, mode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(pod["podRef"]["name"], podNamespace, nameSpaceFilteringMode, nameSpaces) excludeNamespace = false if (podNamespace.downcase == "kube-system") && @pvKubeSystemCollectionMetricsEnabled == "false" excludeNamespace = true @@ -419,7 +419,7 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric return metricItems end - def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCollect, metricNametoReturn, metricPollTime, mode, nameSpaces) + def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, 
metricNameToCollect, metricNametoReturn, metricPollTime, nameSpaceFilteringMode, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId clusterName = KubernetesApiClient.getClusterName @@ -429,7 +429,7 @@ def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCo podUid = pod["podRef"]["uid"] podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, mode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, nameSpaceFilteringMode, nameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| @@ -458,8 +458,8 @@ def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCo metricTags[Constants::INSIGHTSMETRICS_TAGS_GPU_VENDOR] = accelerator["make"] end - if (!accelerator["model"].nil? && !accelerator["model"].empty?) - metricTags[Constants::INSIGHTSMETRICS_TAGS_GPU_MODEL] = accelerator["model"] + if (!accelerator["nameSpaceFilteringModel"].nil? && !accelerator["nameSpaceFilteringModel"].empty?) + metricTags[Constants::INSIGHTSMETRICS_TAGS_GPU_nameSpaceFilteringModeL] = accelerator["nameSpaceFilteringModel"] end if (!accelerator["id"].nil? && !accelerator["id"].empty?) @@ -507,7 +507,7 @@ def resetWinContainerIdCache end # usageNanoCores doesnt exist for windows nodes. Hence need to compute this from usageCoreNanoSeconds - def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, mode, nameSpaces) + def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, nameSpaceFilteringMode, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId timeDifference = (DateTime.now.to_time.to_i - @@telemetryCpuMetricTimeTracker).abs @@ -521,7 +521,7 @@ def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, mode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, nameSpaceFilteringMode, nameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| @@ -638,7 +638,7 @@ def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, return metricItems end - def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollect, metricNametoReturn, metricPollTime, operatingSystem, mode, nameSpaces) + def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollect, metricNametoReturn, metricPollTime, operatingSystem, nameSpaceFilteringMode, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId timeDifference = (DateTime.now.to_time.to_i - @@telemetryMemoryMetricTimeTracker).abs @@ -649,7 +649,7 @@ def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollec podUid = pod["podRef"]["uid"] podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, mode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, nameSpaceFilteringMode, nameSpaces) if (!pod["containers"].nil?) 
pod["containers"].each do |container| containerName = container["name"] @@ -887,7 +887,7 @@ def getNodeLastRebootTimeMetric(metricJSON, hostName, metricNametoReturn, metric return metricItem end - def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, metricPollTime, mode, nameSpaces) + def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, metricPollTime, nameSpaceFilteringMode, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId #currentTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z @@ -897,7 +897,7 @@ def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, m podUid = pod["podRef"]["uid"] podNamespace = pod["podRef"]["namespace"] podName = pod["podRef"]["name"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, mode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, nameSpaceFilteringMode, nameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| containerName = container["name"] diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 122cba0f5..dc5399d51 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -1409,7 +1409,7 @@ def isEmitCacheTelemetry return isEmitCacheTelemtryEnabled end - def isExcludeResourceItem(resourceName, resourceNamespace, mode, nameSpaces) + def isExcludeResourceItem(resourceName, resourceNamespace, nameSpaceFilteringMode, nameSpaces) isExclude = false begin if !resourceName.nil? && !resourceName.empty? && !resourceNamespace.nil? && !resourceNamespace.empty? @@ -1417,9 +1417,9 @@ def isExcludeResourceItem(resourceName, resourceNamespace, mode, nameSpaces) if resourceName.start_with?("ama-logs") && resourceNamespace.eql?("kube-system") isExclude = false elsif !nameSpaces.nil? && !nameSpaces.empty? && nameSpaces.length > 0 - if mode == "exclude" && nameSpaces.include?(resourceNamespace) + if nameSpaceFilteringMode == "exclude" && nameSpaces.include?(resourceNamespace) isExclude = true - elsif mode == "include" && !nameSpaces.include?(resourceNamespace) + elsif nameSpaceFilteringMode == "include" && !nameSpaces.include?(resourceNamespace) isExclude = true end end diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index 2854cefea..ceef5d6da 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -83,7 +83,7 @@ def enumerate() $log.info("in_cadvisor_perf::enumerate: using data collection mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, mode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime) + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, nameSpaceFilteringMode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime) metricData.each do |record| eventStream.add(time, record) if record end @@ -99,7 +99,7 @@ def enumerate() begin if !@@isWindows.nil? 
&& @@isWindows == false containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, mode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime)) + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, nameSpaceFilteringMode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime)) containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index ca67f653a..53adf7963 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -95,7 +95,7 @@ def enumerate() end @@winNodes.each do |winNode| eventStream = Fluent::MultiEventStream.new - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, mode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601) + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, nameSpaceFilteringMode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601) metricData.each do |record| if !record.empty? eventStream.add(time, record) if record @@ -110,7 +110,7 @@ def enumerate() #start GPU InsightsMetrics items begin containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, mode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601)) + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, nameSpaceFilteringMode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601)) insightsMetricsEventStream = Fluent::MultiEventStream.new containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| From ca62ee90b909086527f8dbbe1b761c3b17eff210 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sat, 19 Nov 2022 10:43:32 -0800 Subject: [PATCH 285/301] better naming --- .../onboarding-using-msi-auth/existingClusterOnboarding.json | 2 +- .../arc-k8s-extension-msi-auth/existingClusterOnboarding.json | 2 +- .../existingClusterOnboarding.json | 2 +- source/plugins/ruby/CAdvisorMetricsAPIClient.rb | 4 ++-- source/plugins/ruby/constants.rb | 2 +- source/plugins/ruby/extension_utils.rb | 4 ++-- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index 6bb53b51d..9e87c2efa 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -107,7 +107,7 @@ "extensionSettings": { "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", - "mode" : "[parameters('nameSpacesFilteringModeForDataCollection')]", + "nameSpaceFilteringMode" : "[parameters('nameSpacesFilteringModeForDataCollection')]", "nameSpaces": "[parameters('namespacesForDataCollection')]" } }, diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json index 
689efae4d..77f0549b0 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json @@ -121,7 +121,7 @@ "extensionSettings": { "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", - "mode" : "[parameters('nameSpacesFilteringModeForDataCollection')]", + "nameSpaceFilteringMode" : "[parameters('nameSpacesFilteringModeForDataCollection')]", "nameSpaces": "[parameters('namespacesForDataCollection')]" } }, diff --git a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json index 542781af1..58fa598f1 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json @@ -121,7 +121,7 @@ "extensionSettings": { "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", - "mode" : "[parameters('nameSpacesFilteringModeForDataCollection')]", + "nameSpaceFilteringMode" : "[parameters('nameSpacesFilteringModeForDataCollection')]", "nameSpaces": "[parameters('namespacesForDataCollection')]" } }, diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index d2f581f3c..75105d7e4 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -458,8 +458,8 @@ def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCo metricTags[Constants::INSIGHTSMETRICS_TAGS_GPU_VENDOR] = accelerator["make"] end - if (!accelerator["nameSpaceFilteringModel"].nil? && !accelerator["nameSpaceFilteringModel"].empty?) - metricTags[Constants::INSIGHTSMETRICS_TAGS_GPU_nameSpaceFilteringModeL] = accelerator["nameSpaceFilteringModel"] + if (!accelerator["model"].nil? && !accelerator["model"].empty?) + metricTags[Constants::INSIGHTSMETRICS_TAGS_GPU_nameSpaceFilteringModeL] = accelerator["model"] end if (!accelerator["id"].nil? && !accelerator["id"].empty?) diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index f2bcaf928..a0b555646 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -149,7 +149,7 @@ class Constants EXTENSION_SETTINGS = "extensionSettings" EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS = "dataCollectionSettings" EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL = "interval" - EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_MODE = "mode" + EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACE_FILTERING_MODE = "nameSpaceFilteringMode" EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES = ["off", "include", "exlcude"] EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES = "nameSpaces" diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index b0f2c89b1..f3cd2319d 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -79,8 +79,8 @@ def getNamespacesFilteringModeForDataCollection dataCollectionSettings = Extension.instance.get_extension_data_collection_settings() if !dataCollectionSettings.nil? && !dataCollectionSettings.empty? 
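Across the onboarding templates and the Ruby constants, the settings key is now nameSpaceFilteringMode. A hypothetical parsed dataCollectionSettings hash, as the agent would receive it from the extension settings; the key names come from the templates and constants above, the values are only illustrative:

    dataCollectionSettings = {
      "interval"               => "1m",                        # illustrative; resolved from the dataCollectionInterval parameter
      "nameSpaceFilteringMode" => "include",                   # illustrative mode value
      "nameSpaces"             => ["kube-system", "default"],  # illustrative namespace list
    }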
&& - dataCollectionSettings.has_key?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_MODE) - mode = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_MODE] + dataCollectionSettings.has_key?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACE_FILTERING_MODE) + mode = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACE_FILTERING_MODE] if !mode.nil? && !mode.empty? if Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES.include?(mode.downcase) nameSpaceFilteringMode = mode.downcase From 7e8ec2af6d6990e6ebc37b0c366853a7bb311748 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sat, 19 Nov 2022 11:44:06 -0800 Subject: [PATCH 286/301] better naming --- source/plugins/ruby/CAdvisorMetricsAPIClient.rb | 2 +- source/plugins/ruby/KubernetesApiClient.rb | 2 +- source/plugins/ruby/in_cadvisor_perf.rb | 2 +- source/plugins/ruby/in_containerinventory.rb | 2 +- source/plugins/ruby/in_kube_events.rb | 2 +- source/plugins/ruby/in_kube_perfinventory.rb | 2 +- source/plugins/ruby/in_kube_podinventory.rb | 2 +- source/plugins/ruby/in_kube_pvinventory.rb | 2 +- source/plugins/ruby/in_kubestate_deployments.rb | 2 +- source/plugins/ruby/in_kubestate_hpa.rb | 2 +- source/plugins/ruby/in_win_cadvisor_perf.rb | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 75105d7e4..85602bac0 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -459,7 +459,7 @@ def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCo end if (!accelerator["model"].nil? && !accelerator["model"].empty?) - metricTags[Constants::INSIGHTSMETRICS_TAGS_GPU_nameSpaceFilteringModeL] = accelerator["model"] + metricTags[Constants::INSIGHTSMETRICS_TAGS_GPU_MODEL] = accelerator["model"] end if (!accelerator["id"].nil? && !accelerator["id"].empty?) diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index dc5399d51..390b508a9 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -1416,7 +1416,7 @@ def isExcludeResourceItem(resourceName, resourceNamespace, nameSpaceFilteringMod # data collection namespace filtering not applicable for ama-logs agent as customer needs to monitor the agent if resourceName.start_with?("ama-logs") && resourceNamespace.eql?("kube-system") isExclude = false - elsif !nameSpaces.nil? && !nameSpaces.empty? && nameSpaces.length > 0 + elsif !nameSpaces.nil? && !nameSpaces.empty? && nameSpaces.length > 0 && !nameSpaceFilteringMode.nil? && !nameSpaceFilteringMode.empty? 
if nameSpaceFilteringMode == "exclude" && nameSpaces.include?(resourceNamespace) isExclude = true elsif nameSpaceFilteringMode == "include" && !nameSpaces.include?(resourceNamespace) diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index ceef5d6da..8d3b165de 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -80,7 +80,7 @@ def enumerate() @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_cadvisor_perf::enumerate: using data collection mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_cadvisor_perf::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, nameSpaceFilteringMode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime) diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index 5b7a093f8..ba79eb1bf 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -72,7 +72,7 @@ def enumerate @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_container_inventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_container_inventory::enumerate: using data collection mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_container_inventory::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end begin containerRuntimeEnv = ENV["CONTAINER_RUNTIME"] diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index 34014724f..d306d04b3 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -97,7 +97,7 @@ def enumerate @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_events::enumerate: using data collection nameSpaces -#{@nameSpaces} @ #{Time.now.utc.iso8601}") @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kube_events::enumerate: using data collection mode for nameSpaces -#{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_events::enumerate: using data collection filtering mode for nameSpaces -#{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index 7caa705a2..ed3929258 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -106,7 +106,7 @@ def enumerate(podList = nil) @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_perfinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kube_perfinventory::enumerate: using data 
collection mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_perfinventory::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end nodeAllocatableRecords = getNodeAllocatableRecords() diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 202ea0d60..d471eeee5 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -178,7 +178,7 @@ def enumerate(podList = nil) @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_podinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kube_podinventory::enumerate: using data collection mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end serviceInventory = {} diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index d10b82aa3..2f59a1b67 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -74,7 +74,7 @@ def enumerate @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_pvinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kube_pvinventory::enumerate: using data collection mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_pvinventory::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end continuationToken = nil diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index e3810d34f..319264b6d 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -95,7 +95,7 @@ def enumerate @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kubestate_deployments::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kubestate_deployments::enumerate: using data collection mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_kubestate_deployments::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index a1975486e..93e3f171d 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -92,7 +92,7 @@ def enumerate @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kubestate_hpa::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kubestate_hpa::enumerate: using data collection 
mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_kubestate_hpa::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 53adf7963..dfd58baf6 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -79,7 +79,7 @@ def enumerate() $log.info("in_win_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_cadvisor_perf::enumerate: using data collection mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_cadvisor_perf::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") end #Resetting this cache so that it is populated with the current set of containers with every call From 24d99b84deef9bc31a2528f704bb908d7bcca4f2 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sat, 19 Nov 2022 18:07:39 -0800 Subject: [PATCH 287/301] refactor code --- .../plugins/ruby/ApplicationInsightsUtility.rb | 3 +++ source/plugins/ruby/extension_utils.rb | 13 +++++++++++++ source/plugins/ruby/in_cadvisor_perf.rb | 14 ++++++++------ source/plugins/ruby/in_containerinventory.rb | 14 ++++++++------ source/plugins/ruby/in_kube_nodes.rb | 6 ++++-- source/plugins/ruby/in_kube_perfinventory.rb | 14 ++++++++------ source/plugins/ruby/in_kube_podinventory.rb | 14 ++++++++------ source/plugins/ruby/in_kube_podmdminventory.rb | 2 +- source/plugins/ruby/in_kube_pvinventory.rb | 14 ++++++++------ source/plugins/ruby/in_kubestate_deployments.rb | 14 ++++++++------ source/plugins/ruby/in_kubestate_hpa.rb | 14 ++++++++------ source/plugins/ruby/in_win_cadvisor_perf.rb | 16 ++++++++-------- 12 files changed, 85 insertions(+), 53 deletions(-) diff --git a/source/plugins/ruby/ApplicationInsightsUtility.rb b/source/plugins/ruby/ApplicationInsightsUtility.rb index 6e580ad9c..8371f8cf4 100644 --- a/source/plugins/ruby/ApplicationInsightsUtility.rb +++ b/source/plugins/ruby/ApplicationInsightsUtility.rb @@ -97,6 +97,9 @@ def initializeUtility() if File.file?(file) && File.exist?(file) && File.foreach(file).grep(/LINUX_SYSLOGS_BLOB/).any? @@CustomProperties["syslogEnabled"] = "true" end + if File.file?(file) && File.exist?(file) && File.foreach(file).grep(/ContainerInsightsExtension/).any? && File.foreach(file).grep(/dataCollectionSettings/).any? + @@CustomProperties["dataCollectionSettingsEnabled"] = "true" + end } end rescue => errorStr diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index f3cd2319d..25cc4452e 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -26,6 +26,19 @@ def isAADMSIAuthMode() return !ENV["AAD_MSI_AUTH_MODE"].nil? && !ENV["AAD_MSI_AUTH_MODE"].empty? && ENV["AAD_MSI_AUTH_MODE"].downcase == "true" end + def isDataCollectionSettingsConfigured + isCollectionSettingsEnabled = false + begin + dataCollectionSettings = Extension.instance.get_extension_data_collection_settings() + if !dataCollectionSettings.nil? && !dataCollectionSettings.empty? 
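For reference, a minimal sketch of the settings Hash that Extension.instance.get_extension_data_collection_settings() is assumed to return when a DCR carries dataCollectionSettings; the key names follow the settings keys referenced in constants.rb at this point in the series, and the values are illustrative only, not output captured from a real cluster:

    # Illustrative shape only: any non-empty Hash makes
    # isDataCollectionSettingsConfigured() return true.
    sample_data_collection_settings = {
      "interval" => "5m",                                   # collection interval, 1m..30m
      "nameSpaceFilteringMode" => "exclude",                # off | include | exclude
      "nameSpaces" => ["kube-system", "gatekeeper-system"], # namespaces the mode applies to
    }
    puts sample_data_collection_settings.empty?  # => false, so the guard passes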
+ isCollectionSettingsEnabled = true + end + rescue => errorStr + $log.warn("ExtensionUtils::isDataCollectionSettingsConfigured: failed with an exception: #{errorStr}") + end + return isCollectionSettingsEnabled + end + def getDataCollectionIntervalSeconds collectionIntervalSeconds = 60 begin diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index 8d3b165de..5dd4521bc 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -75,12 +75,14 @@ def enumerate() end $log.info("in_cadvisor_perf::enumerate: using perf tag -#{@tag} @ #{Time.now.utc.iso8601}") $log.info("in_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsmetricstag} @ #{Time.now.utc.iso8601}") - @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() - $log.info("in_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_cadvisor_perf::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + if ExtensionUtils.isDataCollectionSettingsConfigured() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() + $log.info("in_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_cadvisor_perf::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + end end metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, nameSpaceFilteringMode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime) diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index ba79eb1bf..f866e5c75 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -67,12 +67,14 @@ def enumerate @tag = ExtensionUtils.getOutputStreamId(Constants::CONTAINER_INVENTORY_DATA_TYPE) end $log.info("in_container_inventory::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") - @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() - $log.info("in_container_inventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_container_inventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_container_inventory::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + if ExtensionUtils.isDataCollectionSettingsConfigured() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() + $log.info("in_container_inventory::enumerate: using data collection interval(seconds): #{@run_interval} @ 
#{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_container_inventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_container_inventory::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + end end begin containerRuntimeEnv = ENV["CONTAINER_RUNTIME"] diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index b0532b5d4..e2e1678b4 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -150,8 +150,10 @@ def enumerate $log.info("in_kube_nodes::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_nodes::enumerate: using containernodeinventory tag -#{@ContainerNodeInventoryTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_nodes::enumerate: using kubenodeinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") - @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() - $log.info("in_kube_nodes::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") + if ExtensionUtils.isDataCollectionSettingsConfigured() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() + $log.info("in_kube_nodes::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") + end end nodesAPIChunkStartTime = (Time.now.to_f * 1000).to_i diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index ed3929258..05c863522 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -101,12 +101,14 @@ def enumerate(podList = nil) end $log.info("in_kube_perfinventory::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_perfinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") - @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() - $log.info("in_kube_perfinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_kube_perfinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kube_perfinventory::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + if ExtensionUtils.isDataCollectionSettingsConfigured() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() + $log.info("in_kube_perfinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kube_perfinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kube_perfinventory::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + end end nodeAllocatableRecords = getNodeAllocatableRecords() diff --git 
a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index d471eeee5..4f9c1879d 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -173,12 +173,14 @@ def enumerate(podList = nil) $log.info("in_kube_podinventory::enumerate: using containerinventory tag -#{@containerInventoryTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_podinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") $log.info("in_kube_podinventory::enumerate: using kubepodinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") - @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() - $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_kube_podinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kube_podinventory::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + if ExtensionUtils.isDataCollectionSettingsConfigured() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() + $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kube_podinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kube_podinventory::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + end end serviceInventory = {} diff --git a/source/plugins/ruby/in_kube_podmdminventory.rb b/source/plugins/ruby/in_kube_podmdminventory.rb index 3e2473634..2483154d5 100644 --- a/source/plugins/ruby/in_kube_podmdminventory.rb +++ b/source/plugins/ruby/in_kube_podmdminventory.rb @@ -63,7 +63,7 @@ def enumerate if !@isCustomMetricsAvailability $log.warn "in_kube_podmdminventory::enumerate:skipping since custom metrics not available either for this cluster type or the region" else - if ExtensionUtils.isAADMSIAuthMode() + if ExtensionUtils.isAADMSIAuthMode() && ExtensionUtils.isDataCollectionSettingsConfigured() @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") end diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 2f59a1b67..7b3c251ae 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -69,12 +69,14 @@ def enumerate if @tag.nil? 
|| !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_PV_INVENTORY_DATA_TYPE) end - @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() - $log.info("in_kube_pvinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_kube_pvinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kube_pvinventory::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + if ExtensionUtils.isDataCollectionSettingsConfigured() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() + $log.info("in_kube_pvinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kube_pvinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kube_pvinventory::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + end end continuationToken = nil diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index 319264b6d..d38996cee 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -90,12 +90,14 @@ def enumerate if @tag.nil? 
|| !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) @tag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) end - @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() - $log.info("in_kubestate_deployments::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_kubestate_deployments::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kubestate_deployments::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + if ExtensionUtils.isDataCollectionSettingsConfigured() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() + $log.info("in_kubestate_deployments::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kubestate_deployments::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kubestate_deployments::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + end end # Initializing continuation token to nil continuationToken = nil diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 93e3f171d..fd9d0b9a1 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -87,12 +87,14 @@ def enumerate @tag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) end $log.info("in_kubestate_hpa::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") - @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() - $log.info("in_kubestate_hpa::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_kubestate_hpa::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kubestate_hpa::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + if ExtensionUtils.isDataCollectionSettingsConfigured() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() + $log.info("in_kubestate_hpa::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kubestate_hpa::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kubestate_hpa::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + end end # Initializing continuation token to nil continuationToken = nil diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index dfd58baf6..88b35a84e 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ 
b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -72,14 +72,14 @@ def enumerate() $log.info("in_win_cadvisor_perf::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") $log.info("in_win_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") - @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() - $log.info("in_win_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_win_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_cadvisor_perf::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + if ExtensionUtils.isDataCollectionSettingsConfigured() + @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() + $log.info("in_win_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") + @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_win_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_cadvisor_perf::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + end end #Resetting this cache so that it is populated with the current set of containers with every call From 2e891ca2a2f38fff6045cff6e67283c57e1163e8 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Sat, 19 Nov 2022 19:57:07 -0800 Subject: [PATCH 288/301] add telemetry --- .../ruby/ApplicationInsightsUtility.rb | 16 +++++++++------- source/plugins/ruby/in_kube_podinventory.rb | 19 +++++++++++-------- 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/source/plugins/ruby/ApplicationInsightsUtility.rb b/source/plugins/ruby/ApplicationInsightsUtility.rb index 8371f8cf4..1152c4505 100644 --- a/source/plugins/ruby/ApplicationInsightsUtility.rb +++ b/source/plugins/ruby/ApplicationInsightsUtility.rb @@ -92,13 +92,15 @@ def initializeUtility() if !aadAuthMSIMode.nil? && !aadAuthMSIMode.empty? && aadAuthMSIMode.downcase == "true".downcase @@CustomProperties["aadAuthMSIMode"] = "true" begin - if Dir.exist?('/etc/mdsd.d/config-cache/configchunks') - Dir.glob('/etc/mdsd.d/config-cache/configchunks/*.json') { |file| - if File.file?(file) && File.exist?(file) && File.foreach(file).grep(/LINUX_SYSLOGS_BLOB/).any? - @@CustomProperties["syslogEnabled"] = "true" - end - if File.file?(file) && File.exist?(file) && File.foreach(file).grep(/ContainerInsightsExtension/).any? && File.foreach(file).grep(/dataCollectionSettings/).any? - @@CustomProperties["dataCollectionSettingsEnabled"] = "true" + if Dir.exist?("/etc/mdsd.d/config-cache/configchunks") + Dir.glob("/etc/mdsd.d/config-cache/configchunks/*.json") { |file| + if File.file?(file) && File.exist?(file) + if File.foreach(file).grep(/LINUX_SYSLOGS_BLOB/).any? + @@CustomProperties["syslogEnabled"] = "true" + end + if File.foreach(file).grep(/ContainerInsightsExtension/).any? && File.foreach(file).grep(/dataCollectionSettings/).any? 
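Each grep call above re-reads the config chunk from disk, so the combined condition walks the same JSON file more than once per scan. A minimal single-pass sketch, assuming the same /etc/mdsd.d/config-cache/configchunks layout, shown purely as an illustration rather than as the change made in this patch:

    # Read each config chunk once and derive both telemetry flags from that pass.
    def scan_config_chunks(dir = "/etc/mdsd.d/config-cache/configchunks")
      flags = { syslog: false, data_collection_settings: false }
      return flags unless Dir.exist?(dir)
      Dir.glob(File.join(dir, "*.json")).each do |file|
        next unless File.file?(file)
        contents = File.read(file)
        flags[:syslog] ||= contents.include?("LINUX_SYSLOGS_BLOB")
        flags[:data_collection_settings] ||=
          contents.include?("ContainerInsightsExtension") && contents.include?("dataCollectionSettings")
      end
      flags
    end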
+ @@CustomProperties["dataCollectionSettingsEnabled"] = "true" + end end } end diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 4f9c1879d..aa07e125a 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -240,14 +240,17 @@ def enumerate(podList = nil) telemetryProperties["SERVICE_ITEMS_CACHE_SIZE_KB"] = serviceItemsCacheSizeKB telemetryProperties["WINDOWS_CONTAINER_RECORDS_CACHE_SIZE_KB"] = @windowsContainerRecordsCacheSizeBytes / 1024 end - if !@nameSpaces.nil? && !@nameSpaces.empty? && @nameSpaces.length > 0 - telemetryProperties["DATA_COLLECTION_NAMESPACES"] = @nameSpaces - end - if !@nameSpaceFilteringMode.nil? && !@nameSpaceFilteringMode.empty? - telemetryProperties["DATA_COLLECTION_NAMESPACES_FILTERING_MODE"] = @nameSpaceFilteringMode - end - if @run_interval > 60 - telemetryProperties["DATA_COLLECTION_INTERVAL_MINUTES"] = @run_interval / 60 + if ExtensionUtils.isDataCollectionSettingsConfigured() + telemetryProperties["dataCollectionSettingsEnabled"] = "true" + if !@nameSpaces.nil? && !@nameSpaces.empty? && @nameSpaces.length > 0 + telemetryProperties["DATA_COLLECTION_NAMESPACES"] = @nameSpaces + end + if !@nameSpaceFilteringMode.nil? && !@nameSpaceFilteringMode.empty? + telemetryProperties["DATA_COLLECTION_NAMESPACES_FILTERING_MODE"] = @nameSpaceFilteringMode + end + if @run_interval > 60 + telemetryProperties["DATA_COLLECTION_INTERVAL_MINUTES"] = @run_interval / 60 + end end ApplicationInsightsUtility.sendCustomEvent("KubePodInventoryHeartBeatEvent", telemetryProperties) ApplicationInsightsUtility.sendMetricTelemetry("PodCount", @podCount, {}) From 14183027e7cc5a0076eb838c0855b2c9ffc717a1 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 21 Nov 2022 07:45:46 -0800 Subject: [PATCH 289/301] update template params --- .../aks/onboarding-using-msi-auth/existingClusterParam.json | 2 +- .../arc-k8s-extension-msi-auth/existingClusterParam.json | 2 +- .../existingClusterParam.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json index ead21476d..f3c2d7957 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json @@ -67,7 +67,7 @@ "value": "Off" }, "namespacesForDataCollection": { - "value": [ "kube-system"] + "value": [ "kube-system", "gatekeeper-system" ] } } } diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json index 77a94caa4..609d7ae94 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json @@ -70,7 +70,7 @@ "value": "Off" }, "namespacesForDataCollection": { - "value": [ "kube-system"] + "value": [ "kube-system", "gatekeeper-system" ] } } } diff --git a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json index 051338c9f..0c091e5a0 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json +++ 
b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json @@ -70,7 +70,7 @@ "value": "Off" }, "namespacesForDataCollection": { - "value": [ "kube-system"] + "value": [ "kube-system", "gatekeeper-system" ] } } } From 5bb1ac65647d75844dfc9c49ab6a59aaa98994e3 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Mon, 21 Nov 2022 13:30:10 -0800 Subject: [PATCH 290/301] fix pr feedback --- .../aks/onboarding-using-msi-auth/existingClusterParam.json | 2 +- .../arc-k8s-extension-msi-auth/existingClusterParam.json | 2 +- .../existingClusterParam.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json index f3c2d7957..d29ca1b08 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json @@ -67,7 +67,7 @@ "value": "Off" }, "namespacesForDataCollection": { - "value": [ "kube-system", "gatekeeper-system" ] + "value": [ "kube-system", "gatekeeper-system", "azure-arc" ] } } } diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json index 609d7ae94..d868fd29b 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json @@ -70,7 +70,7 @@ "value": "Off" }, "namespacesForDataCollection": { - "value": [ "kube-system", "gatekeeper-system" ] + "value": [ "kube-system", "gatekeeper-system", "azure-arc" ] } } } diff --git a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json index 0c091e5a0..80f241e8a 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json @@ -70,7 +70,7 @@ "value": "Off" }, "namespacesForDataCollection": { - "value": [ "kube-system", "gatekeeper-system" ] + "value": [ "kube-system", "gatekeeper-system", "azure-arc" ] } } } From 64892c4ac48894d6dc48e21e8ae0911b93abee73 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 23 Nov 2022 12:39:12 -0800 Subject: [PATCH 291/301] fix pr feedback --- .../onboarding-using-msi-auth/existingClusterOnboarding.json | 4 ++-- .../aks/onboarding-using-msi-auth/existingClusterParam.json | 2 +- .../arc-k8s-extension-msi-auth/existingClusterOnboarding.json | 4 ++-- .../arc-k8s-extension-msi-auth/existingClusterParam.json | 2 +- .../existingClusterOnboarding.json | 4 ++-- .../existingClusterParam.json | 2 +- source/plugins/ruby/constants.rb | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index 9e87c2efa..7eece9900 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -56,7 +56,7 @@ "description": "Data collection interval e.g. \"5m\" for metrics and inventory. 
Supported value range from 1m to 30m" } }, - "nameSpacesFilteringModeForDataCollection": { + "namespacesFilteringModeForDataCollection": { "type": "string", "metadata": { "description": "Data collection Filtering Mode for the namespaces" @@ -107,7 +107,7 @@ "extensionSettings": { "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", - "nameSpaceFilteringMode" : "[parameters('nameSpacesFilteringModeForDataCollection')]", + "nameSpaceFilteringMode" : "[parameters('namespacesFilteringModeForDataCollection')]", "nameSpaces": "[parameters('namespacesForDataCollection')]" } }, diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json index d29ca1b08..1cdc8b163 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json @@ -63,7 +63,7 @@ "dataCollectionInterval": { "value" : "1m" }, - "nameSpacesFilteringModeForDataCollection": { + "namespacesFilteringModeForDataCollection": { "value": "Off" }, "namespacesForDataCollection": { diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json index 77f0549b0..e87c14450 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json @@ -70,7 +70,7 @@ "description": "Data collection interval e.g. \"5m\" for metrics and inventory. Supported value range from 1m to 30m" } }, - "nameSpacesFilteringModeForDataCollection": { + "namespacesFilteringModeForDataCollection": { "type": "string", "metadata": { "description": "Data collection Filtering Mode for the namespaces" @@ -121,7 +121,7 @@ "extensionSettings": { "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", - "nameSpaceFilteringMode" : "[parameters('nameSpacesFilteringModeForDataCollection')]", + "nameSpaceFilteringMode" : "[parameters('namespacesFilteringModeForDataCollection')]", "nameSpaces": "[parameters('namespacesForDataCollection')]" } }, diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json index d868fd29b..17f7b2a51 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json @@ -66,7 +66,7 @@ "dataCollectionInterval": { "value" : "1m" }, - "nameSpacesFilteringModeForDataCollection": { + "namespacesFilteringModeForDataCollection": { "value": "Off" }, "namespacesForDataCollection": { diff --git a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json index 58fa598f1..e2bd1851c 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json @@ -70,7 +70,7 @@ "description": "Data collection interval e.g. \"5m\" for metrics and inventory. 
Supported value range from 1m to 30m" } }, - "nameSpacesFilteringModeForDataCollection": { + "namespacesFilteringModeForDataCollection": { "type": "string", "metadata": { "description": "Data collection Filtering Mode for the namespaces" @@ -121,7 +121,7 @@ "extensionSettings": { "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", - "nameSpaceFilteringMode" : "[parameters('nameSpacesFilteringModeForDataCollection')]", + "nameSpaceFilteringMode" : "[parameters('namespacesFilteringModeForDataCollection')]", "nameSpaces": "[parameters('namespacesForDataCollection')]" } }, diff --git a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json index 80f241e8a..811ff3f4d 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json @@ -66,7 +66,7 @@ "dataCollectionInterval": { "value" : "1m" }, - "nameSpacesFilteringModeForDataCollection": { + "namespacesFilteringModeForDataCollection": { "value": "Off" }, "namespacesForDataCollection": { diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index a0b555646..04e7449cb 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -150,7 +150,7 @@ class Constants EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS = "dataCollectionSettings" EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL = "interval" EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACE_FILTERING_MODE = "nameSpaceFilteringMode" - EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES = ["off", "include", "exlcude"] + EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES = ["off", "include", "exclude"] EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES = "nameSpaces" # min and max data collection interval minutes From 78b6ee6e016563e5be15822f90a8f1819cf141d0 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 23 Nov 2022 12:58:31 -0800 Subject: [PATCH 292/301] make naming consistent --- .../existingClusterOnboarding.json | 2 +- .../existingClusterOnboarding.json | 2 +- .../existingClusterOnboarding.json | 2 +- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 50 +++++++++---------- source/plugins/ruby/KubernetesApiClient.rb | 8 +-- source/plugins/ruby/constants.rb | 2 +- source/plugins/ruby/extension_utils.rb | 10 ++-- source/plugins/ruby/in_cadvisor_perf.rb | 10 ++-- source/plugins/ruby/in_containerinventory.rb | 8 +-- source/plugins/ruby/in_kube_events.rb | 8 +-- source/plugins/ruby/in_kube_perfinventory.rb | 8 +-- source/plugins/ruby/in_kube_podinventory.rb | 14 +++--- source/plugins/ruby/in_kube_pvinventory.rb | 8 +-- .../plugins/ruby/in_kubestate_deployments.rb | 8 +-- source/plugins/ruby/in_kubestate_hpa.rb | 8 +-- source/plugins/ruby/in_win_cadvisor_perf.rb | 10 ++-- 16 files changed, 79 insertions(+), 79 deletions(-) diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index 7eece9900..6a9c7b5a4 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -107,7 +107,7 @@ "extensionSettings": { 
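With the sample parameter values used in existingClusterParam.json in this series (interval "1m", mode "Off", and the kube-system, gatekeeper-system, azure-arc namespaces), the hunks that follow rename the emitted key to namespaceFilteringMode. A small sketch of the resolved extensionSettings shape, written as a Ruby hash purely for illustration:

    resolved_extension_settings = {
      "dataCollectionSettings" => {
        "interval" => "1m",
        "namespaceFilteringMode" => "Off",
        "nameSpaces" => ["kube-system", "gatekeeper-system", "azure-arc"],
      },
    }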
"dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", - "nameSpaceFilteringMode" : "[parameters('namespacesFilteringModeForDataCollection')]", + "namespaceFilteringMode" : "[parameters('namespacesFilteringModeForDataCollection')]", "nameSpaces": "[parameters('namespacesForDataCollection')]" } }, diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json index e87c14450..6fe7305d6 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json @@ -121,7 +121,7 @@ "extensionSettings": { "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", - "nameSpaceFilteringMode" : "[parameters('namespacesFilteringModeForDataCollection')]", + "namespaceFilteringMode" : "[parameters('namespacesFilteringModeForDataCollection')]", "nameSpaces": "[parameters('namespacesForDataCollection')]" } }, diff --git a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json index e2bd1851c..5ac986381 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json @@ -121,7 +121,7 @@ "extensionSettings": { "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", - "nameSpaceFilteringMode" : "[parameters('namespacesFilteringModeForDataCollection')]", + "namespaceFilteringMode" : "[parameters('namespacesFilteringModeForDataCollection')]", "nameSpaces": "[parameters('namespacesForDataCollection')]" } }, diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 85602bac0..a3949c77a 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -133,7 +133,7 @@ def getCAdvisorUri(winNode, relativeUri) return baseUri + relativeUri end - def getMetrics(winNode: nil, nameSpaceFilteringMode: "off", nameSpaces: [], metricTime: Time.now.utc.iso8601) + def getMetrics(winNode: nil, namespaceFilteringMode: "off", nameSpaces: [], metricTime: Time.now.utc.iso8601) metricDataItems = [] begin cAdvisorStats = getSummaryStatsFromCAdvisor(winNode) @@ -155,8 +155,8 @@ def getMetrics(winNode: nil, nameSpaceFilteringMode: "off", nameSpaces: [], metr # Checking if we are in windows daemonset and sending only few metrics that are needed for MDM if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 # Container metrics - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, nameSpaceFilteringMode, nameSpaces)) - containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, nameSpaceFilteringMode, nameSpaces) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, namespaceFilteringMode, nameSpaces)) + containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, namespaceFilteringMode, nameSpaces) if containerCpuUsageNanoSecondsRate && !containerCpuUsageNanoSecondsRate.empty? && !containerCpuUsageNanoSecondsRate.nil? metricDataItems.concat(containerCpuUsageNanoSecondsRate) end @@ -167,15 +167,15 @@ def getMetrics(winNode: nil, nameSpaceFilteringMode: "off", nameSpaces: [], metr end metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime)) else - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, nameSpaceFilteringMode, nameSpaces)) - metricDataItems.concat(getContainerStartTimeMetricItems(metricInfo, hostName, "restartTimeEpoch", metricTime, nameSpaceFilteringMode, nameSpaces)) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, namespaceFilteringMode, nameSpaces)) + metricDataItems.concat(getContainerStartTimeMetricItems(metricInfo, hostName, "restartTimeEpoch", metricTime, namespaceFilteringMode, nameSpaces)) if operatingSystem == "Linux" - metricDataItems.concat(getContainerCpuMetricItems(metricInfo, hostName, "usageNanoCores", Constants::CPU_USAGE_NANO_CORES, metricTime, nameSpaceFilteringMode, nameSpaces)) - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime, operatingSystem, nameSpaceFilteringMode, nameSpaces)) + metricDataItems.concat(getContainerCpuMetricItems(metricInfo, hostName, "usageNanoCores", Constants::CPU_USAGE_NANO_CORES, metricTime, namespaceFilteringMode, nameSpaces)) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime, operatingSystem, namespaceFilteringMode, nameSpaces)) metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime)) elsif operatingSystem == "Windows" - containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, nameSpaceFilteringMode, nameSpaces) + containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, namespaceFilteringMode, nameSpaces) if containerCpuUsageNanoSecondsRate && !containerCpuUsageNanoSecondsRate.empty? && !containerCpuUsageNanoSecondsRate.nil? 
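For callers, the renamed keyword argument reads as in the sketch below; this assumes the agent's Ruby environment where CAdvisorMetricsAPIClient is loaded, and the filter values are illustrative rather than taken from a real configuration:

    # Hypothetical invocation using the renamed namespaceFilteringMode keyword.
    records = CAdvisorMetricsAPIClient.getMetrics(
      winNode: nil,
      namespaceFilteringMode: "exclude",
      nameSpaces: ["kube-system"],
      metricTime: Time.now.utc.iso8601
    )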
metricDataItems.concat(containerCpuUsageNanoSecondsRate) end @@ -210,7 +210,7 @@ def getMetrics(winNode: nil, nameSpaceFilteringMode: "off", nameSpaces: [], metr return metricDataItems end - def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, nameSpaceFilteringMode, nameSpaces) + def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, namespaceFilteringMode, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId timeDifference = (DateTime.now.to_time.to_i - @@telemetryCpuMetricTimeTracker).abs @@ -222,7 +222,7 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, nameSpaceFilteringMode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, namespaceFilteringMode, nameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| @@ -312,7 +312,7 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met return metricItems end - def getInsightsMetrics(winNode: nil, nameSpaceFilteringMode: "off", nameSpaces: [], metricTime: Time.now.utc.iso8601) + def getInsightsMetrics(winNode: nil, namespaceFilteringMode: "off", nameSpaces: [], metricTime: Time.now.utc.iso8601) metricDataItems = [] begin cAdvisorStats = getSummaryStatsFromCAdvisor(winNode) @@ -331,11 +331,11 @@ def getInsightsMetrics(winNode: nil, nameSpaceFilteringMode: "off", nameSpaces: operatingSystem = "Linux" end if !metricInfo.nil? - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryTotal", "containerGpumemoryTotalBytes", metricTime, nameSpaceFilteringMode, nameSpaces)) - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryUsed", "containerGpumemoryUsedBytes", metricTime, nameSpaceFilteringMode, nameSpaces)) - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "dutyCycle", "containerGpuDutyCycle", metricTime, nameSpaceFilteringMode, nameSpaces)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryTotal", "containerGpumemoryTotalBytes", metricTime, namespaceFilteringMode, nameSpaces)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryUsed", "containerGpumemoryUsedBytes", metricTime, namespaceFilteringMode, nameSpaces)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "dutyCycle", "containerGpuDutyCycle", metricTime, namespaceFilteringMode, nameSpaces)) - metricDataItems.concat(getPersistentVolumeMetrics(metricInfo, hostName, "usedBytes", Constants::PV_USED_BYTES, metricTime, nameSpaceFilteringMode, nameSpaces)) + metricDataItems.concat(getPersistentVolumeMetrics(metricInfo, hostName, "usedBytes", Constants::PV_USED_BYTES, metricTime, namespaceFilteringMode, nameSpaces)) else @Log.warn("Couldn't get Insights metrics information for host: #{hostName} os:#{operatingSystem}") end @@ -346,7 +346,7 @@ def getInsightsMetrics(winNode: nil, nameSpaceFilteringMode: "off", nameSpaces: return metricDataItems end - def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metricNameToReturn, metricPollTime, nameSpaceFilteringMode, nameSpaces) + def getPersistentVolumeMetrics(metricJSON, hostName, 
metricNameToCollect, metricNameToReturn, metricPollTime, namespaceFilteringMode, nameSpaces) telemetryTimeDifference = (DateTime.now.to_time.to_i - @@telemetryPVKubeSystemMetricsTimeTracker).abs telemetryTimeDifferenceInMinutes = telemetryTimeDifference / 60 @@ -357,7 +357,7 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric metricInfo = metricJSON metricInfo["pods"].each do |pod| podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(pod["podRef"]["name"], podNamespace, nameSpaceFilteringMode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(pod["podRef"]["name"], podNamespace, namespaceFilteringMode, nameSpaces) excludeNamespace = false if (podNamespace.downcase == "kube-system") && @pvKubeSystemCollectionMetricsEnabled == "false" excludeNamespace = true @@ -419,7 +419,7 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric return metricItems end - def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCollect, metricNametoReturn, metricPollTime, nameSpaceFilteringMode, nameSpaces) + def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCollect, metricNametoReturn, metricPollTime, namespaceFilteringMode, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId clusterName = KubernetesApiClient.getClusterName @@ -429,7 +429,7 @@ def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCo podUid = pod["podRef"]["uid"] podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, nameSpaceFilteringMode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, namespaceFilteringMode, nameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| @@ -507,7 +507,7 @@ def resetWinContainerIdCache end # usageNanoCores doesnt exist for windows nodes. Hence need to compute this from usageCoreNanoSeconds - def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, nameSpaceFilteringMode, nameSpaces) + def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, namespaceFilteringMode, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId timeDifference = (DateTime.now.to_time.to_i - @@telemetryCpuMetricTimeTracker).abs @@ -521,7 +521,7 @@ def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, nameSpaceFilteringMode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, namespaceFilteringMode, nameSpaces) if (!pod["containers"].nil?) 
pod["containers"].each do |container| @@ -638,7 +638,7 @@ def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, return metricItems end - def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollect, metricNametoReturn, metricPollTime, operatingSystem, nameSpaceFilteringMode, nameSpaces) + def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollect, metricNametoReturn, metricPollTime, operatingSystem, namespaceFilteringMode, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId timeDifference = (DateTime.now.to_time.to_i - @@telemetryMemoryMetricTimeTracker).abs @@ -649,7 +649,7 @@ def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollec podUid = pod["podRef"]["uid"] podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, nameSpaceFilteringMode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, namespaceFilteringMode, nameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| containerName = container["name"] @@ -887,7 +887,7 @@ def getNodeLastRebootTimeMetric(metricJSON, hostName, metricNametoReturn, metric return metricItem end - def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, metricPollTime, nameSpaceFilteringMode, nameSpaces) + def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, metricPollTime, namespaceFilteringMode, nameSpaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId #currentTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z @@ -897,7 +897,7 @@ def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, m podUid = pod["podRef"]["uid"] podNamespace = pod["podRef"]["namespace"] podName = pod["podRef"]["name"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, nameSpaceFilteringMode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, namespaceFilteringMode, nameSpaces) if (!pod["containers"].nil?) pod["containers"].each do |container| containerName = container["name"] diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 390b508a9..65f8c6e97 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -1409,17 +1409,17 @@ def isEmitCacheTelemetry return isEmitCacheTelemtryEnabled end - def isExcludeResourceItem(resourceName, resourceNamespace, nameSpaceFilteringMode, nameSpaces) + def isExcludeResourceItem(resourceName, resourceNamespace, namespaceFilteringMode, nameSpaces) isExclude = false begin if !resourceName.nil? && !resourceName.empty? && !resourceNamespace.nil? && !resourceNamespace.empty? # data collection namespace filtering not applicable for ama-logs agent as customer needs to monitor the agent if resourceName.start_with?("ama-logs") && resourceNamespace.eql?("kube-system") isExclude = false - elsif !nameSpaces.nil? && !nameSpaces.empty? && nameSpaces.length > 0 && !nameSpaceFilteringMode.nil? && !nameSpaceFilteringMode.empty? - if nameSpaceFilteringMode == "exclude" && nameSpaces.include?(resourceNamespace) + elsif !nameSpaces.nil? && !nameSpaces.empty? && nameSpaces.length > 0 && !namespaceFilteringMode.nil? && !namespaceFilteringMode.empty? 
+ if namespaceFilteringMode == "exclude" && nameSpaces.include?(resourceNamespace) isExclude = true - elsif nameSpaceFilteringMode == "include" && !nameSpaces.include?(resourceNamespace) + elsif namespaceFilteringMode == "include" && !nameSpaces.include?(resourceNamespace) isExclude = true end end diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index 04e7449cb..0a69936de 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -149,7 +149,7 @@ class Constants EXTENSION_SETTINGS = "extensionSettings" EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS = "dataCollectionSettings" EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL = "interval" - EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACE_FILTERING_MODE = "nameSpaceFilteringMode" + EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACE_FILTERING_MODE = "namespaceFilteringMode" EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES = ["off", "include", "exclude"] EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES = "nameSpaces" diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index 25cc4452e..055d7fc7e 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -87,7 +87,7 @@ def getNamespacesForDataCollection end def getNamespacesFilteringModeForDataCollection - nameSpaceFilteringMode = "off" + namespaceFilteringMode = "off" begin dataCollectionSettings = Extension.instance.get_extension_data_collection_settings() if !dataCollectionSettings.nil? && @@ -96,18 +96,18 @@ def getNamespacesFilteringModeForDataCollection mode = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACE_FILTERING_MODE] if !mode.nil? && !mode.empty? 
if Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES.include?(mode.downcase) - nameSpaceFilteringMode = mode.downcase + namespaceFilteringMode = mode.downcase else - $log.warn("ExtensionUtils::getNamespacesFilteringModeForDataCollection: nameSpaceFilteringMode: #{mode} not supported hence using default") + $log.warn("ExtensionUtils::getNamespacesFilteringModeForDataCollection: namespaceFilteringMode: #{mode} not supported hence using default") end else - $log.warn("ExtensionUtils::getNamespacesFilteringModeForDataCollection: nameSpaceFilteringMode: #{mode} not valid hence using default") + $log.warn("ExtensionUtils::getNamespacesFilteringModeForDataCollection: namespaceFilteringMode: #{mode} not valid hence using default") end end rescue => errorStr $log.warn("ExtensionUtils::getNamespacesFilteringModeForDataCollection: failed with an exception: #{errorStr}") end - return nameSpaceFilteringMode + return namespaceFilteringMode end end end diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index 5dd4521bc..8b522ae21 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -23,7 +23,7 @@ def initialize require_relative "constants" require_relative "extension_utils" @nameSpaces = [] - @nameSpaceFilteringMode = "off" + @namespaceFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -80,12 +80,12 @@ def enumerate() $log.info("in_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_cadvisor_perf::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_cadvisor_perf::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, nameSpaceFilteringMode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime) + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, namespaceFilteringMode: @namespaceFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime) metricData.each do |record| eventStream.add(time, record) if record end @@ -101,7 +101,7 @@ def enumerate() begin if !@@isWindows.nil? 
&& @@isWindows == false containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, nameSpaceFilteringMode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime)) + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, namespaceFilteringMode: @namespaceFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime)) containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index f866e5c75..8f839a278 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -58,7 +58,7 @@ def enumerate containerInventory = Array.new eventStream = Fluent::MultiEventStream.new hostName = "" - @nameSpaceFilteringMode = "off" + @namespaceFilteringMode = "off" @nameSpaces = [] $log.info("in_container_inventory::enumerate : Begin processing @ #{Time.now.utc.iso8601}") if ExtensionUtils.isAADMSIAuthMode() @@ -72,8 +72,8 @@ def enumerate $log.info("in_container_inventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_container_inventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_container_inventory::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_container_inventory::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end begin @@ -87,7 +87,7 @@ def enumerate podList = JSON.parse(response.body) if !podList.nil? && !podList.empty? && podList.key?("items") && !podList["items"].nil? && !podList["items"].empty? 
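
The extension_utils and plugin hunks in this stretch all settle on the same contract: the filtering mode read from the Data Collection Rule must be one of "off", "include" or "exclude", and anything else falls back to "off". A minimal stand-alone sketch of that resolution step, assuming a hypothetical helper name rather than the shipped helper above:

    # Illustrative only: SUPPORTED_MODES mirrors the
    # EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES constant;
    # resolve_filtering_mode is a made-up name, not a method in the repo.
    SUPPORTED_MODES = ["off", "include", "exclude"].freeze

    def resolve_filtering_mode(raw_mode)
      return "off" if raw_mode.nil? || raw_mode.empty?   # unset -> default
      mode = raw_mode.downcase
      SUPPORTED_MODES.include?(mode) ? mode : "off"      # unsupported value -> default
    end

    # resolve_filtering_mode("Include") => "include"
    # resolve_filtering_mode("deny")    => "off"
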
podList["items"].each do |item| - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @nameSpaceFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @namespaceFilteringMode, @nameSpaces) containerInventoryRecords = KubernetesContainerInventory.getContainerInventoryRecords(item, batchTime, clusterCollectEnvironmentVar) containerInventoryRecords.each do |containerRecord| ContainerInventoryState.writeContainerState(containerRecord) diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index d306d04b3..274920ee1 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -29,7 +29,7 @@ def initialize # Initilize enable/disable normal event collection @collectAllKubeEvents = false @nameSpaces = [] - @nameSpaceFilteringMode = "off" + @namespaceFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -96,8 +96,8 @@ def enumerate $log.info("in_kube_events::enumerate: using kubeevents tag -#{@tag} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_events::enumerate: using data collection nameSpaces -#{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kube_events::enumerate: using data collection filtering mode for nameSpaces -#{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kube_events::enumerate: using data collection filtering mode for nameSpaces -#{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil @@ -170,7 +170,7 @@ def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTim end # drop the events if the event of the excluded namespace - next unless !KubernetesApiClient.isExcludeResourceItem("", items["involvedObject"]["namespace"], @nameSpaceFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem("", items["involvedObject"]["namespace"], @namespaceFilteringMode, @nameSpaces) record["ObjectKind"] = items["involvedObject"]["kind"] record["Namespace"] = items["involvedObject"]["namespace"] diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index 05c863522..a28272967 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -33,7 +33,7 @@ def initialize @kubeperfTag = "oneagent.containerInsights.LINUX_PERF_BLOB" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" @nameSpaces = [] - @nameSpaceFilteringMode = "off" + @namespaceFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -106,8 +106,8 @@ def enumerate(podList = nil) $log.info("in_kube_perfinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_perfinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kube_perfinventory::enumerate: using data collection filtering mode for nameSpaces: 
#{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kube_perfinventory::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end @@ -144,7 +144,7 @@ def parse_and_emit_records(podInventory, nodeAllocatableRecords, continuationTok begin #begin block start podInventory["items"].each do |item| #podInventory block start - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @nameSpaceFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @namespaceFilteringMode, @nameSpaces) nodeName = "" if !item["spec"]["nodeName"].nil? nodeName = item["spec"]["nodeName"] diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index aa07e125a..e10e4b18f 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -60,7 +60,7 @@ def initialize @kubeservicesTag = "oneagent.containerInsights.KUBE_SERVICES_BLOB" @containerInventoryTag = "oneagent.containerInsights.CONTAINER_INVENTORY_BLOB" @nameSpaces = [] - @nameSpaceFilteringMode = "off" + @namespaceFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -178,8 +178,8 @@ def enumerate(podList = nil) $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_podinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kube_podinventory::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kube_podinventory::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end @@ -245,8 +245,8 @@ def enumerate(podList = nil) if !@nameSpaces.nil? && !@nameSpaces.empty? && @nameSpaces.length > 0 telemetryProperties["DATA_COLLECTION_NAMESPACES"] = @nameSpaces end - if !@nameSpaceFilteringMode.nil? && !@nameSpaceFilteringMode.empty? - telemetryProperties["DATA_COLLECTION_NAMESPACES_FILTERING_MODE"] = @nameSpaceFilteringMode + if !@namespaceFilteringMode.nil? && !@namespaceFilteringMode.empty? 
+ telemetryProperties["DATA_COLLECTION_NAMESPACES_FILTERING_MODE"] = @namespaceFilteringMode end if @run_interval > 60 telemetryProperties["DATA_COLLECTION_INTERVAL_MINUTES"] = @run_interval / 60 @@ -297,7 +297,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc begin #begin block start podInventory["items"].each do |item| #podInventory block start - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @nameSpaceFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @namespaceFilteringMode, @nameSpaces) # pod inventory records podInventoryRecords = getPodInventoryRecords(item, serviceRecords, batchTime) @containerCount += podInventoryRecords.length @@ -395,7 +395,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc if continuationToken.nil? # sending kube services inventory records kubeServicesEventStream = Fluent::MultiEventStream.new serviceRecords.each do |kubeServiceRecord| - next unless !KubernetesApiClient.isExcludeResourceItem(kubeServiceRecord["ServiceName"], kubeServiceRecord["namespace"], @nameSpaceFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(kubeServiceRecord["ServiceName"], kubeServiceRecord["namespace"], @namespaceFilteringMode, @nameSpaces) if !kubeServiceRecord.nil? # adding before emit to reduce memory foot print kubeServiceRecord["ClusterId"] = KubernetesApiClient.getClusterId diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 7b3c251ae..8f6b7ae7d 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -25,7 +25,7 @@ def initialize @PV_CHUNK_SIZE = "5000" @pvTypeToCountHash = {} @nameSpaces = [] - @nameSpaceFilteringMode = "off" + @namespaceFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -74,8 +74,8 @@ def enumerate $log.info("in_kube_pvinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_pvinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kube_pvinventory::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kube_pvinventory::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end @@ -142,7 +142,7 @@ def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) record["PVType"] = type record["PVTypeInfo"] = typeInfo - next unless !KubernetesApiClient.isExcludeResourceItem(pvcName, pvcNamespace, @nameSpaceFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(pvcName, pvcNamespace, @namespaceFilteringMode, @nameSpaces) record["CollectionTime"] = batchTime record["ClusterId"] = KubernetesApiClient.getClusterId diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index d38996cee..1cc965e3d 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ 
b/source/plugins/ruby/in_kubestate_deployments.rb @@ -36,7 +36,7 @@ def initialize @ClusterId = KubernetesApiClient.getClusterId @ClusterName = KubernetesApiClient.getClusterName @nameSpaces = [] - @nameSpaceFilteringMode = "off" + @namespaceFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -95,8 +95,8 @@ def enumerate $log.info("in_kubestate_deployments::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kubestate_deployments::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kubestate_deployments::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kubestate_deployments::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end # Initializing continuation token to nil @@ -150,7 +150,7 @@ def parse_and_emit_records(deployments, batchTime = Time.utc.iso8601) begin metricInfo = deployments metricInfo["items"].each do |deployment| - next unless !KubernetesApiClient.isExcludeResourceItem(deployment["metadata"]["name"], deployment["metadata"]["namespace"], @nameSpaceFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(deployment["metadata"]["name"], deployment["metadata"]["namespace"], @namespaceFilteringMode, @nameSpaces) deploymentName = deployment["metadata"]["name"] deploymentNameSpace = deployment["metadata"]["namespace"] deploymentCreatedTime = "" diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index fd9d0b9a1..9a439aca7 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -33,7 +33,7 @@ def initialize @ClusterId = KubernetesApiClient.getClusterId @ClusterName = KubernetesApiClient.getClusterName @nameSpaces = [] - @nameSpaceFilteringMode = "off" + @namespaceFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -92,8 +92,8 @@ def enumerate $log.info("in_kubestate_hpa::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kubestate_hpa::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_kubestate_hpa::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_kubestate_hpa::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end # Initializing continuation token to nil @@ -138,7 +138,7 @@ def parse_and_emit_records(hpas, batchTime = Time.utc.iso8601) begin metricInfo = hpas metricInfo["items"].each do |hpa| - next unless !KubernetesApiClient.isExcludeResourceItem(hpa["metadata"]["name"], hpa["metadata"]["namespace"], @nameSpaceFilteringMode, @nameSpaces) + next unless 
!KubernetesApiClient.isExcludeResourceItem(hpa["metadata"]["name"], hpa["metadata"]["namespace"], @namespaceFilteringMode, @nameSpaces) hpaName = hpa["metadata"]["name"] hpaNameSpace = hpa["metadata"]["namespace"] hpaCreatedTime = "" diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 88b35a84e..51a19d591 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -23,7 +23,7 @@ def initialize require_relative "extension_utils" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" @nameSpaces = [] - @nameSpaceFilteringMode = "off" + @namespaceFilteringMode = "off" end config_param :run_interval, :time, :default => 60 @@ -77,8 +77,8 @@ def enumerate() $log.info("in_win_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_win_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @nameSpaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() - $log.info("in_cadvisor_perf::enumerate: using data collection filtering mode for nameSpaces: #{@nameSpaceFilteringMode} @ #{Time.now.utc.iso8601}") + @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + $log.info("in_cadvisor_perf::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end @@ -95,7 +95,7 @@ def enumerate() end @@winNodes.each do |winNode| eventStream = Fluent::MultiEventStream.new - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, nameSpaceFilteringMode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601) + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, namespaceFilteringMode: @namespaceFilteringMode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601) metricData.each do |record| if !record.empty? 
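
As the in_win_cadvisor_perf hunks here show, the filtering inputs ride along with every getMetrics and getInsightsMetrics call as keyword arguments whose defaults amount to "collect everything". A hypothetical stand-in for that call shape (collect_metrics is not the real method; the shipped one lives on CAdvisorMetricsAPIClient):

    # Defaults mean "no filtering"; the Windows path passes a node per call.
    def collect_metrics(win_node: nil, namespace_filtering_mode: "off", namespaces: [])
      # a real implementation would query cAdvisor here and filter per pod
      { node: win_node, mode: namespace_filtering_mode, namespaces: namespaces }
    end

    collect_metrics                                            # Linux daemonset: no node, no filtering
    collect_metrics(win_node: "aks-win-000000",                # assumed node name, for illustration
                    namespace_filtering_mode: "exclude",
                    namespaces: ["kube-system"])               # Windows loop: per-node call with filtering
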
eventStream.add(time, record) if record @@ -110,7 +110,7 @@ def enumerate() #start GPU InsightsMetrics items begin containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, nameSpaceFilteringMode: @nameSpaceFilteringMode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601)) + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, namespaceFilteringMode: @namespaceFilteringMode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601)) insightsMetricsEventStream = Fluent::MultiEventStream.new containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| From 8bac75a3c0338173e7c06688e2c2c67065c1a687 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 23 Nov 2022 13:02:03 -0800 Subject: [PATCH 293/301] make naming consistent --- .../onboarding-using-msi-auth/existingClusterOnboarding.json | 2 +- .../arc-k8s-extension-msi-auth/existingClusterOnboarding.json | 2 +- .../existingClusterOnboarding.json | 2 +- source/plugins/ruby/constants.rb | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index 6a9c7b5a4..2980e3996 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -108,7 +108,7 @@ "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", "namespaceFilteringMode" : "[parameters('namespacesFilteringModeForDataCollection')]", - "nameSpaces": "[parameters('namespacesForDataCollection')]" + "namespaces": "[parameters('namespacesForDataCollection')]" } }, "extensionName": "ContainerInsights" diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json index 6fe7305d6..d01a2d48f 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json @@ -122,7 +122,7 @@ "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", "namespaceFilteringMode" : "[parameters('namespacesFilteringModeForDataCollection')]", - "nameSpaces": "[parameters('namespacesForDataCollection')]" + "namespaces": "[parameters('namespacesForDataCollection')]" } }, "extensionName": "ContainerInsights" diff --git a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json index 5ac986381..910b4d6de 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json @@ -122,7 +122,7 @@ "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", "namespaceFilteringMode" : "[parameters('namespacesFilteringModeForDataCollection')]", - "nameSpaces": "[parameters('namespacesForDataCollection')]" + "namespaces": "[parameters('namespacesForDataCollection')]" } }, "extensionName": "ContainerInsights" diff --git 
a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index 0a69936de..f96a1f689 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -151,7 +151,7 @@ class Constants EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL = "interval" EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACE_FILTERING_MODE = "namespaceFilteringMode" EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES = ["off", "include", "exclude"] - EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES = "nameSpaces" + EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES = "namespaces" # min and max data collection interval minutes EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL_MIN = 1 From 278ed4745e95a515c3f625751c6765c426d53b06 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 23 Nov 2022 13:03:34 -0800 Subject: [PATCH 294/301] make naming consistent --- .../onboarding-using-msi-auth/existingClusterOnboarding.json | 4 ++-- .../aks/onboarding-using-msi-auth/existingClusterParam.json | 2 +- .../arc-k8s-extension-msi-auth/existingClusterOnboarding.json | 4 ++-- .../arc-k8s-extension-msi-auth/existingClusterParam.json | 2 +- .../existingClusterOnboarding.json | 4 ++-- .../existingClusterParam.json | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index 2980e3996..c127d4fcd 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -56,7 +56,7 @@ "description": "Data collection interval e.g. \"5m\" for metrics and inventory. Supported value range from 1m to 30m" } }, - "namespacesFilteringModeForDataCollection": { + "namespaceFilteringModeForDataCollection": { "type": "string", "metadata": { "description": "Data collection Filtering Mode for the namespaces" @@ -107,7 +107,7 @@ "extensionSettings": { "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", - "namespaceFilteringMode" : "[parameters('namespacesFilteringModeForDataCollection')]", + "namespaceFilteringMode" : "[parameters('namespaceFilteringModeForDataCollection')]", "namespaces": "[parameters('namespacesForDataCollection')]" } }, diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json index 1cdc8b163..f3622274a 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterParam.json @@ -63,7 +63,7 @@ "dataCollectionInterval": { "value" : "1m" }, - "namespacesFilteringModeForDataCollection": { + "namespaceFilteringModeForDataCollection": { "value": "Off" }, "namespacesForDataCollection": { diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json index d01a2d48f..058da17a3 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json @@ -70,7 +70,7 @@ "description": "Data collection interval e.g. \"5m\" for metrics and inventory. 
Supported value range from 1m to 30m" } }, - "namespacesFilteringModeForDataCollection": { + "namespaceFilteringModeForDataCollection": { "type": "string", "metadata": { "description": "Data collection Filtering Mode for the namespaces" @@ -121,7 +121,7 @@ "extensionSettings": { "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", - "namespaceFilteringMode" : "[parameters('namespacesFilteringModeForDataCollection')]", + "namespaceFilteringMode" : "[parameters('namespaceFilteringModeForDataCollection')]", "namespaces": "[parameters('namespacesForDataCollection')]" } }, diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json index 17f7b2a51..e0ff50b69 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterParam.json @@ -66,7 +66,7 @@ "dataCollectionInterval": { "value" : "1m" }, - "namespacesFilteringModeForDataCollection": { + "namespaceFilteringModeForDataCollection": { "value": "Off" }, "namespacesForDataCollection": { diff --git a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json index 910b4d6de..53d8768a2 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json @@ -70,7 +70,7 @@ "description": "Data collection interval e.g. \"5m\" for metrics and inventory. 
Supported value range from 1m to 30m" } }, - "namespacesFilteringModeForDataCollection": { + "namespaceFilteringModeForDataCollection": { "type": "string", "metadata": { "description": "Data collection Filtering Mode for the namespaces" @@ -121,7 +121,7 @@ "extensionSettings": { "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", - "namespaceFilteringMode" : "[parameters('namespacesFilteringModeForDataCollection')]", + "namespaceFilteringMode" : "[parameters('namespaceFilteringModeForDataCollection')]", "namespaces": "[parameters('namespacesForDataCollection')]" } }, diff --git a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json index 811ff3f4d..fe0ad951e 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json +++ b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterParam.json @@ -66,7 +66,7 @@ "dataCollectionInterval": { "value" : "1m" }, - "namespacesFilteringModeForDataCollection": { + "namespaceFilteringModeForDataCollection": { "value": "Off" }, "namespacesForDataCollection": { From 8395cfa61650ac4f99d4b490d3e7d791eaf490d2 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 23 Nov 2022 13:05:54 -0800 Subject: [PATCH 295/301] make naming consistent --- source/plugins/ruby/constants.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index f96a1f689..1ba715864 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -150,7 +150,7 @@ class Constants EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS = "dataCollectionSettings" EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_INTERVAL = "interval" EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACE_FILTERING_MODE = "namespaceFilteringMode" - EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES = ["off", "include", "exclude"] + EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACE_FILTERING_MODES = ["off", "include", "exclude"] EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES = "namespaces" # min and max data collection interval minutes From eae583ea248df3724c0719158ac77e7297eb3712 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 23 Nov 2022 13:21:47 -0800 Subject: [PATCH 296/301] make naming consistent --- source/plugins/ruby/extension_utils.rb | 8 ++++---- source/plugins/ruby/in_cadvisor_perf.rb | 2 +- source/plugins/ruby/in_containerinventory.rb | 2 +- source/plugins/ruby/in_kube_events.rb | 2 +- source/plugins/ruby/in_kube_perfinventory.rb | 2 +- source/plugins/ruby/in_kube_podinventory.rb | 2 +- source/plugins/ruby/in_kube_pvinventory.rb | 2 +- source/plugins/ruby/in_kubestate_deployments.rb | 2 +- source/plugins/ruby/in_kubestate_hpa.rb | 2 +- source/plugins/ruby/in_win_cadvisor_perf.rb | 2 +- 10 files changed, 13 insertions(+), 13 deletions(-) diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index 055d7fc7e..f39a727f3 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -86,7 +86,7 @@ def getNamespacesForDataCollection return nameSpaces end - def getNamespacesFilteringModeForDataCollection + def getNamespaceFilteringModeForDataCollection namespaceFilteringMode = "off" begin dataCollectionSettings = 
Extension.instance.get_extension_data_collection_settings() @@ -98,14 +98,14 @@ def getNamespacesFilteringModeForDataCollection if Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES.include?(mode.downcase) namespaceFilteringMode = mode.downcase else - $log.warn("ExtensionUtils::getNamespacesFilteringModeForDataCollection: namespaceFilteringMode: #{mode} not supported hence using default") + $log.warn("ExtensionUtils::getNamespaceFilteringModeForDataCollection: namespaceFilteringMode: #{mode} not supported hence using default") end else - $log.warn("ExtensionUtils::getNamespacesFilteringModeForDataCollection: namespaceFilteringMode: #{mode} not valid hence using default") + $log.warn("ExtensionUtils::getNamespaceFilteringModeForDataCollection: namespaceFilteringMode: #{mode} not valid hence using default") end end rescue => errorStr - $log.warn("ExtensionUtils::getNamespacesFilteringModeForDataCollection: failed with an exception: #{errorStr}") + $log.warn("ExtensionUtils::getNamespaceFilteringModeForDataCollection: failed with an exception: #{errorStr}") end return namespaceFilteringMode end diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index 8b522ae21..399743182 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -80,7 +80,7 @@ def enumerate() $log.info("in_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() $log.info("in_cadvisor_perf::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index 8f839a278..a97c26e0d 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -72,7 +72,7 @@ def enumerate $log.info("in_container_inventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_container_inventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() $log.info("in_container_inventory::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index 274920ee1..c470eb414 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -96,7 +96,7 @@ def enumerate $log.info("in_kube_events::enumerate: using kubeevents tag -#{@tag} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_events::enumerate: using data collection nameSpaces -#{@nameSpaces} @ #{Time.now.utc.iso8601}") - @namespaceFilteringMode = 
ExtensionUtils.getNamespacesFilteringModeForDataCollection() + @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() $log.info("in_kube_events::enumerate: using data collection filtering mode for nameSpaces -#{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index a28272967..b575d05ba 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -106,7 +106,7 @@ def enumerate(podList = nil) $log.info("in_kube_perfinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_perfinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() $log.info("in_kube_perfinventory::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index e10e4b18f..465312f15 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -178,7 +178,7 @@ def enumerate(podList = nil) $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_podinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() $log.info("in_kube_podinventory::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 8f6b7ae7d..ee7debfb4 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -74,7 +74,7 @@ def enumerate $log.info("in_kube_pvinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kube_pvinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() $log.info("in_kube_pvinventory::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index 1cc965e3d..8ada925e9 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -95,7 +95,7 @@ def enumerate $log.info("in_kubestate_deployments::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") 
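
Each input plugin touched by this patch repeats the same refresh at the top of enumerate, so a Data Collection Rule change is picked up on the next collection cycle without restarting the agent. Condensed into one hypothetical helper (refresh_data_collection_settings is not a method in the repo; the ExtensionUtils calls are the real ones, and in the shipped plugins they sit behind the AAD MSI auth-mode check):

    def refresh_data_collection_settings
      return unless ExtensionUtils.isDataCollectionSettingsConfigured()
      @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds()
      @namespaces = ExtensionUtils.getNamespacesForDataCollection()
      @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection()
      $log.info("using data collection namespaces: #{@namespaces}, filtering mode: #{@namespaceFilteringMode}")
    end
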
@nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kubestate_deployments::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() $log.info("in_kubestate_deployments::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 9a439aca7..0d29d07a4 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -92,7 +92,7 @@ def enumerate $log.info("in_kubestate_hpa::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_kubestate_hpa::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() $log.info("in_kubestate_hpa::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 51a19d591..cbc995528 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -77,7 +77,7 @@ def enumerate() $log.info("in_win_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() $log.info("in_win_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") - @namespaceFilteringMode = ExtensionUtils.getNamespacesFilteringModeForDataCollection() + @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() $log.info("in_cadvisor_perf::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end From 9f069d62b4d1a8e33228d381e9932e84faa6d6e9 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 23 Nov 2022 13:40:21 -0800 Subject: [PATCH 297/301] fix pr feedback --- .../existingClusterOnboarding.json | 2 +- .../existingClusterOnboarding.json | 2 +- .../existingClusterOnboarding.json | 2 +- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 50 +++++++++---------- source/plugins/ruby/KubernetesApiClient.rb | 8 +-- source/plugins/ruby/extension_utils.rb | 8 +-- source/plugins/ruby/in_cadvisor_perf.rb | 12 ++--- source/plugins/ruby/in_containerinventory.rb | 10 ++-- source/plugins/ruby/in_kube_events.rb | 10 ++-- source/plugins/ruby/in_kube_perfinventory.rb | 10 ++-- source/plugins/ruby/in_kube_podinventory.rb | 16 +++--- source/plugins/ruby/in_kube_pvinventory.rb | 10 ++-- .../plugins/ruby/in_kubestate_deployments.rb | 10 ++-- source/plugins/ruby/in_kubestate_hpa.rb | 10 ++-- source/plugins/ruby/in_win_cadvisor_perf.rb | 12 ++--- 15 files changed, 86 insertions(+), 86 deletions(-) diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index c127d4fcd..7285b7eba 100644 --- 
a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -107,7 +107,7 @@ "extensionSettings": { "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", - "namespaceFilteringMode" : "[parameters('namespaceFilteringModeForDataCollection')]", + "namespaceFilteringMode": "[parameters('namespaceFilteringModeForDataCollection')]", "namespaces": "[parameters('namespacesForDataCollection')]" } }, diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json index 058da17a3..af2db71ff 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json @@ -121,7 +121,7 @@ "extensionSettings": { "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", - "namespaceFilteringMode" : "[parameters('namespaceFilteringModeForDataCollection')]", + "namespaceFilteringMode": "[parameters('namespaceFilteringModeForDataCollection')]", "namespaces": "[parameters('namespacesForDataCollection')]" } }, diff --git a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json index 53d8768a2..0cc6a5d36 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-provisionedcluster-msi-auth/existingClusterOnboarding.json @@ -121,7 +121,7 @@ "extensionSettings": { "dataCollectionSettings" : { "interval": "[parameters('dataCollectionInterval')]", - "namespaceFilteringMode" : "[parameters('namespaceFilteringModeForDataCollection')]", + "namespaceFilteringMode": "[parameters('namespaceFilteringModeForDataCollection')]", "namespaces": "[parameters('namespacesForDataCollection')]" } }, diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index a3949c77a..7f61f9ff9 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -133,7 +133,7 @@ def getCAdvisorUri(winNode, relativeUri) return baseUri + relativeUri end - def getMetrics(winNode: nil, namespaceFilteringMode: "off", nameSpaces: [], metricTime: Time.now.utc.iso8601) + def getMetrics(winNode: nil, namespaceFilteringMode: "off", namespaces: [], metricTime: Time.now.utc.iso8601) metricDataItems = [] begin cAdvisorStats = getSummaryStatsFromCAdvisor(winNode) @@ -155,8 +155,8 @@ def getMetrics(winNode: nil, namespaceFilteringMode: "off", nameSpaces: [], metr # Checking if we are in windows daemonset and sending only few metrics that are needed for MDM if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 # Container metrics - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, namespaceFilteringMode, nameSpaces)) - containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, namespaceFilteringMode, nameSpaces) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, namespaceFilteringMode, namespaces)) + containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, namespaceFilteringMode, namespaces) if containerCpuUsageNanoSecondsRate && !containerCpuUsageNanoSecondsRate.empty? && !containerCpuUsageNanoSecondsRate.nil? metricDataItems.concat(containerCpuUsageNanoSecondsRate) end @@ -167,15 +167,15 @@ def getMetrics(winNode: nil, namespaceFilteringMode: "off", nameSpaces: [], metr end metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime)) else - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, namespaceFilteringMode, nameSpaces)) - metricDataItems.concat(getContainerStartTimeMetricItems(metricInfo, hostName, "restartTimeEpoch", metricTime, namespaceFilteringMode, nameSpaces)) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "workingSetBytes", Constants::MEMORY_WORKING_SET_BYTES, metricTime, operatingSystem, namespaceFilteringMode, namespaces)) + metricDataItems.concat(getContainerStartTimeMetricItems(metricInfo, hostName, "restartTimeEpoch", metricTime, namespaceFilteringMode, namespaces)) if operatingSystem == "Linux" - metricDataItems.concat(getContainerCpuMetricItems(metricInfo, hostName, "usageNanoCores", Constants::CPU_USAGE_NANO_CORES, metricTime, namespaceFilteringMode, nameSpaces)) - metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime, operatingSystem, namespaceFilteringMode, nameSpaces)) + metricDataItems.concat(getContainerCpuMetricItems(metricInfo, hostName, "usageNanoCores", Constants::CPU_USAGE_NANO_CORES, metricTime, namespaceFilteringMode, namespaces)) + metricDataItems.concat(getContainerMemoryMetricItems(metricInfo, hostName, "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime, operatingSystem, namespaceFilteringMode, namespaces)) metricDataItems.push(getNodeMetricItem(metricInfo, hostName, "memory", "rssBytes", Constants::MEMORY_RSS_BYTES, metricTime)) elsif operatingSystem == "Windows" - containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, namespaceFilteringMode, nameSpaces) + containerCpuUsageNanoSecondsRate = getContainerCpuMetricItemRate(metricInfo, hostName, "usageCoreNanoSeconds", Constants::CPU_USAGE_NANO_CORES, metricTime, namespaceFilteringMode, namespaces) if containerCpuUsageNanoSecondsRate && !containerCpuUsageNanoSecondsRate.empty? && !containerCpuUsageNanoSecondsRate.nil? 
metricDataItems.concat(containerCpuUsageNanoSecondsRate) end @@ -210,7 +210,7 @@ def getMetrics(winNode: nil, namespaceFilteringMode: "off", nameSpaces: [], metr return metricDataItems end - def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, namespaceFilteringMode, nameSpaces) + def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, namespaceFilteringMode, namespaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId timeDifference = (DateTime.now.to_time.to_i - @@telemetryCpuMetricTimeTracker).abs @@ -222,7 +222,7 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, namespaceFilteringMode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, namespaceFilteringMode, namespaces) if (!pod["containers"].nil?) pod["containers"].each do |container| @@ -312,7 +312,7 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met return metricItems end - def getInsightsMetrics(winNode: nil, namespaceFilteringMode: "off", nameSpaces: [], metricTime: Time.now.utc.iso8601) + def getInsightsMetrics(winNode: nil, namespaceFilteringMode: "off", namespaces: [], metricTime: Time.now.utc.iso8601) metricDataItems = [] begin cAdvisorStats = getSummaryStatsFromCAdvisor(winNode) @@ -331,11 +331,11 @@ def getInsightsMetrics(winNode: nil, namespaceFilteringMode: "off", nameSpaces: operatingSystem = "Linux" end if !metricInfo.nil? - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryTotal", "containerGpumemoryTotalBytes", metricTime, namespaceFilteringMode, nameSpaces)) - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryUsed", "containerGpumemoryUsedBytes", metricTime, namespaceFilteringMode, nameSpaces)) - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "dutyCycle", "containerGpuDutyCycle", metricTime, namespaceFilteringMode, nameSpaces)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryTotal", "containerGpumemoryTotalBytes", metricTime, namespaceFilteringMode, namespaces)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryUsed", "containerGpumemoryUsedBytes", metricTime, namespaceFilteringMode, namespaces)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "dutyCycle", "containerGpuDutyCycle", metricTime, namespaceFilteringMode, namespaces)) - metricDataItems.concat(getPersistentVolumeMetrics(metricInfo, hostName, "usedBytes", Constants::PV_USED_BYTES, metricTime, namespaceFilteringMode, nameSpaces)) + metricDataItems.concat(getPersistentVolumeMetrics(metricInfo, hostName, "usedBytes", Constants::PV_USED_BYTES, metricTime, namespaceFilteringMode, namespaces)) else @Log.warn("Couldn't get Insights metrics information for host: #{hostName} os:#{operatingSystem}") end @@ -346,7 +346,7 @@ def getInsightsMetrics(winNode: nil, namespaceFilteringMode: "off", nameSpaces: return metricDataItems end - def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metricNameToReturn, metricPollTime, namespaceFilteringMode, nameSpaces) + def getPersistentVolumeMetrics(metricJSON, hostName, 
metricNameToCollect, metricNameToReturn, metricPollTime, namespaceFilteringMode, namespaces) telemetryTimeDifference = (DateTime.now.to_time.to_i - @@telemetryPVKubeSystemMetricsTimeTracker).abs telemetryTimeDifferenceInMinutes = telemetryTimeDifference / 60 @@ -357,7 +357,7 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric metricInfo = metricJSON metricInfo["pods"].each do |pod| podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(pod["podRef"]["name"], podNamespace, namespaceFilteringMode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(pod["podRef"]["name"], podNamespace, namespaceFilteringMode, namespaces) excludeNamespace = false if (podNamespace.downcase == "kube-system") && @pvKubeSystemCollectionMetricsEnabled == "false" excludeNamespace = true @@ -419,7 +419,7 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric return metricItems end - def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCollect, metricNametoReturn, metricPollTime, namespaceFilteringMode, nameSpaces) + def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCollect, metricNametoReturn, metricPollTime, namespaceFilteringMode, namespaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId clusterName = KubernetesApiClient.getClusterName @@ -429,7 +429,7 @@ def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCo podUid = pod["podRef"]["uid"] podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, namespaceFilteringMode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, namespaceFilteringMode, namespaces) if (!pod["containers"].nil?) pod["containers"].each do |container| @@ -507,7 +507,7 @@ def resetWinContainerIdCache end # usageNanoCores doesnt exist for windows nodes. Hence need to compute this from usageCoreNanoSeconds - def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, namespaceFilteringMode, nameSpaces) + def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, metricNametoReturn, metricPollTime, namespaceFilteringMode, namespaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId timeDifference = (DateTime.now.to_time.to_i - @@telemetryCpuMetricTimeTracker).abs @@ -521,7 +521,7 @@ def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, namespaceFilteringMode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, namespaceFilteringMode, namespaces) if (!pod["containers"].nil?) 
pod["containers"].each do |container| @@ -638,7 +638,7 @@ def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, return metricItems end - def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollect, metricNametoReturn, metricPollTime, operatingSystem, namespaceFilteringMode, nameSpaces) + def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollect, metricNametoReturn, metricPollTime, operatingSystem, namespaceFilteringMode, namespaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId timeDifference = (DateTime.now.to_time.to_i - @@telemetryMemoryMetricTimeTracker).abs @@ -649,7 +649,7 @@ def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollec podUid = pod["podRef"]["uid"] podName = pod["podRef"]["name"] podNamespace = pod["podRef"]["namespace"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, namespaceFilteringMode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, namespaceFilteringMode, namespaces) if (!pod["containers"].nil?) pod["containers"].each do |container| containerName = container["name"] @@ -887,7 +887,7 @@ def getNodeLastRebootTimeMetric(metricJSON, hostName, metricNametoReturn, metric return metricItem end - def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, metricPollTime, namespaceFilteringMode, nameSpaces) + def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, metricPollTime, namespaceFilteringMode, namespaces) metricItems = [] clusterId = KubernetesApiClient.getClusterId #currentTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z @@ -897,7 +897,7 @@ def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, m podUid = pod["podRef"]["uid"] podNamespace = pod["podRef"]["namespace"] podName = pod["podRef"]["name"] - next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, namespaceFilteringMode, nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, namespaceFilteringMode, namespaces) if (!pod["containers"].nil?) pod["containers"].each do |container| containerName = container["name"] diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 65f8c6e97..cbcdf301d 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -1409,17 +1409,17 @@ def isEmitCacheTelemetry return isEmitCacheTelemtryEnabled end - def isExcludeResourceItem(resourceName, resourceNamespace, namespaceFilteringMode, nameSpaces) + def isExcludeResourceItem(resourceName, resourceNamespace, namespaceFilteringMode, namespaces) isExclude = false begin if !resourceName.nil? && !resourceName.empty? && !resourceNamespace.nil? && !resourceNamespace.empty? # data collection namespace filtering not applicable for ama-logs agent as customer needs to monitor the agent if resourceName.start_with?("ama-logs") && resourceNamespace.eql?("kube-system") isExclude = false - elsif !nameSpaces.nil? && !nameSpaces.empty? && nameSpaces.length > 0 && !namespaceFilteringMode.nil? && !namespaceFilteringMode.empty? - if namespaceFilteringMode == "exclude" && nameSpaces.include?(resourceNamespace) + elsif !namespaces.nil? && !namespaces.empty? && namespaces.length > 0 && !namespaceFilteringMode.nil? && !namespaceFilteringMode.empty? 
+ if namespaceFilteringMode == "exclude" && namespaces.include?(resourceNamespace) isExclude = true - elsif namespaceFilteringMode == "include" && !nameSpaces.include?(resourceNamespace) + elsif namespaceFilteringMode == "include" && !namespaces.include?(resourceNamespace) isExclude = true end end diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb index f39a727f3..8420c131c 100644 --- a/source/plugins/ruby/extension_utils.rb +++ b/source/plugins/ruby/extension_utils.rb @@ -66,7 +66,7 @@ def getDataCollectionIntervalSeconds end def getNamespacesForDataCollection - nameSpaces = [] + namespaces = [] begin dataCollectionSettings = Extension.instance.get_extension_data_collection_settings() if !dataCollectionSettings.nil? && @@ -75,15 +75,15 @@ def getNamespacesForDataCollection nameSpacesSetting = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES] if !nameSpacesSetting.nil? && !nameSpacesSetting.empty? && nameSpacesSetting.kind_of?(Array) && nameSpacesSetting.length > 0 uniqNamespaces = nameSpacesSetting.uniq - nameSpaces = uniqNamespaces.map(&:downcase) + namespaces = uniqNamespaces.map(&:downcase) else - $log.warn("ExtensionUtils::getNamespacesForDataCollection: nameSpaces: #{nameSpacesSetting} not valid hence using default") + $log.warn("ExtensionUtils::getNamespacesForDataCollection: namespaces: #{nameSpacesSetting} not valid hence using default") end end rescue => errorStr $log.warn("ExtensionUtils::getNamespacesForDataCollection: failed with an exception: #{errorStr}") end - return nameSpaces + return namespaces end def getNamespaceFilteringModeForDataCollection diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index 399743182..295346ef8 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -22,7 +22,7 @@ def initialize require_relative "omslog" require_relative "constants" require_relative "extension_utils" - @nameSpaces = [] + @namespaces = [] @namespaceFilteringMode = "off" end @@ -78,14 +78,14 @@ def enumerate() if ExtensionUtils.isDataCollectionSettingsConfigured() @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @namespaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_cadvisor_perf::enumerate: using data collection namespaces: #{@namespaces} @ #{Time.now.utc.iso8601}") @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() - $log.info("in_cadvisor_perf::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_cadvisor_perf::enumerate: using data collection filtering mode for namespaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, namespaceFilteringMode: @namespaceFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime) + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, namespaceFilteringMode: @namespaceFilteringMode, namespaces: @namespaces, metricTime: batchTime) metricData.each do |record| eventStream.add(time, record) if record end @@ -101,7 +101,7 @@ def 
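The KubernetesApiClient.isExcludeResourceItem change above is the hub that all of the call sites in the following plugins feed: each input plugin passes its namespaces list and filtering mode to this one predicate. A condensed sketch of the decision it implements, with the nil/empty guards from the original omitted for brevity and a hypothetical method name:

    # Condensed restatement of the filtering decision shown in the diff above.
    def exclude_resource?(name, namespace, mode, namespaces)
      # ama-logs pods in kube-system are always collected so the agent itself stays monitored
      return false if name.to_s.start_with?("ama-logs") && namespace == "kube-system"
      # with no namespaces or no filtering mode configured, nothing is filtered
      return false if namespaces.nil? || namespaces.empty? || mode.nil? || mode.empty?
      case mode
      when "exclude" then namespaces.include?(namespace)   # drop only the listed namespaces
      when "include" then !namespaces.include?(namespace)  # drop everything not listed
      else false                                           # "off" or unrecognized: collect everything
      end
    end

Call sites use it as a guard, as in the hunks below: next unless !KubernetesApiClient.isExcludeResourceItem(podName, podNamespace, @namespaceFilteringMode, @namespaces).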
enumerate() begin if !@@isWindows.nil? && @@isWindows == false containerGPUusageInsightsMetricsDataItems = [] - containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, namespaceFilteringMode: @namespaceFilteringMode, nameSpaces: @nameSpaces, metricTime: batchTime)) + containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: nil, namespaceFilteringMode: @namespaceFilteringMode, namespaces: @namespaces, metricTime: batchTime)) containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index a97c26e0d..97476b4cb 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -59,7 +59,7 @@ def enumerate eventStream = Fluent::MultiEventStream.new hostName = "" @namespaceFilteringMode = "off" - @nameSpaces = [] + @namespaces = [] $log.info("in_container_inventory::enumerate : Begin processing @ #{Time.now.utc.iso8601}") if ExtensionUtils.isAADMSIAuthMode() $log.info("in_container_inventory::enumerate: AAD AUTH MSI MODE") @@ -70,10 +70,10 @@ def enumerate if ExtensionUtils.isDataCollectionSettingsConfigured() @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_container_inventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_container_inventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @namespaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_container_inventory::enumerate: using data collection namespaces: #{@namespaces} @ #{Time.now.utc.iso8601}") @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() - $log.info("in_container_inventory::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_container_inventory::enumerate: using data collection filtering mode for namespaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end begin @@ -87,7 +87,7 @@ def enumerate podList = JSON.parse(response.body) if !podList.nil? && !podList.empty? && podList.key?("items") && !podList["items"].nil? && !podList["items"].empty? 
podList["items"].each do |item| - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @namespaceFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @namespaceFilteringMode, @namespaces) containerInventoryRecords = KubernetesContainerInventory.getContainerInventoryRecords(item, batchTime, clusterCollectEnvironmentVar) containerInventoryRecords.each do |containerRecord| ContainerInventoryState.writeContainerState(containerRecord) diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index c470eb414..452be7a8b 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -28,7 +28,7 @@ def initialize # Initilize enable/disable normal event collection @collectAllKubeEvents = false - @nameSpaces = [] + @namespaces = [] @namespaceFilteringMode = "off" end @@ -94,10 +94,10 @@ def enumerate @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_EVENTS_DATA_TYPE) end $log.info("in_kube_events::enumerate: using kubeevents tag -#{@tag} @ #{Time.now.utc.iso8601}") - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_kube_events::enumerate: using data collection nameSpaces -#{@nameSpaces} @ #{Time.now.utc.iso8601}") + @namespaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kube_events::enumerate: using data collection namespaces -#{@namespaces} @ #{Time.now.utc.iso8601}") @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() - $log.info("in_kube_events::enumerate: using data collection filtering mode for nameSpaces -#{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_events::enumerate: using data collection filtering mode for namespaces -#{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end # Initializing continuation token to nil continuationToken = nil @@ -170,7 +170,7 @@ def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTim end # drop the events if the event of the excluded namespace - next unless !KubernetesApiClient.isExcludeResourceItem("", items["involvedObject"]["namespace"], @namespaceFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem("", items["involvedObject"]["namespace"], @namespaceFilteringMode, @namespaces) record["ObjectKind"] = items["involvedObject"]["kind"] record["Namespace"] = items["involvedObject"]["namespace"] diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index b575d05ba..ce290ffb5 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -32,7 +32,7 @@ def initialize @kubeperfTag = "oneagent.containerInsights.LINUX_PERF_BLOB" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" - @nameSpaces = [] + @namespaces = [] @namespaceFilteringMode = "off" end @@ -104,10 +104,10 @@ def enumerate(podList = nil) if ExtensionUtils.isDataCollectionSettingsConfigured() @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_perfinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_kube_perfinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @namespaces = 
ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kube_perfinventory::enumerate: using data collection namespaces: #{@namespaces} @ #{Time.now.utc.iso8601}") @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() - $log.info("in_kube_perfinventory::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_perfinventory::enumerate: using data collection filtering mode for namespaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end @@ -144,7 +144,7 @@ def parse_and_emit_records(podInventory, nodeAllocatableRecords, continuationTok begin #begin block start podInventory["items"].each do |item| #podInventory block start - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @namespaceFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @namespaceFilteringMode, @namespaces) nodeName = "" if !item["spec"]["nodeName"].nil? nodeName = item["spec"]["nodeName"] diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 465312f15..8e2048b02 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -59,7 +59,7 @@ def initialize @kubeservicesTag = "oneagent.containerInsights.KUBE_SERVICES_BLOB" @containerInventoryTag = "oneagent.containerInsights.CONTAINER_INVENTORY_BLOB" - @nameSpaces = [] + @namespaces = [] @namespaceFilteringMode = "off" end @@ -176,10 +176,10 @@ def enumerate(podList = nil) if ExtensionUtils.isDataCollectionSettingsConfigured() @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_podinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_kube_podinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @namespaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kube_podinventory::enumerate: using data collection namespaces: #{@namespaces} @ #{Time.now.utc.iso8601}") @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() - $log.info("in_kube_podinventory::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using data collection filtering mode for namespaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end @@ -242,8 +242,8 @@ def enumerate(podList = nil) end if ExtensionUtils.isDataCollectionSettingsConfigured() telemetryProperties["dataCollectionSettingsEnabled"] = "true" - if !@nameSpaces.nil? && !@nameSpaces.empty? && @nameSpaces.length > 0 - telemetryProperties["DATA_COLLECTION_NAMESPACES"] = @nameSpaces + if !@namespaces.nil? && !@namespaces.empty? && @namespaces.length > 0 + telemetryProperties["DATA_COLLECTION_NAMESPACES"] = @namespaces end if !@namespaceFilteringMode.nil? && !@namespaceFilteringMode.empty? 
telemetryProperties["DATA_COLLECTION_NAMESPACES_FILTERING_MODE"] = @namespaceFilteringMode @@ -297,7 +297,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc begin #begin block start podInventory["items"].each do |item| #podInventory block start - next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @namespaceFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(item["metadata"]["name"], item["metadata"]["namespace"], @namespaceFilteringMode, @namespaces) # pod inventory records podInventoryRecords = getPodInventoryRecords(item, serviceRecords, batchTime) @containerCount += podInventoryRecords.length @@ -395,7 +395,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc if continuationToken.nil? # sending kube services inventory records kubeServicesEventStream = Fluent::MultiEventStream.new serviceRecords.each do |kubeServiceRecord| - next unless !KubernetesApiClient.isExcludeResourceItem(kubeServiceRecord["ServiceName"], kubeServiceRecord["namespace"], @namespaceFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(kubeServiceRecord["ServiceName"], kubeServiceRecord["namespace"], @namespaceFilteringMode, @namespaces) if !kubeServiceRecord.nil? # adding before emit to reduce memory foot print kubeServiceRecord["ClusterId"] = KubernetesApiClient.getClusterId diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index ee7debfb4..54d1325e7 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -24,7 +24,7 @@ def initialize # Response size is around 1500 bytes per PV @PV_CHUNK_SIZE = "5000" @pvTypeToCountHash = {} - @nameSpaces = [] + @namespaces = [] @namespaceFilteringMode = "off" end @@ -72,10 +72,10 @@ def enumerate if ExtensionUtils.isDataCollectionSettingsConfigured() @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kube_pvinventory::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_kube_pvinventory::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @namespaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kube_pvinventory::enumerate: using data collection namespaces: #{@namespaces} @ #{Time.now.utc.iso8601}") @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() - $log.info("in_kube_pvinventory::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_pvinventory::enumerate: using data collection filtering mode for namespaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end @@ -142,7 +142,7 @@ def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) record["PVType"] = type record["PVTypeInfo"] = typeInfo - next unless !KubernetesApiClient.isExcludeResourceItem(pvcName, pvcNamespace, @namespaceFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(pvcName, pvcNamespace, @namespaceFilteringMode, @namespaces) record["CollectionTime"] = batchTime record["ClusterId"] = KubernetesApiClient.getClusterId diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index 
8ada925e9..7a49e5bd3 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -35,7 +35,7 @@ def initialize @NodeName = OMS::Common.get_hostname @ClusterId = KubernetesApiClient.getClusterId @ClusterName = KubernetesApiClient.getClusterName - @nameSpaces = [] + @namespaces = [] @namespaceFilteringMode = "off" end @@ -93,10 +93,10 @@ def enumerate if ExtensionUtils.isDataCollectionSettingsConfigured() @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kubestate_deployments::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_kubestate_deployments::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @namespaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kubestate_deployments::enumerate: using data collection namespaces: #{@namespaces} @ #{Time.now.utc.iso8601}") @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() - $log.info("in_kubestate_deployments::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_kubestate_deployments::enumerate: using data collection filtering mode for namespaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end # Initializing continuation token to nil @@ -150,7 +150,7 @@ def parse_and_emit_records(deployments, batchTime = Time.utc.iso8601) begin metricInfo = deployments metricInfo["items"].each do |deployment| - next unless !KubernetesApiClient.isExcludeResourceItem(deployment["metadata"]["name"], deployment["metadata"]["namespace"], @namespaceFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(deployment["metadata"]["name"], deployment["metadata"]["namespace"], @namespaceFilteringMode, @namespaces) deploymentName = deployment["metadata"]["name"] deploymentNameSpace = deployment["metadata"]["namespace"] deploymentCreatedTime = "" diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 0d29d07a4..15c5adfd2 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -32,7 +32,7 @@ def initialize @NodeName = OMS::Common.get_hostname @ClusterId = KubernetesApiClient.getClusterId @ClusterName = KubernetesApiClient.getClusterName - @nameSpaces = [] + @namespaces = [] @namespaceFilteringMode = "off" end @@ -90,10 +90,10 @@ def enumerate if ExtensionUtils.isDataCollectionSettingsConfigured() @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_kubestate_hpa::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_kubestate_hpa::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @namespaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_kubestate_hpa::enumerate: using data collection namespaces: #{@namespaces} @ #{Time.now.utc.iso8601}") @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() - $log.info("in_kubestate_hpa::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_kubestate_hpa::enumerate: using data collection filtering mode for 
namespaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end # Initializing continuation token to nil @@ -138,7 +138,7 @@ def parse_and_emit_records(hpas, batchTime = Time.utc.iso8601) begin metricInfo = hpas metricInfo["items"].each do |hpa| - next unless !KubernetesApiClient.isExcludeResourceItem(hpa["metadata"]["name"], hpa["metadata"]["namespace"], @namespaceFilteringMode, @nameSpaces) + next unless !KubernetesApiClient.isExcludeResourceItem(hpa["metadata"]["name"], hpa["metadata"]["namespace"], @namespaceFilteringMode, @namespaces) hpaName = hpa["metadata"]["name"] hpaNameSpace = hpa["metadata"]["namespace"] hpaCreatedTime = "" diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index cbc995528..25bb20977 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -22,7 +22,7 @@ def initialize require_relative "constants" require_relative "extension_utils" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" - @nameSpaces = [] + @namespaces = [] @namespaceFilteringMode = "off" end @@ -75,10 +75,10 @@ def enumerate() if ExtensionUtils.isDataCollectionSettingsConfigured() @run_interval = ExtensionUtils.getDataCollectionIntervalSeconds() $log.info("in_win_cadvisor_perf::enumerate: using data collection interval(seconds): #{@run_interval} @ #{Time.now.utc.iso8601}") - @nameSpaces = ExtensionUtils.getNamespacesForDataCollection() - $log.info("in_win_cadvisor_perf::enumerate: using data collection nameSpaces: #{@nameSpaces} @ #{Time.now.utc.iso8601}") + @namespaces = ExtensionUtils.getNamespacesForDataCollection() + $log.info("in_win_cadvisor_perf::enumerate: using data collection namespaces: #{@namespaces} @ #{Time.now.utc.iso8601}") @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection() - $log.info("in_cadvisor_perf::enumerate: using data collection filtering mode for nameSpaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") + $log.info("in_cadvisor_perf::enumerate: using data collection filtering mode for namespaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}") end end @@ -95,7 +95,7 @@ def enumerate() end @@winNodes.each do |winNode| eventStream = Fluent::MultiEventStream.new - metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, namespaceFilteringMode: @namespaceFilteringMode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601) + metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, namespaceFilteringMode: @namespaceFilteringMode, namespaces: @namespaces, metricTime: Time.now.utc.iso8601) metricData.each do |record| if !record.empty? 
             eventStream.add(time, record) if record
@@ -110,7 +110,7 @@ def enumerate()
           #start GPU InsightsMetrics items
           begin
             containerGPUusageInsightsMetricsDataItems = []
-            containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, namespaceFilteringMode: @namespaceFilteringMode, nameSpaces: @nameSpaces, metricTime: Time.now.utc.iso8601))
+            containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, namespaceFilteringMode: @namespaceFilteringMode, namespaces: @namespaces, metricTime: Time.now.utc.iso8601))
             insightsMetricsEventStream = Fluent::MultiEventStream.new

             containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord|

From d4728befc03bd1b59299e9977d529ba2ea1e2956 Mon Sep 17 00:00:00 2001
From: Ganga Mahesh Siddem
Date: Wed, 23 Nov 2022 13:46:27 -0800
Subject: [PATCH 298/301] fix pr feedback

---
 source/plugins/ruby/extension_utils.rb | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb
index 8420c131c..03d5a4daa 100644
--- a/source/plugins/ruby/extension_utils.rb
+++ b/source/plugins/ruby/extension_utils.rb
@@ -72,12 +72,12 @@ def getNamespacesForDataCollection
        if !dataCollectionSettings.nil? &&
           !dataCollectionSettings.empty? &&
           dataCollectionSettings.has_key?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES)
-         nameSpacesSetting = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES]
-         if !nameSpacesSetting.nil? && !nameSpacesSetting.empty? && nameSpacesSetting.kind_of?(Array) && nameSpacesSetting.length > 0
-           uniqNamespaces = nameSpacesSetting.uniq
+         namespacesSetting = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES]
+         if !namespacesSetting.nil? && !namespacesSetting.empty? && namespacesSetting.kind_of?(Array) && namespacesSetting.length > 0
+           uniqNamespaces = namespacesSetting.uniq
            namespaces = uniqNamespaces.map(&:downcase)
          else
-           $log.warn("ExtensionUtils::getNamespacesForDataCollection: namespaces: #{nameSpacesSetting} not valid hence using default")
+           $log.warn("ExtensionUtils::getNamespacesForDataCollection: namespaces: #{namespacesSetting} not valid hence using default")
          end
        end
      rescue => errorStr

From 2e282c41244be0ef4ef91539a374481d3fa091a6 Mon Sep 17 00:00:00 2001
From: Ganga Mahesh Siddem
Date: Wed, 23 Nov 2022 18:50:38 -0800
Subject: [PATCH 299/301] fix renaming variable

---
 source/plugins/ruby/extension_utils.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/source/plugins/ruby/extension_utils.rb b/source/plugins/ruby/extension_utils.rb
index 03d5a4daa..0a150f8db 100644
--- a/source/plugins/ruby/extension_utils.rb
+++ b/source/plugins/ruby/extension_utils.rb
@@ -95,7 +95,7 @@ def getNamespaceFilteringModeForDataCollection
           dataCollectionSettings.has_key?(Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACE_FILTERING_MODE)
          mode = dataCollectionSettings[Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACE_FILTERING_MODE]
          if !mode.nil? && !mode.empty?
-           if Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACES_FILTERING_MODES.include?(mode.downcase)
+           if Constants::EXTENSION_SETTINGS_DATA_COLLECTION_SETTINGS_NAMESPACE_FILTERING_MODES.include?(mode.downcase)
              namespaceFilteringMode = mode.downcase
            else
              $log.warn("ExtensionUtils::getNamespaceFilteringModeForDataCollection: namespaceFilteringMode: #{mode} not supported hence using default")

From 6e91e492ff6d4ad4741131ce3ab673a3c00fca10 Mon Sep 17 00:00:00 2001
From: Ganga Mahesh Siddem
Date: Wed, 23 Nov 2022 19:29:50 -0800
Subject: [PATCH 300/301] add known cve to the ignore list

---
 .trivyignore | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.trivyignore b/.trivyignore
index 559ffc14b..fe78d98dc 100644
--- a/.trivyignore
+++ b/.trivyignore
@@ -9,3 +9,6 @@ CVE-2022-32149

 #dpkg vulnerability in ubuntu
 CVE-2022-2526
+
+#https://avd.aquasec.com/nvd/cve-2021-33621
+CVE-2021-33621

From 34cd31060876339b44d4513af62fc8f86093edde Mon Sep 17 00:00:00 2001
From: Ganga Mahesh Siddem
Date: Thu, 24 Nov 2022 10:01:56 -0800
Subject: [PATCH 301/301] minor log updates

---
 source/plugins/ruby/in_kube_events.rb | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb
index 452be7a8b..eb61dd3b7 100644
--- a/source/plugins/ruby/in_kube_events.rb
+++ b/source/plugins/ruby/in_kube_events.rb
@@ -93,11 +93,11 @@ def enumerate
         if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX)
           @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_EVENTS_DATA_TYPE)
         end
-        $log.info("in_kube_events::enumerate: using kubeevents tag -#{@tag} @ #{Time.now.utc.iso8601}")
+        $log.info("in_kube_events::enumerate: using kubeevents tag: #{@tag} @ #{Time.now.utc.iso8601}")
         @namespaces = ExtensionUtils.getNamespacesForDataCollection()
-        $log.info("in_kube_events::enumerate: using data collection namespaces -#{@namespaces} @ #{Time.now.utc.iso8601}")
+        $log.info("in_kube_events::enumerate: using data collection namespaces: #{@namespaces} @ #{Time.now.utc.iso8601}")
         @namespaceFilteringMode = ExtensionUtils.getNamespaceFilteringModeForDataCollection()
-        $log.info("in_kube_events::enumerate: using data collection filtering mode for namespaces -#{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}")
+        $log.info("in_kube_events::enumerate: using data collection filtering mode for namespaces: #{@namespaceFilteringMode} @ #{Time.now.utc.iso8601}")
       end
       # Initializing continuation token to nil
       continuationToken = nil
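Taken together, the two extension_utils.rb fixes above (PATCH 298 and 299) pin down how the data collection settings are sanitized before the plugins consume them: the configured namespace list is de-duplicated and lower-cased, and the filtering mode is honored only when it matches one of the supported modes named by the corrected constant. A minimal sketch of that sanitization, with the supported mode values assumed for illustration rather than read from constants.rb:

    # Sketch only: the supported values are assumed here, not taken from Constants.
    SUPPORTED_NAMESPACE_FILTERING_MODES = ["off", "include", "exclude"]

    def normalize_namespaces(setting)
      return [] unless setting.kind_of?(Array) && !setting.empty?
      setting.uniq.map(&:downcase)   # duplicates removed, compared case-insensitively downstream
    end

    def normalize_filtering_mode(mode, default = "off")
      return default if mode.nil? || mode.empty?
      SUPPORTED_NAMESPACE_FILTERING_MODES.include?(mode.downcase) ? mode.downcase : default
    end

Invalid input falls back to the defaults (an empty list and "off"), matching the warn-and-continue behavior in the diffs rather than failing the plugin.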