diff --git a/.pipelines/azure_pipeline_dev.yaml b/.pipelines/azure_pipeline_dev.yaml index a7819fb0f..88e5f934f 100644 --- a/.pipelines/azure_pipeline_dev.yaml +++ b/.pipelines/azure_pipeline_dev.yaml @@ -115,14 +115,14 @@ jobs: az acr login -n ${{ variables.containerRegistry }} if [ "$(Build.Reason)" != "PullRequest" ]; then - docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg IMAGE_TAG=$(linuxImagetag) --push . + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg IMAGE_TAG=$(linuxImagetag) --build-arg GOLANG_BASE_IMAGE=$(GOLANG_BASE_IMAGE) --build-arg CI_BASE_IMAGE=$(CI_BASE_IMAGE) --push . docker pull ${{ variables.repoImageName }}:$(linuxImagetag) else - docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg IMAGE_TAG=$(linuxImagetag) . + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageName }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg IMAGE_TAG=$(linuxImagetag) --build-arg GOLANG_BASE_IMAGE=$(GOLANG_BASE_IMAGE) --build-arg CI_BASE_IMAGE=$(CI_BASE_IMAGE) . # load the multi-arch image to run tests - docker buildx build --tag ${{ variables.repoImageName }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg IMAGE_TAG=$(linuxImagetag) --load . + docker buildx build --tag ${{ variables.repoImageName }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg IMAGE_TAG=$(linuxImagetag) --build-arg GOLANG_BASE_IMAGE=$(GOLANG_BASE_IMAGE) --build-arg CI_BASE_IMAGE=$(CI_BASE_IMAGE) --load . 
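To reproduce this parameterized build outside the pipeline, a minimal sketch follows; the registry path, tag, and base-image values are placeholders, not the pipeline's actual variable values (those live in the pipeline configuration):

```bash
# Sketch: run the same parameterized multi-arch build locally.
# The tag and base images below are placeholders; the pipeline injects
# GOLANG_BASE_IMAGE and CI_BASE_IMAGE from its own build variables.
export GOLANG_BASE_IMAGE="golang:1.18.3"   # placeholder builder base image
export CI_BASE_IMAGE="ubuntu:18.04"        # placeholder runtime base image

docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --tag myregistry.azurecr.io/ciprod:citest \
  -f kubernetes/linux/Dockerfile.multiarch \
  --build-arg IMAGE_TAG=citest \
  --build-arg GOLANG_BASE_IMAGE="$GOLANG_BASE_IMAGE" \
  --build-arg CI_BASE_IMAGE="$CI_BASE_IMAGE" \
  --push .
```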
fi curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin @@ -135,14 +135,14 @@ jobs: condition: eq(variables.IS_PR, true) inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)/linux' - DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04' + DockerImagesToScan: '$(GOLANG_BASE_IMAGE), $(CI_BASE_IMAGE)' - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 displayName: 'Generation Task' condition: eq(variables.IS_PR, false) inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)/linux' - DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04, ${{ variables.repoImageName }}:$(linuxImagetag)' + DockerImagesToScan: '$(GOLANG_BASE_IMAGE), $(CI_BASE_IMAGE), ${{ variables.repoImageName }}:$(linuxImagetag)' - task: PublishBuildArtifacts@1 inputs: diff --git a/.pipelines/azure_pipeline_prod.yaml b/.pipelines/azure_pipeline_prod.yaml index a7fab03b0..b4a93adfd 100644 --- a/.pipelines/azure_pipeline_prod.yaml +++ b/.pipelines/azure_pipeline_prod.yaml @@ -119,14 +119,14 @@ jobs: az acr login -n ${{ variables.containerRegistry }} if [ "$(Build.Reason)" != "PullRequest" ]; then - docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageNameLinux }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --push . + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageNameLinux }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg GOLANG_BASE_IMAGE=$(GOLANG_BASE_IMAGE) --build-arg CI_BASE_IMAGE=$(CI_BASE_IMAGE) --push . docker pull ${{ variables.repoImageNameLinux }}:$(linuxImagetag) else - docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageNameLinux }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json . + docker buildx build --platform linux/amd64,linux/arm64 --tag ${{ variables.repoImageNameLinux }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg GOLANG_BASE_IMAGE=$(GOLANG_BASE_IMAGE) --build-arg CI_BASE_IMAGE=$(CI_BASE_IMAGE) . # load the multi-arch image to run tests - docker buildx build --tag ${{ variables.repoImageNameLinux }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --load . + docker buildx build --tag ${{ variables.repoImageNameLinux }}:$(linuxImagetag) -f kubernetes/linux/Dockerfile.multiarch --metadata-file $(Build.ArtifactStagingDirectory)/linux/metadata.json --build-arg GOLANG_BASE_IMAGE=$(GOLANG_BASE_IMAGE) --build-arg CI_BASE_IMAGE=$(CI_BASE_IMAGE) --load . 
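Both pipelines install Trivy right after this build step and scan the loaded image. An equivalent local scan that honors the repository's .trivyignore looks roughly like the sketch below; the image reference is a placeholder for whatever tag was just built:

```bash
# Sketch: scan a locally built agent image the way the pipeline does.
# The image reference is a placeholder.
curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh \
  | sh -s -- -b /usr/local/bin

trivy image --ignore-unfixed --severity HIGH,CRITICAL,MEDIUM \
  --ignorefile .trivyignore \
  myregistry.azurecr.io/ciprod:citest
```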
fi curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin @@ -138,14 +138,14 @@ jobs: condition: eq(variables.IS_PR, true) inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)/linux' - DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04' + DockerImagesToScan: '$(GOLANG_BASE_IMAGE), $(CI_BASE_IMAGE)' - task: AzureArtifacts.manifest-generator-task.manifest-generator-task.ManifestGeneratorTask@0 displayName: 'Generation Task' condition: eq(variables.IS_PR, false) inputs: BuildDropPath: '$(Build.ArtifactStagingDirectory)/linux' - DockerImagesToScan: 'golang:1.15.14, ubuntu:18.04, ${{ variables.repoImageNameLinux }}:$(linuxImagetag)' + DockerImagesToScan: '$(GOLANG_BASE_IMAGE), $(CI_BASE_IMAGE), ${{ variables.repoImageNameLinux }}:$(linuxImagetag)' - task: PublishBuildArtifacts@1 inputs: diff --git a/.trivyignore b/.trivyignore index edaa3debd..7a46d4890 100644 --- a/.trivyignore +++ b/.trivyignore @@ -1,27 +1,10 @@ -# related to telegraf -#[vishwa] - Fix telegraf & test all for next release - see work item #https://msazure.visualstudio.com/InfrastructureInsights/_workitems/edit/13322134 -# Unfixed as of 4/28/2022 +# telegraf vulnerabilities CVE-2019-3826 - -#still present in mdsd telegraf -CVE-2021-42836 - -# ruby in /usr/lib -CVE-2020-36327 -CVE-2021-43809 -CVE-2021-41816 -CVE-2021-41819 -CVE-2021-31799 -CVE-2021-28965 +CVE-2022-29190 +CVE-2022-29222 +CVE-2022-29189 +CVE-2022-1996 #dpkg vulnerability in ubuntu CVE-2022-1304 - - -# Adding for Hotfix : This needs to be fixed -CVE-2022-27191 -CVE-2022-29190 -CVE-2022-29222 -CVE-2022-31030 -CVE-2022-29189 -CVE-2022-29526 +CVE-2022-2509 diff --git a/README.md b/README.md index 6e51d256b..f24405bb6 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ Feel free to contact engineering team owners in case you have any questions abou ## Common - [Visual Studio Code](https://code.visualstudio.com/) for authoring -- [Go lang](https://golang.org/) for building go code. Go lang version 1.15.14 (both Linux & Windows) +- [Go lang](https://golang.org/) for building go code. Go lang version 1.18.3 (both Linux & Windows) > Note: If you are using WSL2, make sure you have cloned the code onto ubuntu not onto windows @@ -90,7 +90,6 @@ The general directory structure is: │ │ | ├── health/ - code for health feature │ │ | ├── lib/ - lib for app insights ruby and this code of application_insights gem │ │ | ... - plugins in, out and filters code in ruby -│ ├── toml-parser/ - code for parsing of toml configuration files ├── test/ - source code for tests │ ├── e2e/ - e2e tests to validate agent and e2e workflow(s) │ ├── unit-tests/ - unit tests code @@ -121,7 +120,7 @@ We recommend using [Visual Studio Code](https://code.visualstudio.com/) for auth ### Install Pre-requisites -1. Install go1.15.14, dotnet, powershell, docker and build dependencies to build go code for both Linux and Windows platforms +1. Install go1.18.3, dotnet, powershell, docker and build dependencies to build go code for both Linux and Windows platforms ``` bash ~/Docker-Provider/scripts/build/linux/install-build-pre-requisites.sh ``` @@ -143,13 +142,35 @@ bash ~/Docker-Provider/scripts/build/linux/install-build-pre-requisites.sh > Note: If you are using WSL2, ensure `Docker for windows` running with Linux containers mode on your windows machine to build Linux agent image successfully +> Note: format of the imagetag will be `ci<release><MMDDYYYY>`. Possible values for release are test, dev, preview, dogfood, prod etc. 
Please use MCR urls while building internally. + +Preferred Way: You can build and push images for multiple architectures. This is powered by docker buildx +Directly use the docker buildx commands (the MCR images can be found in our internal wiki to be used as arguments) +``` +# multiple platforms +cd ~/Docker-Provider +docker buildx build --platform linux/arm64/v8,linux/amd64 -t <repo>/<imagename>:<imagetag> --build-arg IMAGE_TAG=<imagetag> --build-arg CI_BASE_IMAGE=<ci base image> --build-arg GOLANG_BASE_IMAGE=<golang base image> -f kubernetes/linux/Dockerfile.multiarch --push . + +# single platform +cd ~/Docker-Provider +docker buildx build --platform linux/amd64 -t <repo>/<imagename>:<imagetag> --build-arg IMAGE_TAG=<imagetag> --build-arg CI_BASE_IMAGE=<ci base image> --build-arg GOLANG_BASE_IMAGE=<golang base image> -f kubernetes/linux/Dockerfile.multiarch --push . +``` + +Using the build and publish script + ``` cd ~/Docker-Provider/kubernetes/linux/dockerbuild sudo docker login # if you want to publish the image to acr then login to acr via `docker login <acr-name>` # build provider, docker image and publish to docker image -bash build-and-publish-docker-image.sh --image <repo>/<imagename>:<imagetag> +bash build-and-publish-docker-image.sh --image <repo>/<imagename>:<imagetag> --ubuntu <ubuntu image url> --golang <golang image url> +``` + +``` +cd ~/Docker-Provider/kubernetes/linux/dockerbuild +sudo docker login # if you want to publish the image to acr then login to acr via `docker login <acr-name>` +# build and publish using docker buildx +bash build-and-publish-docker-image.sh --image <repo>/<imagename>:<imagetag> --ubuntu <ubuntu image url> --golang <golang image url> --multiarch ``` -> Note: format of the imagetag will be `ci<release><MMDDYYYY>`. possible values for release are test, dev, preview, dogfood, prod etc. You can also build and push images for multiple architectures. This is powered by docker buildx ``` @@ -182,7 +203,7 @@ make ``` cd ~/Docker-Provider/kubernetes/linux/ -docker build -t <repo>/<imagename>:<imagetag> --build-arg IMAGE_TAG=<imagetag> . +docker build -t <repo>/<imagename>:<imagetag> --build-arg IMAGE_TAG=<imagetag> --build-arg CI_BASE_IMAGE=<ci base image> . docker push <repo>/<imagename>:<imagetag> ``` ## Windows Agent diff --git a/ReleaseNotes.md b/ReleaseNotes.md index dfa703a9f..f888a9324 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,24 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 08/10/2022 - +##### Version microsoft/oms:ciprod08102022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08102022 (linux) +##### Version microsoft/oms:win-ciprod08102022 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08102022 (windows) +##### Code change log +- Linux Agent + - Consume ruby from RVM instead of brightbox ppa + +- Common + - Update to Ruby 3.1.1 + - Update telegraf to 1.23.2 + - Update fluentd to 1.14.6 + - Use default JSON gem instead of yajl-json + - Consume tomlrb as a gem instead of committed source code + - Move from beta.kubernetes.io to kubernetes.io + - Bug fixes + - Fix bug in processing fractional memory limits + - Fix log loss due to inode reuse + ### 07/27/2022 - ##### Version microsoft/oms:ciprod06272022-hotfix Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022-hotfix (linux) - Fixes for sending the proper node allocatable cpu and memory value for the container which does not specify limits. 
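After a multi-arch push from the README steps above, the manifest can be sanity-checked before the tag is consumed anywhere; the image reference below is a placeholder:

```bash
# Sketch: confirm both architectures landed in the pushed manifest list.
docker buildx imagetools inspect myregistry.azurecr.io/ciprod:citest

# Optionally pull one platform-specific image to spot-check it locally.
docker pull --platform linux/arm64 myregistry.azurecr.io/ciprod:citest
```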
diff --git a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb index 995d72b87..5db387911 100644 --- a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb +++ b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb @@ -24,15 +24,16 @@ def substituteFluentBitPlaceHolders bufferChunkSize = ENV["FBIT_TAIL_BUFFER_CHUNK_SIZE"] bufferMaxSize = ENV["FBIT_TAIL_BUFFER_MAX_SIZE"] memBufLimit = ENV["FBIT_TAIL_MEM_BUF_LIMIT"] + ignoreOlder = ENV["FBIT_TAIL_IGNORE_OLDER"] - serviceInterval = (!interval.nil? && is_number?(interval) && interval.to_i > 0 ) ? interval : @default_service_interval + serviceInterval = (!interval.nil? && is_number?(interval) && interval.to_i > 0) ? interval : @default_service_interval serviceIntervalSetting = "Flush " + serviceInterval tailBufferChunkSize = (!bufferChunkSize.nil? && is_number?(bufferChunkSize) && bufferChunkSize.to_i > 0) ? bufferChunkSize : nil tailBufferMaxSize = (!bufferMaxSize.nil? && is_number?(bufferMaxSize) && bufferMaxSize.to_i > 0) ? bufferMaxSize : nil - if ((!tailBufferChunkSize.nil? && tailBufferMaxSize.nil?) || (!tailBufferChunkSize.nil? && !tailBufferMaxSize.nil? && tailBufferChunkSize.to_i > tailBufferMaxSize.to_i)) + if ((!tailBufferChunkSize.nil? && tailBufferMaxSize.nil?) || (!tailBufferChunkSize.nil? && !tailBufferMaxSize.nil? && tailBufferChunkSize.to_i > tailBufferMaxSize.to_i)) puts "config:warn buffer max size must be greater or equal to chunk size" tailBufferMaxSize = tailBufferChunkSize end @@ -54,6 +55,12 @@ def substituteFluentBitPlaceHolders new_contents = new_contents.gsub("\n ${TAIL_BUFFER_MAX_SIZE}\n", "\n") end + if !ignoreOlder.nil? && !ignoreOlder.empty? + new_contents = new_contents.gsub("${TAIL_IGNORE_OLDER}", "Ignore_Older " + ignoreOlder) + else + new_contents = new_contents.gsub("\n ${TAIL_IGNORE_OLDER}\n", "\n") + end + File.open(@td_agent_bit_conf_path, "w") { |file| file.puts new_contents } puts "config::Successfully substituted the placeholders in td-agent-bit.conf file" rescue => errorStr diff --git a/build/common/installer/scripts/tomlparser-agent-config.rb b/build/common/installer/scripts/tomlparser-agent-config.rb index ebe1e3982..4cfbc45ee 100644 --- a/build/common/installer/scripts/tomlparser-agent-config.rb +++ b/build/common/installer/scripts/tomlparser-agent-config.rb @@ -2,11 +2,7 @@ #this should be require relative in Linux and require in windows, since it is a gem install on windows @os_type = ENV["OS_TYPE"] -if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 - require "tomlrb" -else - require_relative "tomlrb" -end +require "tomlrb" require_relative "ConfigParseErrorLogger" @@ -59,7 +55,7 @@ @fbitTailBufferChunkSizeMBs = 0 @fbitTailBufferMaxSizeMBs = 0 @fbitTailMemBufLimitMBs = 0 - +@fbitTailIgnoreOlder = "" def is_number?(value) true if Integer(value) rescue false @@ -149,7 +145,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) end fbitTailBufferMaxSizeMBs = fbit_config[:tail_buf_maxsize_megabytes] - if !fbitTailBufferMaxSizeMBs.nil? && is_number?(fbitTailBufferMaxSizeMBs) && fbitTailBufferMaxSizeMBs.to_i > 0 + if !fbitTailBufferMaxSizeMBs.nil? 
&& is_number?(fbitTailBufferMaxSizeMBs) && fbitTailBufferMaxSizeMBs.to_i > 0 if fbitTailBufferMaxSizeMBs.to_i >= @fbitTailBufferChunkSizeMBs @fbitTailBufferMaxSizeMBs = fbitTailBufferMaxSizeMBs.to_i puts "Using config map value: tail_buf_maxsize_megabytes = #{@fbitTailBufferMaxSizeMBs}" @@ -160,16 +156,27 @@ def populateSettingValuesFromConfigMap(parsedConfig) end end # in scenario - tail_buf_chunksize_megabytes provided but not tail_buf_maxsize_megabytes to prevent fbit crash - if @fbitTailBufferChunkSizeMBs > 0 && @fbitTailBufferMaxSizeMBs == 0 + if @fbitTailBufferChunkSizeMBs > 0 && @fbitTailBufferMaxSizeMBs == 0 @fbitTailBufferMaxSizeMBs = @fbitTailBufferChunkSizeMBs puts "config::warn: since tail_buf_maxsize_megabytes not provided hence using tail_buf_maxsize_megabytes=#{@fbitTailBufferMaxSizeMBs} which is same as the value of tail_buf_chunksize_megabytes" - end + end fbitTailMemBufLimitMBs = fbit_config[:tail_mem_buf_limit_megabytes] if !fbitTailMemBufLimitMBs.nil? && is_number?(fbitTailMemBufLimitMBs) && fbitTailMemBufLimitMBs.to_i > 0 @fbitTailMemBufLimitMBs = fbitTailMemBufLimitMBs.to_i puts "Using config map value: tail_mem_buf_limit_megabytes = #{@fbitTailMemBufLimitMBs}" end + + fbitTailIgnoreOlder = fbit_config[:tail_ignore_older] + re = /^[0-9]+[mhd]$/ + if !fbitTailIgnoreOlder.nil? && !fbitTailIgnoreOlder.empty? + if !re.match(fbitTailIgnoreOlder).nil? + @fbitTailIgnoreOlder = fbitTailIgnoreOlder + puts "Using config map value: tail_ignore_older = #{@fbitTailIgnoreOlder}" + else + puts "config:warn: provided tail_ignore_older value is not valid hence using default value" + end + end end end rescue => errorStr @@ -210,10 +217,15 @@ def populateSettingValuesFromConfigMap(parsedConfig) end if @fbitTailBufferMaxSizeMBs > 0 file.write("export FBIT_TAIL_BUFFER_MAX_SIZE=#{@fbitTailBufferMaxSizeMBs}\n") - end + end if @fbitTailMemBufLimitMBs > 0 file.write("export FBIT_TAIL_MEM_BUF_LIMIT=#{@fbitTailMemBufLimitMBs}\n") - end + end + + if !@fbitTailIgnoreOlder.nil? && !@fbitTailIgnoreOlder.empty? + file.write("export FBIT_TAIL_IGNORE_OLDER=#{@fbitTailIgnoreOlder}\n") + end + # Close file after writing all environment variables file.close else @@ -231,21 +243,25 @@ def get_command_windows(env_variable_name, env_variable_value) if !file.nil? if @fbitFlushIntervalSecs > 0 - commands = get_command_windows('FBIT_SERVICE_FLUSH_INTERVAL', @fbitFlushIntervalSecs) + commands = get_command_windows("FBIT_SERVICE_FLUSH_INTERVAL", @fbitFlushIntervalSecs) file.write(commands) end if @fbitTailBufferChunkSizeMBs > 0 - commands = get_command_windows('FBIT_TAIL_BUFFER_CHUNK_SIZE', @fbitTailBufferChunkSizeMBs) + commands = get_command_windows("FBIT_TAIL_BUFFER_CHUNK_SIZE", @fbitTailBufferChunkSizeMBs) file.write(commands) end if @fbitTailBufferMaxSizeMBs > 0 - commands = get_command_windows('FBIT_TAIL_BUFFER_MAX_SIZE', @fbitTailBufferMaxSizeMBs) + commands = get_command_windows("FBIT_TAIL_BUFFER_MAX_SIZE", @fbitTailBufferMaxSizeMBs) file.write(commands) - end + end if @fbitTailMemBufLimitMBs > 0 - commands = get_command_windows('FBIT_TAIL_MEM_BUF_LIMIT', @fbitTailMemBufLimitMBs) + commands = get_command_windows("FBIT_TAIL_MEM_BUF_LIMIT", @fbitTailMemBufLimitMBs) file.write(commands) - end + end + if !@fbitTailIgnoreOlder.nil? && !@fbitTailIgnoreOlder.empty? 
+ commands = get_command_windows("FBIT_TAIL_IGNORE_OLDER", @fbitTailIgnoreOlder) + file.write(commands) + end # Close file after writing all environment variables file.close puts "****************End Config Processing********************" @@ -253,4 +269,4 @@ def get_command_windows(env_variable_name, env_variable_value) puts "Exception while opening file for writing config environment variables for WINDOWS LOG" puts "****************End Config Processing********************" end -end \ No newline at end of file +end diff --git a/build/common/installer/scripts/tomlparser-mdm-metrics-config.rb b/build/common/installer/scripts/tomlparser-mdm-metrics-config.rb index b6a4419cf..17c1ca118 100644 --- a/build/common/installer/scripts/tomlparser-mdm-metrics-config.rb +++ b/build/common/installer/scripts/tomlparser-mdm-metrics-config.rb @@ -3,11 +3,7 @@ #this should be require relative in Linux and require in windows, since it is a gem install on windows @os_type = ENV["OS_TYPE"] -if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 - require "tomlrb" -else - require_relative "tomlrb" -end +require "tomlrb" require_relative "/etc/fluent/plugin/constants" require_relative "ConfigParseErrorLogger" diff --git a/build/common/installer/scripts/tomlparser-prom-agent-config.rb b/build/common/installer/scripts/tomlparser-prom-agent-config.rb index 664691a44..abc939f52 100644 --- a/build/common/installer/scripts/tomlparser-prom-agent-config.rb +++ b/build/common/installer/scripts/tomlparser-prom-agent-config.rb @@ -2,11 +2,7 @@ #this should be require relative in Linux and require in windows, since it is a gem install on windows @os_type = ENV["OS_TYPE"] -if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 - require "tomlrb" -else - require_relative "tomlrb" -end +require "tomlrb" require_relative "ConfigParseErrorLogger" diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb index 642eadc14..76909b17c 100644 --- a/build/common/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -2,12 +2,8 @@ #this should be require relative in Linux and require in windows, since it is a gem install on windows @os_type = ENV["OS_TYPE"] -if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 - require "tomlrb" -else - require_relative "tomlrb" -end -# require_relative "tomlrb" +require "tomlrb" + require_relative "ConfigParseErrorLogger" require "fileutils" diff --git a/build/common/installer/scripts/tomlparser.rb b/build/common/installer/scripts/tomlparser.rb index 6a2f3c6d6..6d3ee6e78 100644 --- a/build/common/installer/scripts/tomlparser.rb +++ b/build/common/installer/scripts/tomlparser.rb @@ -2,11 +2,7 @@ #this should be require relative in Linux and require in windows, since it is a gem install on windows @os_type = ENV["OS_TYPE"] -if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 - require "tomlrb" -else - require_relative "tomlrb" -end +require "tomlrb" require_relative "ConfigParseErrorLogger" diff --git a/build/linux/installer/conf/td-agent-bit.conf b/build/linux/installer/conf/td-agent-bit.conf index beba6a3ca..fe550ab62 100644 --- a/build/linux/installer/conf/td-agent-bit.conf +++ b/build/linux/installer/conf/td-agent-bit.conf @@ -26,7 +26,7 @@ Refresh_Interval 30 Path_Key filepath Skip_Long_Lines On - Ignore_Older 5m + ${TAIL_IGNORE_OLDER} Exclude_Path ${AZMON_CLUSTER_LOG_TAIL_EXCLUDE_PATH} [INPUT] diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index 3e1dc9f77..8174adcd4 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -20,15 +20,6 @@ MAINTAINER: 'Microsoft Corporation' /etc/opt/microsoft/docker-cimprov/container.conf; build/linux/installer/conf/container.conf; 644; root; root -/opt/tomlrb.rb; source/toml-parser/tomlrb.rb; 644; root; root -/opt/tomlrb/generated_parser.rb; source/toml-parser/tomlrb/generated_parser.rb; 644; root; root -/opt/tomlrb/handler.rb; source/toml-parser/tomlrb/handler.rb; 644; root; root -/opt/tomlrb/parser.rb; source/toml-parser/tomlrb/parser.rb; 644; root; root -/opt/tomlrb/parser.y; source/toml-parser/tomlrb/parser.y; 644; root; root -/opt/tomlrb/scanner.rb; source/toml-parser/tomlrb/scanner.rb; 644; root; root -/opt/tomlrb/string_utils.rb; source/toml-parser/tomlrb/string_utils.rb; 644; root; root -/opt/tomlrb/version.rb; source/toml-parser/tomlrb/version.rb; 644; root; root - /opt/td-agent-bit/bin/out_oms.so; intermediate/${{BUILD_CONFIGURATION}}/out_oms.so; 755; root; root /etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf; build/linux/installer/conf/prometheus-side-car.conf; 644; root; root /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf; build/linux/installer/conf/td-agent-bit.conf; 644; root; root diff --git a/build/linux/installer/scripts/tomlparser-metric-collection-config.rb b/build/linux/installer/scripts/tomlparser-metric-collection-config.rb index cee41312b..3001fdbaf 100644 --- a/build/linux/installer/scripts/tomlparser-metric-collection-config.rb +++ b/build/linux/installer/scripts/tomlparser-metric-collection-config.rb @@ -1,7 +1,7 @@ #!/usr/local/bin/ruby # frozen_string_literal: true -require_relative "tomlrb" +require "tomlrb" require_relative "ConfigParseErrorLogger" require_relative "/etc/fluent/plugin/constants" diff --git a/build/linux/installer/scripts/tomlparser-npm-config.rb b/build/linux/installer/scripts/tomlparser-npm-config.rb index 7d72b7b34..9ce46291e 100644 --- a/build/linux/installer/scripts/tomlparser-npm-config.rb +++ b/build/linux/installer/scripts/tomlparser-npm-config.rb @@ -2,11 +2,7 @@ #this should be require relative in Linux and require in windows, since it is a gem install on windows @os_type = ENV["OS_TYPE"] -if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 - require "tomlrb" -else - require_relative "tomlrb" -end +require "tomlrb" require_relative "ConfigParseErrorLogger" diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 096064db8..2ac5ef387 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -1,6 +1,6 @@ #!/usr/local/bin/ruby -require_relative "tomlrb" +require "tomlrb" require "fileutils" require_relative "ConfigParseErrorLogger" diff --git a/build/windows/installer/certificategenerator/CertificateGenerator.csproj b/build/windows/installer/certificategenerator/CertificateGenerator.csproj index b14cc4502..dfbbc51e0 100644 --- a/build/windows/installer/certificategenerator/CertificateGenerator.csproj +++ b/build/windows/installer/certificategenerator/CertificateGenerator.csproj @@ -8,7 +8,7 @@ - + diff --git a/build/windows/installer/conf/fluent-bit.conf b/build/windows/installer/conf/fluent-bit.conf index 1e2d8a93e..b43354e3f 100644 --- a/build/windows/installer/conf/fluent-bit.conf +++ b/build/windows/installer/conf/fluent-bit.conf @@ -25,7 +25,7 @@ Refresh_Interval 30 Path_Key filepath Skip_Long_Lines On - Ignore_Older 5m + ${TAIL_IGNORE_OLDER} Exclude_Path ${AZMON_CLUSTER_LOG_TAIL_EXCLUDE_PATH} [INPUT] diff --git a/build/windows/installer/conf/fluent.conf b/build/windows/installer/conf/fluent.conf index a78ac58fa..73d62a3ff 100644 --- a/build/windows/installer/conf/fluent.conf +++ b/build/windows/installer/conf/fluent.conf @@ -1,5 +1,5 @@ - type heartbeat_request + @type heartbeat_request run_interval 30m @log_level info diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 54159a6ce..2f90a8aed 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.9.5 +version: 2.9.6 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index ef72b385b..9e10a1ca9 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -37,7 +37,7 @@ spec: kubernetes.io/os: windows {{- else }} nodeSelector: - beta.kubernetes.io/os: windows + kubernetes.io/os: windows {{- end }} {{- if .Values.omsagent.rbac }} serviceAccountName: omsagent diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index af94fad75..0bf94e82c 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -22,8 +22,8 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod06272022-hotfix" - tagWindows: "win-ciprod06142022" + tag: "ciprod08102022" + tagWindows: "win-ciprod08102022" pullPolicy: IfNotPresent dockerProviderVersion: "18.0.1-0" agentVersion: "azure-mdsd-1.17.0" @@ -112,7 +112,7 @@ omsagent: nodeSelectorTerms: - labelSelector: matchExpressions: - - key: beta.kubernetes.io/os + - key: kubernetes.io/os operator: In values: - linux @@ -120,7 +120,7 @@ omsagent: operator: NotIn values: - virtual-kubelet - - key: 
beta.kubernetes.io/arch + - key: kubernetes.io/arch operator: In values: - amd64 @@ -159,7 +159,7 @@ omsagent: nodeSelectorTerms: - labelSelector: matchExpressions: - - key: beta.kubernetes.io/os + - key: kubernetes.io/os operator: In values: - linux @@ -171,7 +171,7 @@ omsagent: operator: NotIn values: - master - - key: beta.kubernetes.io/arch + - key: kubernetes.io/arch operator: In values: - amd64 diff --git a/kubernetes/container-azm-ms-agentconfig.yaml b/kubernetes/container-azm-ms-agentconfig.yaml index a54b34682..a274724ef 100644 --- a/kubernetes/container-azm-ms-agentconfig.yaml +++ b/kubernetes/container-azm-ms-agentconfig.yaml @@ -155,11 +155,13 @@ data: # The following settings are "undocumented", we don't recommend uncommenting them unless directed by Microsoft. # They increase the maximum stdout/stderr log collection rate but will also cause higher cpu/memory usage. + ## Ref for more details about Ignore_Older - https://docs.fluentbit.io/manual/v/1.7/pipeline/inputs/tail # [agent_settings.fbit_config] # log_flush_interval_secs = "1" # default value is 15 # tail_mem_buf_limit_megabytes = "10" # default value is 10 # tail_buf_chunksize_megabytes = "1" # default value is 32kb (comment out this line for default) # tail_buf_maxsize_megabytes = "1" # defautl value is 32kb (comment out this line for default) + # tail_ignore_older = "5m" # default value same as fluent-bit default i.e. 0m metadata: name: container-azm-ms-agentconfig diff --git a/kubernetes/container-azm-ms-vpaconfig.yaml b/kubernetes/container-azm-ms-vpaconfig.yaml new file mode 100644 index 000000000..9734a59f7 --- /dev/null +++ b/kubernetes/container-azm-ms-vpaconfig.yaml @@ -0,0 +1,13 @@ +kind: ConfigMap +apiVersion: v1 +data: + NannyConfiguration: |- + apiVersion: nannyconfig/v1alpha1 + kind: NannyConfiguration + baseCPU: 200m + cpuPerNode: 2m + baseMemory: 350Mi + memoryPerNode: 4Mi +metadata: + name: container-azm-ms-vpaconfig + namespace: kube-system diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 264256317..41c75efc0 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -1,4 +1,5 @@ -FROM ubuntu:18.04 +ARG CI_BASE_IMAGE= +FROM ${CI_BASE_IMAGE} MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" @@ -17,7 +18,7 @@ ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python-ctypes init-system-helpers net-tools rsyslog cron vim dmidecode apt-transport-https gnupg && rm -rf /var/lib/apt/lists/* COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs defaultpromenvvariables-sidecar mdsd.xml envmdsd logrotate.conf $tmpdir/ -ARG IMAGE_TAG=ciprod06272022-hotfix +ARG IMAGE_TAG=ciprod08102022 ENV AGENT_VERSION ${IMAGE_TAG} WORKDIR ${tmpdir} diff --git a/kubernetes/linux/Dockerfile.multiarch b/kubernetes/linux/Dockerfile.multiarch index c2289392c..ee8db1e8b 100644 --- a/kubernetes/linux/Dockerfile.multiarch +++ b/kubernetes/linux/Dockerfile.multiarch @@ -1,4 +1,8 @@ -FROM --platform=$BUILDPLATFORM golang:1.15.14 AS builder +# Default base images. If you update them don't forget to update variables in our build pipelines. Default values can be found in internal wiki. 
External can use ubuntu 18.04 and golang 1.18.3 +ARG GOLANG_BASE_IMAGE= +ARG CI_BASE_IMAGE= + +FROM --platform=$BUILDPLATFORM ${GOLANG_BASE_IMAGE} AS builder ARG TARGETOS TARGETARCH RUN /usr/bin/apt-get update && /usr/bin/apt-get install git g++ make pkg-config libssl-dev libpam0g-dev rpm librpm-dev uuid-dev libkrb5-dev python sudo gcc-aarch64-linux-gnu -y @@ -7,7 +11,7 @@ COPY source /src/source RUN cd /src/build/linux && make arch=${TARGETARCH} -FROM ubuntu:18.04 AS base_image +FROM ${CI_BASE_IMAGE} AS base_image ARG TARGETOS TARGETARCH MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ @@ -29,7 +33,7 @@ RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl COPY --from=builder /src/kubernetes/linux/Linux_ULINUX_1.0_*_64_Release/docker-cimprov-*.*.*-*.*.sh $tmpdir/ COPY kubernetes/linux/setup.sh kubernetes/linux/main.sh kubernetes/linux/defaultpromenvvariables kubernetes/linux/defaultpromenvvariables-rs kubernetes/linux/defaultpromenvvariables-sidecar kubernetes/linux/mdsd.xml kubernetes/linux/envmdsd kubernetes/linux/logrotate.conf $tmpdir/ -ARG IMAGE_TAG=ciprod06272022-hotfix +ARG IMAGE_TAG=ciprod08102022 ENV AGENT_VERSION ${IMAGE_TAG} WORKDIR ${tmpdir} @@ -38,8 +42,8 @@ RUN chmod 775 $tmpdir/*.sh; sync; $tmpdir/setup.sh ${TARGETARCH} # Do vulnerability scan in a seperate stage to avoid adding layer FROM base_image AS vulnscan -COPY --from=aquasec/trivy:latest /usr/local/bin/trivy /usr/local/bin/trivy COPY .trivyignore .trivyignore +RUN curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin v0.28.1 RUN trivy rootfs --ignore-unfixed --no-progress --severity HIGH,CRITICAL,MEDIUM --skip-files "/usr/local/bin/trivy" / RUN trivy rootfs --ignore-unfixed --no-progress --severity HIGH,CRITICAL,MEDIUM /usr/lib RUN trivy rootfs --exit-code 1 --ignore-unfixed --no-progress --severity HIGH,CRITICAL,MEDIUM --skip-files "/usr/local/bin/trivy" / > /dev/null 2>&1 && trivy rootfs --exit-code 1 --ignore-unfixed --no-progress --severity HIGH,CRITICAL,MEDIUM /usr/lib > /dev/null 2>&1 diff --git a/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh b/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh index 638236507..40ce83cd4 100644 --- a/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh +++ b/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh @@ -13,8 +13,8 @@ usage() local basename=`basename $0` echo echo "Build and publish docker image:" - echo "$basename --image <repo>/<imagename>:<imagetag>" - echo "$basename --image <repo>/<imagename>:<imagetag> --multiarch" + echo "$basename --image <repo>/<imagename>:<imagetag> --ubuntu <ubuntu image url> --golang <golang image url>" + echo "$basename --image <repo>/<imagename>:<imagetag> --ubuntu <ubuntu image url> --golang <golang image url> --multiarch" } parse_args() @@ -32,6 +32,8 @@ for arg in "$@"; do case "$arg" in "--image") set -- "$@" "-i" ;; "--multiarch") set -- "$@" "-m" ;; + "--ubuntu") set -- "$@" "-u" ;; + "--golang") set -- "$@" "-g" ;; "--"*) usage ;; *) set -- "$@" "$arg" esac @@ -39,7 +41,7 @@ done local OPTIND opt -while getopts 'hi:m' opt; do +while getopts 'hi:u:g:m' opt; do case "$opt" in h) usage @@ -54,7 +56,12 @@ while getopts 'hi:m' opt; do multi=1 echo "using multiarch dockerfile" ;; - + u) + ci_base_image=$OPTARG + ;; + g) + golang_base_image=$OPTARG + ;; ?) usage exit 1 @@ -69,6 +76,16 @@ while getopts 'hi:m' opt; do exit 1 fi + if [ -z "$ci_base_image" ]; then + echo "-e invalid ubuntu image url. please try with valid values from internal wiki. do not use 3P entries" + exit 1 + fi + + if [ -z "$golang_base_image" ]; then + echo "-e invalid golang image url. 
please try with valid values from internal wiki. do not use 3P entries" + exit 1 + fi + # extract image tag imageTag=$(echo ${image} | sed "s/.*://") @@ -89,39 +106,6 @@ fi } -build_docker_provider() -{ - echo "building docker provider shell bundle" - cd $buildDir - echo "trigger make to build docker build provider shell bundle" - make - echo "building docker provider shell bundle completed" -} - -login_to_docker() -{ - echo "login to docker with provided creds" - # sudo docker login --username=$dockerUser - sudo docker login - echo "login to docker with provided creds completed" -} - -build_docker_image() -{ - echo "build docker image: $image and image tage is $imageTag" - cd $baseDir/kubernetes/linux - sudo docker build -t $image --build-arg IMAGE_TAG=$imageTag . - - echo "build docker image completed" -} - -publish_docker_image() -{ - echo "publishing docker image: $image" - sudo docker push $image - echo "publishing docker image: $image done." -} - # parse and validate args parse_args $@ @@ -138,22 +122,18 @@ echo "source code base directory: $baseDir" echo "build directory for docker provider: $buildDir" echo "docker file directory: $dockerFileDir" +echo "build docker image: $image and image tag is $imageTag" + if [ -n "$multi" ] && [ "$multi" -eq "1" ]; then echo "building multiarch" cd $baseDir - docker buildx build --platform linux/arm64/v8,linux/amd64 -t $image --build-arg IMAGE_TAG=$imageTag -f $linuxDir/Dockerfile.multiarch --push . - exit 0 + docker buildx build --platform linux/arm64/v8,linux/amd64 -t $image --build-arg IMAGE_TAG=$imageTag --build-arg CI_BASE_IMAGE="$ci_base_image" --build-arg GOLANG_BASE_IMAGE="$golang_base_image" -f $linuxDir/Dockerfile.multiarch --push . +else + echo "building amd64" + cd $baseDir + docker buildx build --platform linux/amd64 -t $image --build-arg IMAGE_TAG=$imageTag --build-arg CI_BASE_IMAGE="$ci_base_image" --build-arg GOLANG_BASE_IMAGE="$golang_base_image" -f $linuxDir/Dockerfile.multiarch --push . fi -# build docker provider shell bundle -build_docker_provider - -# build docker image -build_docker_image - -# publish docker image -publish_docker_image - -cd $currentDir - +echo "build and push docker image completed" +cd $currentDir \ No newline at end of file diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 1e00457d9..efb95698a 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -88,6 +88,8 @@ checkAgentOnboardingStatus() { fi } +# setup paths for ruby +[ -f /etc/profile.d/rvm.sh ] && source /etc/profile.d/rvm.sh setReplicaSetSpecificConfig() { echo "num of fluentd workers:${NUM_OF_FLUENTD_WORKERS}" export FLUENTD_FLUSH_INTERVAL="20s" @@ -453,7 +455,7 @@ source ~/.bashrc if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then #Parse the configmap to set the right environment variables. - /usr/bin/ruby2.7 tomlparser.rb + ruby tomlparser.rb cat config_env_var | while read line; do echo $line >>~/.bashrc @@ -464,7 +466,7 @@ fi #Parse the configmap to set the right environment variables for agent config. 
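With main.sh now resolving `ruby` from the PATH that RVM publishes instead of the hard-coded /usr/bin/ruby2.7, a quick in-container check of which interpreter the parsers will run under is a sketch like:

```bash
# Sketch: verify the RVM-managed interpreter main.sh now relies on.
[ -f /etc/profile.d/rvm.sh ] && source /etc/profile.d/rvm.sh
which ruby      # expect an rvm-managed path, not /usr/bin/ruby2.7
ruby --version  # expect 3.1.1 per the setup.sh change below
```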
#Note > tomlparser-agent-config.rb has to be parsed first before td-agent-bit-conf-customizer.rb for fbit agent settings if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then - /usr/bin/ruby2.7 tomlparser-agent-config.rb + ruby tomlparser-agent-config.rb cat agent_config_env_var | while read line; do echo $line >> ~/.bashrc @@ -472,7 +474,7 @@ if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then source agent_config_env_var #Parse the configmap to set the right environment variables for network policy manager (npm) integration. - /usr/bin/ruby2.7 tomlparser-npm-config.rb + ruby tomlparser-npm-config.rb cat integration_npm_config_env_var | while read line; do echo $line >> ~/.bashrc @@ -482,11 +484,11 @@ fi #Replace the placeholders in td-agent-bit.conf file for fluentbit with custom/default values in daemonset if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then - /usr/bin/ruby2.7 td-agent-bit-conf-customizer.rb + ruby td-agent-bit-conf-customizer.rb fi #Parse the prometheus configmap to create a file with new custom settings. -/usr/bin/ruby2.7 tomlparser-prom-customconfig.rb +ruby tomlparser-prom-customconfig.rb #Setting default environment variables to be used in any case of failure in the above steps if [ ! -e "/etc/config/kube.conf" ]; then @@ -520,7 +522,7 @@ fi if [ ! -e "/etc/config/kube.conf" ]; then if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then #Parse the agent configmap to create a file with new custom settings. - /usr/bin/ruby2.7 tomlparser-prom-agent-config.rb + ruby tomlparser-prom-agent-config.rb #Sourcing config environment variable file if it exists if [ -e "side_car_fbit_config_env_var" ]; then cat side_car_fbit_config_env_var | while read line; do @@ -533,7 +535,7 @@ fi #Parse the configmap to set the right environment variables for MDM metrics configuration for Alerting. if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then - /usr/bin/ruby2.7 tomlparser-mdm-metrics-config.rb + ruby tomlparser-mdm-metrics-config.rb cat config_mdm_metrics_env_var | while read line; do echo $line >>~/.bashrc @@ -541,7 +543,7 @@ if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then source config_mdm_metrics_env_var #Parse the configmap to set the right environment variables for metric collection settings - /usr/bin/ruby2.7 tomlparser-metric-collection-config.rb + ruby tomlparser-metric-collection-config.rb cat config_metric_collection_env_var | while read line; do echo $line >>~/.bashrc @@ -552,7 +554,7 @@ fi # OSM scraping to be done in replicaset if sidecar car scraping is disabled and always do the scraping from the sidecar (It will always be either one of the two) if [[ ( ( ! 
-e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || ( ( -e "/etc/config/kube.conf" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ]]; then - /usr/bin/ruby2.7 tomlparser-osm-config.rb + ruby tomlparser-osm-config.rb if [ -e "integration_osm_config_env_var" ]; then cat integration_osm_config_env_var | while read line; do @@ -649,7 +651,8 @@ if [ "$CONTAINER_RUNTIME" != "docker" ]; then fi echo "set caps for ruby process to read container env from proc" -sudo setcap cap_sys_ptrace,cap_dac_read_search+ep /usr/bin/ruby2.7 +RUBY_PATH=$(which ruby) +sudo setcap cap_sys_ptrace,cap_dac_read_search+ep "$RUBY_PATH" echo "export KUBELET_RUNTIME_OPERATIONS_METRIC="$KUBELET_RUNTIME_OPERATIONS_METRIC >> ~/.bashrc echo "export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="$KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC >> ~/.bashrc diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index c478af0e5..0e3e43757 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -1,3 +1,5 @@ +#!/bin/bash + TMPDIR="/opt" cd $TMPDIR @@ -11,6 +13,20 @@ fi #upgrade apt to latest version apt-get update && apt-get install -y apt && DEBIAN_FRONTEND=noninteractive apt-get install -y locales + +curl -sSL https://rvm.io/mpapis.asc | gpg --import - +curl -sSL https://rvm.io/pkuczynski.asc | gpg --import - +curl -sSL https://get.rvm.io | bash -s stable + +# setup paths for ruby and rvm +if [ -f /etc/profile.d/rvm.sh ]; then + source /etc/profile.d/rvm.sh + echo "[ -f /etc/profile.d/rvm.sh ] && source /etc/profile.d/rvm.sh" >> ~/.bashrc +fi + +rvm install 3.1.1 +rvm --default use 3.1.1 + sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ dpkg-reconfigure --frontend=noninteractive locales && \ update-locale LANG=en_US.UTF-8 @@ -25,6 +41,7 @@ fi /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb cp -f $TMPDIR/mdsd.xml /etc/mdsd.d cp -f $TMPDIR/envmdsd /etc/mdsd.d +rm /usr/sbin/telegraf # log rotate conf for mdsd and can be extended for other log files as well cp -f $TMPDIR/logrotate.conf /etc/logrotate.d/ci-agent @@ -40,10 +57,10 @@ sudo apt-get install jq=1.5+dfsg-2 -y #used to setcaps for ruby process to read /proc/env sudo apt-get install libcap2-bin -y -wget https://dl.influxdata.com/telegraf/releases/telegraf-1.22.2_linux_$ARCH.tar.gz -tar -zxvf telegraf-1.22.2_linux_$ARCH.tar.gz +wget https://dl.influxdata.com/telegraf/releases/telegraf-1.23.2_linux_$ARCH.tar.gz +tar -zxvf telegraf-1.23.2_linux_$ARCH.tar.gz -mv /opt/telegraf-1.22.2/usr/bin/telegraf /opt/telegraf +mv /opt/telegraf-1.23.2/usr/bin/telegraf /opt/telegraf chmod 544 /opt/telegraf @@ -56,15 +73,11 @@ sudo echo "deb https://packages.fluentbit.io/ubuntu/bionic bionic main" >> /etc/ sudo apt-get update sudo apt-get install td-agent-bit=1.7.8 -y -# install ruby2.7 -sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F5DA5F09C3173AA6 -sudo echo "deb http://ppa.launchpad.net/brightbox/ruby-ng/ubuntu bionic main" >> /etc/apt/sources.list -sudo apt-get update -sudo apt-get install ruby2.7 ruby2.7-dev gcc make -y # fluentd v1 gem -gem install fluentd -v "1.14.2" --no-document +gem install fluentd -v "1.14.6" --no-document fluentd --setup ./fluent gem install gyoku iso8601 --no-doc +gem install tomlrb -v "2.0.1" --no-document rm -f $TMPDIR/docker-cimprov*.sh @@ -74,7 +87,8 @@ rm -f $TMPDIR/envmdsd rm -f $TMPDIR/telegraf-*.tar.gz # remove build dependencies -sudo apt-get remove ruby2.7-dev gcc make -y +sudo apt-get remove gcc make -y +sudo apt autoremove -y # Remove settings for cron.daily that 
conflict with the node's cron.daily. Since both are trying to rotate the same files # in /var/log at the same time, the rotation doesn't happen correctly and then the *.1 file is forever logged to. diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index c11650b9e..addb84d07 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -27,6 +27,11 @@ rules: - apiGroups: ["apps", "extensions", "autoscaling"] resources: ["replicasets", "deployments", "horizontalpodautoscalers"] verbs: ["list"] + # Uncomment below lines if AddonResizer VPA is enabled + # - apiGroups: ["apps"] + # resources: ["deployments"] + # resourceNames: [ "omsagent-rs" ] + # verbs: ["get", "patch"] # Uncomment below lines for MSI Auth Mode testing # - apiGroups: [""] # resources: ["secrets"] @@ -379,7 +384,7 @@ spec: # - NET_ADMIN # - NET_RAW - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022-hotfix" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08102022" imagePullPolicy: IfNotPresent resources: limits: @@ -468,7 +473,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022-hotfix" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08102022" imagePullPolicy: IfNotPresent resources: limits: @@ -617,6 +622,42 @@ spec: spec: serviceAccountName: omsagent containers: + # Uncomment below lines to enable VPA + # # Make sure this matches the version on the AKS RP side + # - image: "mcr.microsoft.com/oss/kubernetes/autoscaler/addon-resizer:1.8.14" + # imagePullPolicy: IfNotPresent + # name: omsagent-vpa + # resources: + # limits: + # cpu: 100m + # memory: 300Mi + # requests: + # cpu: 5m + # memory: 30Mi + # env: + # - name: MY_POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: MY_POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: omsagent-rs-vpa-config-volume + # mountPath: /etc/config + # command: + # - /pod_nanny + # - --config-dir=/etc/config + # - --cpu=200m + # - --extra-cpu=2m + # - --memory=300Mi + # - --extra-memory=4Mi + # - --poll-period=180000 + # - --threshold=5 + # - --namespace=kube-system + # - --deployment=omsagent-rs + # - --container=omsagent # Uncomment below lines for MSI Auth Mode testing # - name: addon-token-adapter # command: @@ -653,12 +694,13 @@ spec: # - NET_ADMIN # - NET_RAW - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06272022-hotfix" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08102022" imagePullPolicy: IfNotPresent + # comment out resources if VPA is configured since the VPA will set these values resources: limits: cpu: 1 - memory: 1Gi + memory: 1.25Gi requests: cpu: 150m memory: 250Mi @@ -695,6 +737,9 @@ spec: # Uncomment below lines for MSI Auth Mode testing # - name: USING_AAD_MSI_AUTH # value: "true" + # Uncomment below lines when the Addon-resizer VPA is enabled + # - name: RS_ADDON-RESIZER_VPA_ENABLED + # value: "true" securityContext: privileged: true ports: @@ -746,7 +791,7 @@ spec: nodeSelectorTerms: - labelSelector: matchExpressions: - - key: beta.kubernetes.io/os + - key: kubernetes.io/os operator: In values: - linux @@ -798,6 +843,11 @@ spec: configMap: name: container-azm-ms-osmconfig optional: true + # Uncomment below lines to enable VPA + # - name: omsagent-rs-vpa-config-volume + # configMap: + # name: 
omsagent-rs-vpa-config + # optional: true --- apiVersion: apps/v1 kind: DaemonSet @@ -831,7 +881,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06142022" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08102022" imagePullPolicy: IfNotPresent resources: limits: @@ -920,7 +970,7 @@ spec: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - - key: beta.kubernetes.io/os + - key: kubernetes.io/os operator: In values: - windows diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 3b663132e..f25aaecf1 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -5,34 +5,33 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod06142022 +ARG IMAGE_TAG=win-ciprod08102022 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement RUN powershell -Command "Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" # Fluentd depends on cool.io whose fat gem is only available for Ruby < 2.5, so need to specify --platform ruby when install Ruby > 2.5 and install msys2 to get dev tools -RUN choco install -y ruby --version 2.7.5.1 --params "'/InstallDir:C:\ruby27'" \ -&& choco install -y msys2 --version 20211130.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby27\msys64'" \ +RUN choco install -y ruby --version 3.1.1.1 --params "'/InstallDir:C:\ruby31'" \ +&& choco install -y msys2 --version 20211130.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby31\msys64'" \ && choco install -y vim # gangams - optional MSYS2 update via ridk failing in merged docker file so skipping that since we dont need optional update RUN refreshenv \ && ridk install 3 \ && echo gem: --no-document >> C:\ProgramData\gemrc \ -&& gem install cool.io -v 1.5.4 --platform ruby \ +&& gem install cool.io -v 1.7.1 --platform ruby \ && gem install oj -v 3.3.10 \ -&& gem install json -v 2.2.0 \ -&& gem install fluentd -v 1.14.2 \ +&& gem install fluentd -v 1.14.6 \ && gem install win32-service -v 1.0.1 \ && gem install win32-ipc -v 0.7.0 \ && gem install win32-event -v 0.6.3 \ && gem install windows-pr -v 1.2.6 \ -&& gem install tomlrb -v 1.3.0 \ +&& gem install tomlrb -v 2.0.1 \ && gem install gyoku -v 1.3.1 \ && gem sources --clear-all # Remove gem cache and chocolatey -RUN powershell -Command "Remove-Item -Force C:\ruby27\lib\ruby\gems\2.7.0\cache\*.gem; Remove-Item -Recurse -Force 'C:\ProgramData\chocolatey'" +RUN powershell -Command "Remove-Item -Force C:\ruby31\lib\ruby\gems\3.1.0\cache\*.gem; Remove-Item -Recurse -Force 'C:\ProgramData\chocolatey'" SHELL ["powershell"] diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index 2fd429e43..c5f1f422d 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -35,7 +35,7 @@ Write-Host ('Finished Installing Fluentbit') Write-Host ('Installing Telegraf'); try { - $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.22.2_windows_amd64.zip' + $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.23.2_windows_amd64.zip' Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf.zip Expand-Archive -Path /installation/telegraf.zip -Destination /installation/telegraf Move-Item -Path /installation/telegraf/*/* 
-Destination /opt/telegraf/ -ErrorAction SilentlyContinue diff --git a/scripts/build/linux/install-build-pre-requisites.sh b/scripts/build/linux/install-build-pre-requisites.sh index b85e54fc4..88f9fbef9 100644 --- a/scripts/build/linux/install-build-pre-requisites.sh +++ b/scripts/build/linux/install-build-pre-requisites.sh @@ -8,17 +8,17 @@ TEMP_DIR=temp-$RANDOM install_go_lang() { export goVersion="$(echo $(go version))" - if [[ $goVersion == *go1.15.14* ]] ; then - echo "found existing installation of go version 1.15.14 so skipping the installation of go" + if [[ $goVersion == *go1.18.3* ]] ; then + echo "found existing installation of go version 1.18.3 so skipping the installation of go" else - echo "installing go 1.15.14 version ..." - sudo curl -O https://dl.google.com/go/go1.15.14.linux-amd64.tar.gz - sudo tar -xvf go1.15.14.linux-amd64.tar.gz + echo "installing go 1.18.3 version ..." + sudo curl -O https://dl.google.com/go/go1.18.3.linux-amd64.tar.gz + sudo tar -xvf go1.18.3.linux-amd64.tar.gz sudo mv -f go /usr/local echo "set file permission for go bin" sudo chmod 744 /usr/local/go/bin - echo "installation of go 1.15.14 completed." - echo "installation of go 1.15.14 completed." + echo "installation of go 1.18.3 completed." + echo "installation of go 1.18.3 completed." fi } @@ -173,4 +173,4 @@ sudo rm -rf $TEMP_DIR # set go env vars install_go_env_vars -echo "installing build pre-requisites python, go 1.15.14, dotnet, powershell, build dependencies and docker completed" +echo "installing build pre-requisites python, go 1.18.3, dotnet, powershell, build dependencies and docker completed" diff --git a/scripts/build/windows/install-build-pre-requisites.ps1 b/scripts/build/windows/install-build-pre-requisites.ps1 index 235f6ace9..1ceeda353 100644 --- a/scripts/build/windows/install-build-pre-requisites.ps1 +++ b/scripts/build/windows/install-build-pre-requisites.ps1 @@ -13,8 +13,8 @@ function Install-Go { exit 1 } - $url = "https://go.dev/dl/go1.15.14.windows-amd64.msi" - $output = Join-Path -Path $tempGo -ChildPath "go1.15.14.windows-amd64.msi" + $url = "https://go.dev/dl/go1.18.3.windows-amd64.msi" + $output = Join-Path -Path $tempGo -ChildPath "go1.18.3.windows-amd64.msi" Write-Host("downloading go msi into directory path : " + $output + " ...") Invoke-WebRequest -Uri $url -OutFile $output -ErrorAction Stop Write-Host("downloading of go msi into directory path : " + $output + " completed") @@ -137,7 +137,7 @@ function Install-Docker() { # https://stackoverflow.com/questions/28682642/powershell-why-is-using-invoke-webrequest-much-slower-than-a-browser-download $ProgressPreference = 'SilentlyContinue' -Write-Host "Install GO 1.15.14 version" +Write-Host "Install GO 1.18.3 version" Install-Go Write-Host "Install Build dependencies" Build-Dependencies diff --git a/scripts/cluster-creation/aks-engine.sh b/scripts/cluster-creation/aks-engine.sh index 9d287ea07..ba763e354 100644 --- a/scripts/cluster-creation/aks-engine.sh +++ b/scripts/cluster-creation/aks-engine.sh @@ -89,11 +89,8 @@ while getopts 'hs:c:w:d:l:' opt; do } create_cluster() { -sudo touch kubernetes.json -sudo chmod 777 kubernetes.json # For docker runtime, remove kubernetesConfig block -cat >> kubernetes.json <<EOL +sudo tee kubernetes.json > /dev/null << 'EOF' { "apiVersion": "vlabs", "properties": { @@ -132,7 +129,7 @@ cat >> kubernetes.json <<EOL -EOL +EOF diff --git a/scripts/cluster-creation/kind.sh b/scripts/cluster-creation/kind.sh --- a/scripts/cluster-creation/kind.sh +++ b/scripts/cluster-creation/kind.sh -cat >> kind-config.yaml <<EOL +sudo tee kind-config.yaml > /dev/null << 'EOF' kind: Cluster apiVersion: kind.sigs.k8s.io/v1alpha3 nodes: - role: control-plane - role: worker -EOL +EOF + sudo kind create cluster --config kind-config.yaml --name 
$clusterName } diff --git a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json index c42a1d074..2024e611a 100644 --- a/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/aks/onboarding-using-msi-auth/existingClusterOnboarding.json @@ -61,7 +61,7 @@ "resources": [ { "type": "Microsoft.Insights/dataCollectionRules", - "apiVersion": "2019-11-01-preview", + "apiVersion": "2021-04-01", "name": "[variables('dcrName')]", "location": "[parameters('workspaceLocation')]", "tags": "[parameters('resourceTagValues')]", @@ -72,18 +72,7 @@ { "name": "ContainerInsightsExtension", "streams": [ - "Microsoft-Perf", - "Microsoft-ContainerInventory", - "Microsoft-ContainerLog", - "Microsoft-ContainerLogV2", - "Microsoft-ContainerNodeInventory", - "Microsoft-KubeEvents", - "Microsoft-KubeMonAgentEvents", - "Microsoft-KubeNodeInventory", - "Microsoft-KubePodInventory", - "Microsoft-KubePVInventory", - "Microsoft-KubeServices", - "Microsoft-InsightsMetrics" + "Microsoft-ContainerInsights-Group-Default" ], "extensionName": "ContainerInsights" } @@ -100,18 +89,7 @@ "dataFlows": [ { "streams": [ - "Microsoft-Perf", - "Microsoft-ContainerInventory", - "Microsoft-ContainerLog", - "Microsoft-ContainerLogV2", - "Microsoft-ContainerNodeInventory", - "Microsoft-KubeEvents", - "Microsoft-KubeMonAgentEvents", - "Microsoft-KubeNodeInventory", - "Microsoft-KubePodInventory", - "Microsoft-KubePVInventory", - "Microsoft-KubeServices", - "Microsoft-InsightsMetrics" + "Microsoft-ContainerInsights-Group-Default" ], "destinations": [ "ciworkspace" @@ -145,7 +123,7 @@ { "type": "Microsoft.ContainerService/managedClusters/providers/dataCollectionRuleAssociations", "name": "[concat(variables('clusterName'),'/microsoft.insights/', variables('associationName'))]", - "apiVersion": "2019-11-01-preview", + "apiVersion": "2021-04-01", "properties": { "description": "Association of data collection rule. 
Deleting this association will break the data collection for this AKS Cluster.", "dataCollectionRuleId": "[variables('dataCollectionRuleId')]" diff --git a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json index a4a4e3453..424572857 100644 --- a/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension-msi-auth/existingClusterOnboarding.json @@ -75,7 +75,7 @@ "resources": [ { "type": "Microsoft.Insights/dataCollectionRules", - "apiVersion": "2019-11-01-preview", + "apiVersion": "2021-04-01", "name": "[variables('dcrName')]", "location": "[parameters('workspaceRegion')]", "tags": "[parameters('resourceTagValues')]", @@ -86,18 +86,7 @@ { "name": "ContainerInsightsExtension", "streams": [ - "Microsoft-Perf", - "Microsoft-ContainerInventory", - "Microsoft-ContainerLog", - "Microsoft-ContainerLogV2", - "Microsoft-ContainerNodeInventory", - "Microsoft-KubeEvents", - "Microsoft-KubeMonAgentEvents", - "Microsoft-KubeNodeInventory", - "Microsoft-KubePodInventory", - "Microsoft-KubePVInventory", - "Microsoft-KubeServices", - "Microsoft-InsightsMetrics" + "Microsoft-ContainerInsights-Group-Default" ], "extensionName": "ContainerInsights" } @@ -114,18 +103,7 @@ "dataFlows": [ { "streams": [ - "Microsoft-Perf", - "Microsoft-ContainerInventory", - "Microsoft-ContainerLog", - "Microsoft-ContainerLogV2", - "Microsoft-ContainerNodeInventory", - "Microsoft-KubeEvents", - "Microsoft-KubeMonAgentEvents", - "Microsoft-KubeNodeInventory", - "Microsoft-KubePodInventory", - "Microsoft-KubePVInventory", - "Microsoft-KubeServices", - "Microsoft-InsightsMetrics" + "Microsoft-ContainerInsights-Group-Default" ], "destinations": [ "ciworkspace" @@ -159,7 +137,7 @@ { "type": "Microsoft.Kubernetes/connectedClusters/providers/dataCollectionRuleAssociations", "name": "[concat(variables('clusterName'),'/microsoft.insights/', variables('associationName'))]", - "apiVersion": "2019-11-01-preview", + "apiVersion": "2021-04-01", "properties": { "description": "Association of data collection rule. Deleting this association will break the data collection for this AKS Cluster.", "dataCollectionRuleId": "[variables('dataCollectionRuleId')]" diff --git a/source/plugins/ruby/ApplicationInsightsUtility.rb b/source/plugins/ruby/ApplicationInsightsUtility.rb index 70d0a400e..6f499e8bd 100644 --- a/source/plugins/ruby/ApplicationInsightsUtility.rb +++ b/source/plugins/ruby/ApplicationInsightsUtility.rb @@ -7,7 +7,7 @@ class ApplicationInsightsUtility require_relative "DockerApiClient" require_relative "oms_common" require_relative "proxy_utils" - require "yajl/json_gem" + require "json" require "base64" @@HeartBeat = "HeartBeatEvent" @@ -22,6 +22,7 @@ class ApplicationInsightsUtility @@EnvControllerType = "CONTROLLER_TYPE" @@EnvContainerRuntime = "CONTAINER_RUNTIME" @@EnvAADMSIAuthMode = "AAD_MSI_AUTH_MODE" + @@EnvAddonResizerVPAEnabled = "RS_ADDON-RESIZER_VPA_ENABLED" @@isWindows = false @@hostName = (OMS::Common.get_hostname) @@ -93,6 +94,10 @@ def initializeUtility() else @@CustomProperties["aadAuthMSIMode"] = "false" end + addonResizerVPAEnabled = ENV[@@EnvAddonResizerVPAEnabled] + if !addonResizerVPAEnabled.nil? && !addonResizerVPAEnabled.empty? 
&& addonResizerVPAEnabled.downcase == "true".downcase + @@CustomProperties["addonResizerVPAEnabled"] = "true" + end #Check if telemetry is turned off telemetryOffSwitch = ENV["DISABLE_TELEMETRY"] if telemetryOffSwitch && !telemetryOffSwitch.nil? && !telemetryOffSwitch.empty? && telemetryOffSwitch.downcase == "true".downcase diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index caf7ac95a..b18e887fd 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -2,7 +2,7 @@ # frozen_string_literal: true class CAdvisorMetricsAPIClient - require "yajl/json_gem" + require "json" require "logger" require "net/http" require "net/https" @@ -235,17 +235,17 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_CONTAINER metricItem["InstanceName"] = clusterId + "/" + podUid + "/" + containerName - + metricCollection = {} metricCollection["CounterName"] = metricNametoReturn metricCollection["Value"] = metricValue metricItem["json_Collections"] = [] - metricCollections = [] - metricCollections.push(metricCollection) + metricCollections = [] + metricCollections.push(metricCollection) metricItem["json_Collections"] = metricCollections.to_json - metricItems.push(metricItem) - + metricItems.push(metricItem) + #Telemetry about agent performance begin # we can only do this much now. Ideally would like to use the docker image repository to find our pods/containers @@ -525,13 +525,13 @@ def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, containerName = container["name"] metricValue = container["cpu"][cpuMetricNameToCollect] metricTime = metricPollTime #container["cpu"]["time"] - + metricItem = {} metricItem["Timestamp"] = metricTime metricItem["Host"] = hostName metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_CONTAINER metricItem["InstanceName"] = clusterId + "/" + podUid + "/" + containerName - + metricItem["json_Collections"] = [] metricCollection = {} metricCollection["CounterName"] = metricNametoReturn @@ -566,9 +566,9 @@ def getContainerCpuMetricItemRate(metricJSON, hostName, cpuMetricNameToCollect, end metricCollection["Value"] = metricValue - - metricCollections = [] - metricCollections.push(metricCollection) + + metricCollections = [] + metricCollections.push(metricCollection) metricItem["json_Collections"] = metricCollections.to_json metricItems.push(metricItem) #Telemetry about agent performance @@ -655,16 +655,16 @@ def getContainerMemoryMetricItems(metricJSON, hostName, memoryMetricNameToCollec metricItem["Host"] = hostName metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_CONTAINER metricItem["InstanceName"] = clusterId + "/" + podUid + "/" + containerName - + metricCollection = {} metricCollection["CounterName"] = metricNametoReturn metricCollection["Value"] = metricValue metricItem["json_Collections"] = [] - metricCollections = [] - metricCollections.push(metricCollection) + metricCollections = [] + metricCollections.push(metricCollection) metricItem["json_Collections"] = metricCollections.to_json - metricItems.push(metricItem) + metricItems.push(metricItem) #Telemetry about agent performance begin @@ -708,21 +708,21 @@ def getNodeMetricItem(metricJSON, hostName, metricCategory, metricNameToCollect, if !node[metricCategory].nil? 
metricValue = node[metricCategory][metricNameToCollect] metricTime = metricPollTime #node[metricCategory]["time"] - + metricItem["Timestamp"] = metricTime metricItem["Host"] = hostName metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_NODE metricItem["InstanceName"] = clusterId + "/" + nodeName - + metricCollection = {} metricCollection["CounterName"] = metricNametoReturn metricCollection["Value"] = metricValue metricItem["json_Collections"] = [] - metricCollections = [] - metricCollections.push(metricCollection) - metricItem["json_Collections"] = metricCollections.to_json + metricCollections = [] + metricCollections.push(metricCollection) + metricItem["json_Collections"] = metricCollections.to_json end rescue => error @Log.warn("getNodeMetricItem failed: #{error} for metric #{metricNameToCollect}") @@ -825,19 +825,19 @@ def getNodeMetricItemRate(metricJSON, hostName, metricCategory, metricNameToColl end end end - + metricItem["Timestamp"] = metricTime metricItem["Host"] = hostName metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_NODE metricItem["InstanceName"] = clusterId + "/" + nodeName - + metricCollection = {} metricCollection["CounterName"] = metricNametoReturn metricCollection["Value"] = metricValue metricItem["json_Collections"] = [] - metricCollections = [] - metricCollections.push(metricCollection) + metricCollections = [] + metricCollections.push(metricCollection) metricItem["json_Collections"] = metricCollections.to_json end rescue => error @@ -860,21 +860,21 @@ def getNodeLastRebootTimeMetric(metricJSON, hostName, metricNametoReturn, metric metricValue = node["startTime"] metricTime = metricPollTime #Time.now.utc.iso8601 #2018-01-30T19:36:14Z - + metricItem["Timestamp"] = metricTime metricItem["Host"] = hostName metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_NODE metricItem["InstanceName"] = clusterId + "/" + nodeName - + metricCollection = {} metricCollection["CounterName"] = metricNametoReturn #Read it from /proc/uptime metricCollection["Value"] = DateTime.parse(metricTime).to_time.to_i - IO.read("/proc/uptime").split[0].to_f metricItem["json_Collections"] = [] - metricCollections = [] - metricCollections.push(metricCollection) + metricCollections = [] + metricCollections.push(metricCollection) metricItem["json_Collections"] = metricCollections.to_json rescue => error @Log.warn("getNodeLastRebootTimeMetric failed: #{error} ") @@ -903,14 +903,14 @@ def getContainerStartTimeMetricItems(metricJSON, hostName, metricNametoReturn, m metricItem["Host"] = hostName metricItem["ObjectName"] = Constants::OBJECT_NAME_K8S_CONTAINER metricItem["InstanceName"] = clusterId + "/" + podUid + "/" + containerName - + metricCollection = {} metricCollection["CounterName"] = metricNametoReturn metricCollection["Value"] = DateTime.parse(metricValue).to_time.to_i metricItem["json_Collections"] = [] - metricCollections = [] - metricCollections.push(metricCollection) + metricCollections = [] + metricCollections.push(metricCollection) metricItem["json_Collections"] = metricCollections.to_json metricItems.push(metricItem) end diff --git a/source/plugins/ruby/ContainerInventoryState.rb b/source/plugins/ruby/ContainerInventoryState.rb index 170fa65e3..7e5ca18e8 100644 --- a/source/plugins/ruby/ContainerInventoryState.rb +++ b/source/plugins/ruby/ContainerInventoryState.rb @@ -2,7 +2,7 @@ # frozen_string_literal: true class ContainerInventoryState - require 'yajl/json_gem' + require 'json' require_relative 'omslog' @@InventoryDirectory = 
"/var/opt/microsoft/docker-cimprov/state/ContainerInventory/" diff --git a/source/plugins/ruby/DockerApiClient.rb b/source/plugins/ruby/DockerApiClient.rb index 53dd1f39f..cff9f359f 100644 --- a/source/plugins/ruby/DockerApiClient.rb +++ b/source/plugins/ruby/DockerApiClient.rb @@ -3,7 +3,7 @@ class DockerApiClient require "socket" - require "yajl/json_gem" + require "json" require "timeout" require_relative "omslog" require_relative "DockerApiRestHelper" diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index fa7052da2..eab360471 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -2,7 +2,7 @@ # frozen_string_literal: true class KubernetesApiClient - require "yajl/json_gem" + require "json" require "logger" require "net/http" require "net/https" @@ -37,7 +37,10 @@ class KubernetesApiClient @Log = Logger.new(@LogPath, 2, 10 * 1048576) #keep last 2 files, max log file size = 10M @@TokenFileName = "/var/run/secrets/kubernetes.io/serviceaccount/token" @@TokenStr = nil - @@telemetryTimeTracker = DateTime.now.to_time.to_i + @@cpuLimitsTelemetryTimeTracker = DateTime.now.to_time.to_i + @@cpuRequestsTelemetryTimeTracker = DateTime.now.to_time.to_i + @@memoryLimitsTelemetryTimeTracker = DateTime.now.to_time.to_i + @@memoryRequestsTelemetryTimeTracker = DateTime.now.to_time.to_i @@resourceLimitsTelemetryHash = {} def initialize @@ -470,6 +473,7 @@ def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToColle if podUid.nil? return metricItems end + podName = pod["metadata"]["name"] nodeName = "" #for unscheduled (non-started) pods nodeName does NOT exist @@ -514,8 +518,12 @@ def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToColle metricCollections.push(metricCollection) metricProps["json_Collections"] = metricCollections.to_json metricItems.push(metricProps) - #No container level limit for the given metric, so default to node level limit + + if isAddonResizerVPAEnabled() + sendReplicasetAgentRequestsAndLimitsTelemetry(podName, podNameSpace, containerName, metricNametoReturn, metricValue) + end else + #No container level limit for the given metric, so default to node level limit if (metricCategory == "limits" && !nodeAllocatableRecord.nil? && !nodeAllocatableRecord.empty? && nodeAllocatableRecord.has_key?(metricNameToCollect)) metricValue = getMetricNumericValue(metricNameToCollect, nodeAllocatableRecord[metricNameToCollect]) metricProps = {} @@ -612,25 +620,6 @@ def getContainerResourceRequestsAndLimitsAsInsightsMetrics(pod, metricCategory, metricItem["Tags"] = metricTags - metricItems.push(metricItem) - end - if (!metricValue.nil?) 
- metricItem = {} - metricItem["CollectionTime"] = metricTime - metricItem["Computer"] = nodeName - metricItem["Name"] = metricNametoReturn - metricItem["Value"] = metricValue - metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN - metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_GPU_NAMESPACE - - metricTags = {} - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = clusterId - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = clusterName - metricTags[Constants::INSIGHTSMETRICS_TAGS_CONTAINER_NAME] = podUid + "/" + containerName - #metricTags[Constants::INSIGHTSMETRICS_TAGS_K8SNAMESPACE] = podNameSpace - - metricItem["Tags"] = metricTags - metricItems.push(metricItem) end end @@ -762,8 +751,13 @@ def getMetricNumericValue(metricName, metricVal) metricValue.chomp!("k") metricValue = Float(metricValue) * 1000.0 ** 1 elsif (metricValue.end_with?("m")) + # quantity suffixes are case-sensitive: after the downcase above, "M" (mega, e.g. megabytes) and "m" (milli, e.g. millicores) both end in "m", so check the original metricVal to pick the right factor metricValue.chomp!("m") - metricValue = Float(metricValue) * 1000.0 ** 2 + if (metricVal.end_with?("M")) + metricValue = Float(metricValue) * 1000.0 ** 2 + else + metricValue = Float(metricValue) / 1000.0 + end elsif (metricValue.end_with?("g")) metricValue.chomp!("g") metricValue = Float(metricValue) * 1000.0 ** 3 @@ -820,9 +814,9 @@ def getResourcesAndContinuationTokenV2(uri, api_group: nil) responseCode, resourceInfo = getKubeResourceInfoV2(uri, api_group: api_group) @Log.info "KubernetesApiClient::getResourcesAndContinuationTokenV2 : Done getting resources from Kube API using url: #{uri} @ #{Time.now.utc.iso8601}" if !responseCode.nil? && responseCode == "200" && !resourceInfo.nil? - @Log.info "KubernetesApiClient::getResourcesAndContinuationTokenV2:Start:Parsing data for #{uri} using yajl @ #{Time.now.utc.iso8601}" - resourceInventory = Yajl::Parser.parse(StringIO.new(resourceInfo.body)) - @Log.info "KubernetesApiClient::getResourcesAndContinuationTokenV2:End:Parsing data for #{uri} using yajl @ #{Time.now.utc.iso8601}" + @Log.info "KubernetesApiClient::getResourcesAndContinuationTokenV2:Start:Parsing data for #{uri} using JSON @ #{Time.now.utc.iso8601}" + resourceInventory = JSON.parse(resourceInfo.body) + @Log.info "KubernetesApiClient::getResourcesAndContinuationTokenV2:End:Parsing data for #{uri} using JSON @ #{Time.now.utc.iso8601}" resourceInfo = nil end if (!resourceInventory.nil? && !resourceInventory["metadata"].nil?) @@ -844,9 +838,9 @@ def getResourcesAndContinuationToken(uri, api_group: nil) resourceInfo = getKubeResourceInfo(uri, api_group: api_group) @Log.info "KubernetesApiClient::getResourcesAndContinuationToken : Done getting resources from Kube API using url: #{uri} @ #{Time.now.utc.iso8601}" if !resourceInfo.nil? - @Log.info "KubernetesApiClient::getResourcesAndContinuationToken:Start:Parsing data for #{uri} using yajl @ #{Time.now.utc.iso8601}" - resourceInventory = Yajl::Parser.parse(StringIO.new(resourceInfo.body)) - @Log.info "KubernetesApiClient::getResourcesAndContinuationToken:End:Parsing data for #{uri} using yajl @ #{Time.now.utc.iso8601}" + @Log.info "KubernetesApiClient::getResourcesAndContinuationToken:Start:Parsing data for #{uri} using JSON @ #{Time.now.utc.iso8601}" + resourceInventory = JSON.parse(resourceInfo.body) + @Log.info "KubernetesApiClient::getResourcesAndContinuationToken:End:Parsing data for #{uri} using JSON @ #{Time.now.utc.iso8601}" resourceInfo = nil end if (!resourceInventory.nil? && !resourceInventory["metadata"].nil?)
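An aside on the getMetricNumericValue change above: Kubernetes quantity suffixes are case-sensitive ("M" is mega, "m" is milli), but the method downcases the value before matching suffixes, so both collapse to "m" and only the original string can disambiguate them. A minimal standalone Ruby sketch of just that branch (to_base_units is a hypothetical helper name; the real method also handles k/g/t suffixes and nanocore conversion):

    def to_base_units(metricVal)
      metricValue = metricVal.downcase
      if metricValue.end_with?("m")
        metricValue.chomp!("m")
        # "M" (mega) and "m" (milli) look identical after the downcase,
        # so consult the original casing to choose the factor
        if metricVal.end_with?("M")
          Float(metricValue) * 1000.0 ** 2  # megabytes -> bytes
        else
          Float(metricValue) / 1000.0       # milli-units, e.g. millicores -> cores
        end
      else
        Float(metricValue)
      end
    end

    puts to_base_units("500M")  # => 500000000.0
    puts to_base_units("250m")  # => 0.25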
@@ -1413,5 +1407,55 @@ def isEmitCacheTelemetry end return isEmitCacheTelemtryEnabled end + + def isAddonResizerVPAEnabled + isAddonResizerVPAEnabled = false + if !ENV["RS_ADDON-RESIZER_VPA_ENABLED"].nil? && !ENV["RS_ADDON-RESIZER_VPA_ENABLED"].empty? && ENV["RS_ADDON-RESIZER_VPA_ENABLED"].downcase == "true".downcase + isAddonResizerVPAEnabled = true + end + return isAddonResizerVPAEnabled + end + + def sendReplicasetAgentRequestsAndLimitsTelemetry(podName, podNameSpace, containerName, metricName, metricValue) + begin + if (!podName.nil? && podName.downcase.start_with?("omsagent-rs-") && podNameSpace.eql?("kube-system") && containerName.eql?("omsagent")) + telemetryProps = {} + telemetryProps["PodName"] = podName + telemetryProps["ContainerName"] = containerName + case metricName + when "cpuLimitNanoCores" + timeDifference = (DateTime.now.to_time.to_i - @@cpuLimitsTelemetryTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + @@cpuLimitsTelemetryTimeTracker = DateTime.now.to_time.to_i + ApplicationInsightsUtility.sendMetricTelemetry(metricName, metricValue, telemetryProps) + end + when "memoryLimitBytes" + timeDifference = (DateTime.now.to_time.to_i - @@memoryLimitsTelemetryTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + @@memoryLimitsTelemetryTimeTracker = DateTime.now.to_time.to_i + ApplicationInsightsUtility.sendMetricTelemetry(metricName, metricValue, telemetryProps) + end + when "cpuRequestNanoCores" + timeDifference = (DateTime.now.to_time.to_i - @@cpuRequestsTelemetryTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + @@cpuRequestsTelemetryTimeTracker = DateTime.now.to_time.to_i + ApplicationInsightsUtility.sendMetricTelemetry(metricName, metricValue, telemetryProps) + end + when "memoryRequestBytes" + timeDifference = (DateTime.now.to_time.to_i - @@memoryRequestsTelemetryTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + @@memoryRequestsTelemetryTimeTracker = DateTime.now.to_time.to_i + ApplicationInsightsUtility.sendMetricTelemetry(metricName, metricValue, telemetryProps) + end + end + end + rescue => err + @Log.warn "KubernetesApiClient::sendReplicasetAgentRequestsAndLimitsTelemetry failed with an error: #{err}" + end + end end end diff --git a/source/plugins/ruby/MdmMetricsGenerator.rb b/source/plugins/ruby/MdmMetricsGenerator.rb index f4904697c..6877c2623 100644 --- a/source/plugins/ruby/MdmMetricsGenerator.rb +++ b/source/plugins/ruby/MdmMetricsGenerator.rb @@ -3,7 +3,6 @@ class MdmMetricsGenerator require "logger" - require "yajl/json_gem" require "json" require_relative "MdmAlertTemplates" require_relative "ApplicationInsightsUtility" @@ -140,7 +139,7 @@ def appendPodMetrics(records, metricName, metricHash, batch_time, metricsTemplat containerCountMetricValue: value, } end - records.push(Yajl::Parser.parse(StringIO.new(record))) + records.push(JSON.parse(record)) } else @log.info "No records found in hash for metric: #{metricName}" @@ -334,7 +333,7 @@ def getContainerResourceUtilMetricRecords(recordTimeStamp, metricName, percentag containerResourceUtilizationPercentage: percentageMetricValue, thresholdPercentageDimValue: thresholdPercentage, } - 
records.push(Yajl::Parser.parse(StringIO.new(resourceUtilRecord))) + records.push(JSON.parse(resourceUtilRecord)) # Adding another metric for threshold violation resourceThresholdViolatedRecord = MdmAlertTemplates::Container_resource_threshold_violation_template % { @@ -347,7 +346,7 @@ def getContainerResourceUtilMetricRecords(recordTimeStamp, metricName, percentag containerResourceThresholdViolated: isZeroFill ? 0 : 1, thresholdPercentageDimValue: thresholdPercentage, } - records.push(Yajl::Parser.parse(StringIO.new(resourceThresholdViolatedRecord))) + records.push(JSON.parse(resourceThresholdViolatedRecord)) rescue => errorStr @log.info "Error in getContainerResourceUtilMetricRecords: #{errorStr}" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) @@ -374,7 +373,7 @@ def getPVResourceUtilMetricRecords(recordTimeStamp, metricName, computer, percen pvResourceUtilizationPercentage: percentageMetricValue, thresholdPercentageDimValue: thresholdPercentage, } - records.push(Yajl::Parser.parse(StringIO.new(resourceUtilRecord))) + records.push(JSON.parse(resourceUtilRecord)) # Adding another metric for threshold violation resourceThresholdViolatedRecord = MdmAlertTemplates::PV_resource_threshold_violation_template % { @@ -387,7 +386,7 @@ def getPVResourceUtilMetricRecords(recordTimeStamp, metricName, computer, percen pvResourceThresholdViolated: isZeroFill ? 0 : 1, thresholdPercentageDimValue: thresholdPercentage, } - records.push(Yajl::Parser.parse(StringIO.new(resourceThresholdViolatedRecord))) + records.push(JSON.parse(resourceThresholdViolatedRecord)) rescue => errorStr @log.info "Error in getPVResourceUtilMetricRecords: #{errorStr}" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) @@ -418,7 +417,7 @@ def getDiskUsageMetricRecords(record) devicevalue: deviceName, diskUsagePercentageValue: usedPercent, } - records.push(Yajl::Parser.parse(StringIO.new(diskUsedPercentageRecord))) + records.push(JSON.parse(diskUsedPercentageRecord)) end rescue => errorStr @log.info "Error in getDiskUsageMetricRecords: #{errorStr}" @@ -469,7 +468,7 @@ def getMetricRecords(record) dimValues: dimValues, metricValue: v, } - records.push(Yajl::Parser.parse(StringIO.new(metricRecord))) + records.push(JSON.parse(metricRecord)) #@log.info "pushed mdmgenericmetric: #{k},#{v}" end } @@ -545,7 +544,7 @@ def getNodeResourceMetricRecords(record, metric_name, metric_value, percentage_m metricmaxvalue: metric_value, metricsumvalue: metric_value, } - records.push(Yajl::Parser.parse(StringIO.new(custommetricrecord))) + records.push(JSON.parse(custommetricrecord)) if !percentage_metric_value.nil? additional_record = MdmAlertTemplates::Node_resource_metrics_template % { @@ -558,7 +557,21 @@ def getNodeResourceMetricRecords(record, metric_name, metric_value, percentage_m metricmaxvalue: percentage_metric_value, metricsumvalue: percentage_metric_value, } - records.push(Yajl::Parser.parse(StringIO.new(additional_record))) + records.push(JSON.parse(additional_record)) + end + + if !allocatable_percentage_metric_value.nil? 
+ additional_record = MdmAlertTemplates::Node_resource_metrics_template % { + timestamp: record["Timestamp"], + metricName: @@node_metric_name_metric_allocatable_percentage_name_hash[metric_name], + hostvalue: record["Host"], + objectnamevalue: record["ObjectName"], + instancenamevalue: record["InstanceName"], + metricminvalue: allocatable_percentage_metric_value, + metricmaxvalue: allocatable_percentage_metric_value, + metricsumvalue: allocatable_percentage_metric_value, + } + records.push(JSON.parse(additional_record)) end if !allocatable_percentage_metric_value.nil? diff --git a/source/plugins/ruby/WatchStream.rb b/source/plugins/ruby/WatchStream.rb index 6cc850450..78ce25dd5 100644 --- a/source/plugins/ruby/WatchStream.rb +++ b/source/plugins/ruby/WatchStream.rb @@ -3,7 +3,7 @@ require "net/http" require "net/https" -require "yajl/json_gem" +require "json" require "logger" require "time" @@ -50,7 +50,7 @@ def each response.read_body do |chunk| buffer << chunk while (line = buffer.slice!(/.+\n/)) - yield(Yajl::Parser.parse(StringIO.new(line.chomp))) + yield(JSON.parse(line.chomp)) end end end diff --git a/source/plugins/ruby/arc_k8s_cluster_identity.rb b/source/plugins/ruby/arc_k8s_cluster_identity.rb index 39b8c1c96..43707b91f 100644 --- a/source/plugins/ruby/arc_k8s_cluster_identity.rb +++ b/source/plugins/ruby/arc_k8s_cluster_identity.rb @@ -3,7 +3,7 @@ require "net/http" require "net/https" require "uri" -require "yajl/json_gem" +require "json" require "base64" require "time" require_relative "KubernetesApiClient" diff --git a/source/plugins/ruby/filter_cadvisor2mdm.rb b/source/plugins/ruby/filter_cadvisor2mdm.rb index 6bafa372a..621c94992 100644 --- a/source/plugins/ruby/filter_cadvisor2mdm.rb +++ b/source/plugins/ruby/filter_cadvisor2mdm.rb @@ -6,7 +6,7 @@ module Fluent::Plugin require "logger" - require "yajl/json_gem" + require "json" require_relative "oms_common" require_relative "CustomMetricsUtils" require_relative "kubelet_utils" diff --git a/source/plugins/ruby/filter_inventory2mdm.rb b/source/plugins/ruby/filter_inventory2mdm.rb index 509ac608e..165bb63cf 100644 --- a/source/plugins/ruby/filter_inventory2mdm.rb +++ b/source/plugins/ruby/filter_inventory2mdm.rb @@ -6,7 +6,7 @@ module Fluent::Plugin require 'logger' - require 'yajl/json_gem' + require 'json' require_relative 'oms_common' require_relative 'CustomMetricsUtils' diff --git a/source/plugins/ruby/filter_telegraf2mdm.rb b/source/plugins/ruby/filter_telegraf2mdm.rb index fd71f1682..0819afdb7 100644 --- a/source/plugins/ruby/filter_telegraf2mdm.rb +++ b/source/plugins/ruby/filter_telegraf2mdm.rb @@ -6,7 +6,7 @@ module Fluent::Plugin require "logger" - require "yajl/json_gem" + require "json" require_relative "oms_common" require_relative "kubelet_utils" require_relative "MdmMetricsGenerator" diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index 901ecefab..d929e86fb 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -14,7 +14,7 @@ class CAdvisor_Perf_Input < Input def initialize super require "yaml" - require "yajl/json_gem" + require "json" require "time" require_relative "CAdvisorMetricsAPIClient" diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index c8ffe7d05..aeb70c68a 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -11,7 +11,7 @@ class Container_Inventory_Input < Input def initialize super - 
require "yajl/json_gem" + require "json" require "time" require_relative "ContainerInventoryState" require_relative "ApplicationInsightsUtility" diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index deeae6e14..6ccb02c54 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -10,8 +10,7 @@ class Kube_Event_Input < Input def initialize super - require "yajl/json_gem" - require "yajl" + require "json" require "time" require_relative "KubernetesApiClient" diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index 368eb61d4..8473cca81 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -16,8 +16,7 @@ def initialize(is_unit_test_mode = nil, kubernetesApiClient = nil, super() require "yaml" - require "yajl/json_gem" - require "yajl" + require "json" require "time" require_relative "KubernetesApiClient" diff --git a/source/plugins/ruby/in_kube_perfinventory.rb b/source/plugins/ruby/in_kube_perfinventory.rb index 20589167b..25f9c93e8 100644 --- a/source/plugins/ruby/in_kube_perfinventory.rb +++ b/source/plugins/ruby/in_kube_perfinventory.rb @@ -10,8 +10,7 @@ class Kube_PerfInventory_Input < Input def initialize super require "yaml" - require "yajl/json_gem" - require "yajl" + require "json" require "set" require "time" require "net/http" @@ -407,7 +406,7 @@ def getNodeAllocatableRecords() isAcquiredLock = f.flock(File::LOCK_EX | File::LOCK_NB) raise "in_kube_perfinventory:getNodeAllocatableRecords:Failed to acquire file lock @ #{Time.now.utc.iso8601}" if !isAcquiredLock startTime = (Time.now.to_f * 1000).to_i - nodeAllocatableRecords = Yajl::Parser.parse(f) + nodeAllocatableRecords = JSON.parse(f.read) timetakenMs = ((Time.now.to_f * 1000).to_i - startTime) $log.info "in_kube_perfinventory:getNodeAllocatableRecords:Number of Node Allocatable records: #{nodeAllocatableRecords.length} with time taken(ms) for read: #{timetakenMs} @ #{Time.now.utc.iso8601}" else diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 37c9741c3..a1986bd4a 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -12,8 +12,7 @@ class Kube_PodInventory_Input < Input def initialize super require "yaml" - require "yajl/json_gem" - require "yajl" + require "json" require "set" require "time" require "net/http" @@ -916,9 +915,9 @@ def watch_services else $log.info("in_kube_podinventory::watch_services: Done getting services from Kube API @ #{Time.now.utc.iso8601}") if !serviceInfo.nil? - $log.info("in_kube_podinventory::watch_services:Start:Parsing services data using yajl @ #{Time.now.utc.iso8601}") - serviceInventory = Yajl::Parser.parse(StringIO.new(serviceInfo.body)) - $log.info("in_kube_podinventory::watch_services:End:Parsing services data using yajl @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::watch_services:Start:Parsing services data using JSON @ #{Time.now.utc.iso8601}") + serviceInventory = JSON.parse(serviceInfo.body) + $log.info("in_kube_podinventory::watch_services:End:Parsing services data using JSON @ #{Time.now.utc.iso8601}") serviceInfo = nil if (!serviceInventory.nil? && !serviceInventory.empty?) 
servicesResourceVersion = serviceInventory["metadata"]["resourceVersion"] diff --git a/source/plugins/ruby/in_kube_podmdminventory.rb b/source/plugins/ruby/in_kube_podmdminventory.rb index b872650d2..38e07d860 100644 --- a/source/plugins/ruby/in_kube_podmdminventory.rb +++ b/source/plugins/ruby/in_kube_podmdminventory.rb @@ -14,8 +14,7 @@ class Kube_PodMDMInventory_Input < Input def initialize super require "yaml" - require "yajl/json_gem" - require "yajl" + require "json" require "set" require "time" require "net/http" @@ -187,7 +186,7 @@ def getMDMRecords() isAcquiredLock = f.flock(File::LOCK_EX | File::LOCK_NB) raise "in_kube_podmdminventory:getMDMRecords:Failed to acquire file lock @ #{Time.now.utc.iso8601}" if !isAcquiredLock startTime = (Time.now.to_f * 1000).to_i - mdmRecords = Yajl::Parser.parse(f) + mdmRecords = JSON.parse(f.read) timetakenMs = ((Time.now.to_f * 1000).to_i - startTime) if mdmRecords.nil? || mdmRecords.empty? || mdmRecords["items"].nil? || mdmRecords["collectionTime"] == @prevCollectionTime raise "in_kube_podmdminventory:getMDMRecords: either read mdmRecords is nil or empty or stale @ #{Time.now.utc.iso8601}" diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index fccfd459d..1e25e4057 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -12,8 +12,7 @@ class Kube_PVInventory_Input < Input def initialize super require "yaml" - require "yajl/json_gem" - require "yajl" + require "json" require "time" require_relative "KubernetesApiClient" require_relative "ApplicationInsightsUtility" diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index 0b563a890..92e6318b9 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -12,8 +12,7 @@ class Kube_Kubestate_Deployments_Input < Input def initialize super - require "yajl/json_gem" - require "yajl" + require "json" require "date" require "time" diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 178f7944f..7f7e3aac5 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -10,8 +10,7 @@ class Kube_Kubestate_HPA_Input < Input def initialize super - require "yajl/json_gem" - require "yajl" + require "json" require "time" require_relative "KubernetesApiClient" diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index dd462fdf2..841c4867a 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -12,7 +12,7 @@ class Win_CAdvisor_Perf_Input < Input def initialize super require "yaml" - require "yajl/json_gem" + require "json" require "time" require_relative "CAdvisorMetricsAPIClient" diff --git a/source/plugins/ruby/kubelet_utils.rb b/source/plugins/ruby/kubelet_utils.rb index 368ca8639..c9893fc84 100644 --- a/source/plugins/ruby/kubelet_utils.rb +++ b/source/plugins/ruby/kubelet_utils.rb @@ -3,7 +3,7 @@ # frozen_string_literal: true require "logger" -require "yajl/json_gem" +require "json" require_relative "CAdvisorMetricsAPIClient" require_relative "KubernetesApiClient" require "bigdecimal" @@ -52,7 +52,6 @@ def get_node_allocatable(cpu_capacity, memory_capacity) cpu_allocatable = 1.0 memory_allocatable = 1.0 - allocatable_response = CAdvisorMetricsAPIClient.getCongifzCAdvisor(winNode: nil) parsed_response = 
JSON.parse(allocatable_response.body) @@ -66,7 +65,7 @@ def get_node_allocatable(cpu_capacity, memory_capacity) @log.error "Error in get_node_allocatable::kubereserved_cpu: #{errorStr}" kubereserved_cpu = "0.0" ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::kubereserved_cpu: #{errorStr}") - end + end begin kubereserved_memory = parsed_response["kubeletconfig"]["kubeReserved"]["memory"] @@ -78,7 +77,7 @@ def get_node_allocatable(cpu_capacity, memory_capacity) @log.error "Error in get_node_allocatable::kubereserved_memory: #{errorStr}" kubereserved_memory = "0.0" ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::kubereserved_memory: #{errorStr}") - end + end begin systemReserved_cpu = parsed_response["kubeletconfig"]["systemReserved"]["cpu"] if systemReserved_cpu.nil? || systemReserved_cpu == "" @@ -90,7 +89,7 @@ def get_node_allocatable(cpu_capacity, memory_capacity) @log.error "Error in get_node_allocatable::systemReserved_cpu: #{errorStr}" systemReserved_cpu = "0.0" ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::systemReserved_cpu: #{errorStr}") - end + end begin explicitlyReserved_cpu = parsed_response["kubeletconfig"]["reservedCPUs"] @@ -103,19 +102,19 @@ def get_node_allocatable(cpu_capacity, memory_capacity) @log.error "Error in get_node_allocatable::explicitlyReserved_cpu: #{errorStr}" explicitlyReserved_cpu = "0.0" ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::explicitlyReserved_cpu: #{errorStr}") - end + end begin - systemReserved_memory = parsed_response["kubeletconfig"]["systemReserved"]["memory"] - if systemReserved_memory.nil? || systemReserved_memory == "" + systemReserved_memory = parsed_response["kubeletconfig"]["systemReserved"]["memory"] + if systemReserved_memory.nil? || systemReserved_memory == "" systemReserved_memory = "0.0" - end - @log.info "get_node_allocatable::systemReserved_memory #{systemReserved_memory}" + end + @log.info "get_node_allocatable::systemReserved_memory #{systemReserved_memory}" rescue => errorStr - @log.error "Error in get_node_allocatable::systemReserved_memory: #{errorStr}" - systemReserved_memory = "0.0" + @log.error "Error in get_node_allocatable::systemReserved_memory: #{errorStr}" + systemReserved_memory = "0.0" ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::systemReserved_memory: #{errorStr}") - end + end begin evictionHard_memory = parsed_response["kubeletconfig"]["evictionHard"]["memory.available"] @@ -127,16 +126,16 @@ def get_node_allocatable(cpu_capacity, memory_capacity) @log.error "Error in get_node_allocatable::evictionHard_memory: #{errorStr}" evictionHard_memory = "0.0" ApplicationInsightsUtility.sendExceptionTelemetry("Error in get_node_allocatable::evictionHard_memory: #{errorStr}") - end + end # do calculation in nanocore since that's what KubernetesApiClient.getMetricNumericValue expects cpu_capacity_number = cpu_capacity.to_i * 1000.0 ** 2 # subtract to get allocatable. 
Formula : Allocatable = Capacity - ( kube reserved + system reserved + eviction threshold ) # https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable if KubernetesApiClient.getMetricNumericValue("cpu", explicitlyReserved_cpu) > 0 - cpu_allocatable = cpu_capacity_number - KubernetesApiClient.getMetricNumericValue("cpu", explicitlyReserved_cpu) + cpu_allocatable = cpu_capacity_number - KubernetesApiClient.getMetricNumericValue("cpu", explicitlyReserved_cpu) else - cpu_allocatable = cpu_capacity_number - (KubernetesApiClient.getMetricNumericValue("cpu", kubereserved_cpu) + KubernetesApiClient.getMetricNumericValue("cpu", systemReserved_cpu)) + cpu_allocatable = cpu_capacity_number - (KubernetesApiClient.getMetricNumericValue("cpu", kubereserved_cpu) + KubernetesApiClient.getMetricNumericValue("cpu", systemReserved_cpu)) end # convert back to units similar to what we get for capacity cpu_allocatable = cpu_allocatable / (1000.0 ** 2) @@ -165,7 +164,7 @@ def get_all_container_limits containerResourceDimensionHash = {} response = CAdvisorMetricsAPIClient.getPodsFromCAdvisor(winNode: nil) if !response.nil? && !response.body.nil? && !response.body.empty? - podInventory = Yajl::Parser.parse(StringIO.new(response.body)) + podInventory = JSON.parse(response.body) podInventory["items"].each do |items| @log.info "in pod inventory items..." podNameSpace = items["metadata"]["namespace"] diff --git a/source/plugins/ruby/kubernetes_container_inventory.rb b/source/plugins/ruby/kubernetes_container_inventory.rb index 0b4da760a..24322b9e2 100644 --- a/source/plugins/ruby/kubernetes_container_inventory.rb +++ b/source/plugins/ruby/kubernetes_container_inventory.rb @@ -2,9 +2,8 @@ # frozen_string_literal: true class KubernetesContainerInventory - require "yajl/json_gem" - require "time" require "json" + require "time" require_relative "omslog" require_relative "ApplicationInsightsUtility" diff --git a/source/plugins/ruby/lib/application_insights/channel/contracts/json_serializable.rb b/source/plugins/ruby/lib/application_insights/channel/contracts/json_serializable.rb index 60838e215..8f4677044 100644 --- a/source/plugins/ruby/lib/application_insights/channel/contracts/json_serializable.rb +++ b/source/plugins/ruby/lib/application_insights/channel/contracts/json_serializable.rb @@ -1,4 +1,4 @@ -require 'yajl/json_gem' +require 'json' module ApplicationInsights module Channel diff --git a/source/plugins/ruby/lib/application_insights/channel/sender_base.rb b/source/plugins/ruby/lib/application_insights/channel/sender_base.rb index e5a4dea62..f5102c27a 100644 --- a/source/plugins/ruby/lib/application_insights/channel/sender_base.rb +++ b/source/plugins/ruby/lib/application_insights/channel/sender_base.rb @@ -1,4 +1,4 @@ -require "yajl/json_gem" +require "json" require "net/http" require "openssl" require "stringio" diff --git a/source/plugins/ruby/out_mdm.rb b/source/plugins/ruby/out_mdm.rb index 8f7f68727..6fcc22cda 100644 --- a/source/plugins/ruby/out_mdm.rb +++ b/source/plugins/ruby/out_mdm.rb @@ -14,7 +14,7 @@ def initialize require "net/https" require "securerandom" require "uri" - require "yajl/json_gem" + require "json" require_relative "KubernetesApiClient" require_relative "ApplicationInsightsUtility" require_relative "constants" @@ -102,7 +102,7 @@ def start end # If CUSTOM_METRICS_ENDPOINT provided, the url format shall be validated before emitting metrics into given endpoint. 
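  # An illustrative aside (plain Ruby stdlib behavior, hypothetical endpoint values, not lines from this plugin):
  #   require "uri"
  #   URI.parse("https://example.com/ingest")   # => #<URI::HTTPS https://example.com/ingest>
  #   URI.parse("not a valid uri")              # raises URI::InvalidURIError, so a bad endpoint is caught at startup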
- custom_metrics_endpoint = ENV["CUSTOM_METRICS_ENDPOINT"] + custom_metrics_endpoint = ENV['CUSTOM_METRICS_ENDPOINT'] if !custom_metrics_endpoint.to_s.empty? metrics_endpoint = custom_metrics_endpoint.strip URI.parse(metrics_endpoint) diff --git a/source/plugins/ruby/podinventory_to_mdm.rb b/source/plugins/ruby/podinventory_to_mdm.rb index a7f9c5435..5102274ed 100644 --- a/source/plugins/ruby/podinventory_to_mdm.rb +++ b/source/plugins/ruby/podinventory_to_mdm.rb @@ -3,7 +3,7 @@ # frozen_string_literal: true require "logger" -require "yajl/json_gem" +require "json" require "time" require_relative "oms_common" require_relative "CustomMetricsUtils" @@ -129,7 +129,7 @@ def get_pod_inventory_mdm_records(batch_time) controllerNameDimValue: podControllerNameDimValue, podCountMetricValue: value, } - records.push(Yajl::Parser.parse(record)) + records.push(JSON.parse(record)) } #Add pod metric records diff --git a/source/toml-parser/tomlrb.rb b/source/toml-parser/tomlrb.rb deleted file mode 100644 index c0eff9093..000000000 --- a/source/toml-parser/tomlrb.rb +++ /dev/null @@ -1,44 +0,0 @@ -require "time" -require "stringio" -require_relative "tomlrb/version" -require_relative "tomlrb/string_utils" -require_relative "tomlrb/scanner" -require_relative "tomlrb/parser" -require_relative "tomlrb/handler" - -module Tomlrb - class ParseError < StandardError; end - - # Parses a valid TOML string into its Ruby data structure - # - # @param string_or_io [String, StringIO] the content - # @param options [Hash] the options hash - # @option options [Boolean] :symbolize_keys (false) whether to return the keys as symbols or strings - # @return [Hash] the Ruby data structure represented by the input - def self.parse(string_or_io, **options) - io = string_or_io.is_a?(String) ? StringIO.new(string_or_io) : string_or_io - scanner = Scanner.new(io) - parser = Parser.new(scanner, options) - begin - handler = parser.parse - rescue Racc::ParseError => e - raise ParseError, e.message - end - - handler.output - end - - # Reads a file content and parses it into its Ruby data structure - # - # @param path [String] the path to the file - # @param options [Hash] the options hash - # @option options [Boolean] :symbolize_keys (false) whether to return the keys as symbols or strings - # @return [Hash] the Ruby data structure represented by the input - def self.load_file(path, **options) - # By default Ruby sets the external encoding of an IO object to the - # default external encoding. The default external encoding is set by - # locale encoding or the interpreter -E option. - tmp = File.read(path, :encoding => "utf-8") - Tomlrb.parse(tmp, options) - end -end diff --git a/source/toml-parser/tomlrb/generated_parser.rb b/source/toml-parser/tomlrb/generated_parser.rb deleted file mode 100644 index ebf815e7d..000000000 --- a/source/toml-parser/tomlrb/generated_parser.rb +++ /dev/null @@ -1,542 +0,0 @@ -# -# DO NOT MODIFY!!!! -# This file is automatically generated by Racc 1.4.14 -# from Racc grammer file "". 
-# - -require 'racc/parser.rb' -module Tomlrb - class GeneratedParser < Racc::Parser -##### State transition tables begin ### - -racc_action_table = [ - 2, 17, 11, 31, 12, 31, 13, 27, 14, 77, - 15, 16, 8, 78, 32, 10, 33, 29, 34, 29, - 57, 58, 59, 60, 56, 53, 52, 54, 55, 46, - 40, 41, 10, 57, 58, 59, 60, 56, 53, 52, - 54, 55, 46, 69, 70, 10, 57, 58, 59, 60, - 56, 53, 52, 54, 55, 46, 35, 36, 10, 57, - 58, 59, 60, 56, 53, 52, 54, 55, 46, 37, - 38, 10, 57, 58, 59, 60, 56, 53, 52, 54, - 55, 46, 43, 66, 10, 57, 58, 59, 60, 56, - 53, 52, 54, 55, 46, nil, nil, 10, 57, 58, - 59, 60, 56, 53, 52, 54, 55, 46, nil, nil, - 10, 57, 58, 59, 60, 56, 53, 52, 54, 55, - 46, 73, nil, 10, 57, 58, 59, 60, 56, 53, - 52, 54, 55, 46, 73, 21, 10, 22, nil, 23, - nil, 24, nil, 25, 26, 21, 19, 22, nil, 23, - nil, 24, nil, 25, 26, nil, 19 ] - -racc_action_check = [ - 1, 2, 1, 9, 1, 70, 1, 8, 1, 74, - 1, 1, 1, 74, 11, 1, 12, 9, 13, 70, - 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, - 20, 20, 32, 33, 33, 33, 33, 33, 33, 33, - 33, 33, 33, 42, 42, 33, 34, 34, 34, 34, - 34, 34, 34, 34, 34, 34, 14, 15, 34, 35, - 35, 35, 35, 35, 35, 35, 35, 35, 35, 16, - 19, 35, 36, 36, 36, 36, 36, 36, 36, 36, - 36, 36, 30, 40, 36, 37, 37, 37, 37, 37, - 37, 37, 37, 37, 37, nil, nil, 37, 43, 43, - 43, 43, 43, 43, 43, 43, 43, 43, nil, nil, - 43, 45, 45, 45, 45, 45, 45, 45, 45, 45, - 45, 45, nil, 45, 78, 78, 78, 78, 78, 78, - 78, 78, 78, 78, 78, 7, 78, 7, nil, 7, - nil, 7, nil, 7, 7, 41, 7, 41, nil, 41, - nil, 41, nil, 41, 41, nil, 41 ] - -racc_action_pointer = [ - nil, 0, 1, nil, nil, nil, nil, 133, -5, 1, - nil, -4, -2, 0, 38, 39, 51, nil, nil, 57, - 17, nil, nil, nil, nil, nil, nil, nil, nil, nil, - 64, nil, 17, 30, 43, 56, 69, 82, nil, nil, - 70, 143, 27, 95, nil, 108, nil, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - 3, nil, nil, nil, -4, nil, nil, nil, 121, nil ] - -racc_action_default = [ - -1, -56, -56, -2, -3, -4, -5, -56, -8, -56, - -22, -56, -56, -56, -56, -56, -56, 80, -6, -10, - -56, -15, -16, -17, -18, -19, -20, -7, -21, -23, - -56, -27, -46, -46, -46, -46, -46, -46, -9, -11, - -13, -56, -56, -46, -29, -46, -40, -41, -42, -43, - -44, -45, -47, -48, -49, -50, -51, -52, -53, -54, - -55, -30, -31, -32, -33, -34, -12, -14, -24, -25, - -56, -28, -35, -36, -56, -26, -37, -38, -46, -39 ] - -racc_goto_table = [ - 28, 18, 1, 72, 44, 61, 62, 63, 64, 65, - 3, 4, 5, 6, 7, 71, 39, 42, 68, 76, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, 67, 79, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, 75 ] - -racc_goto_check = [ - 11, 7, 1, 18, 15, 15, 15, 15, 15, 15, - 2, 3, 4, 5, 6, 15, 9, 13, 14, 19, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, 7, 18, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, 11 ] - -racc_goto_pointer = [ - nil, 2, 9, 10, 11, 12, 13, -6, nil, -4, - nil, -9, nil, -13, -24, -28, nil, nil, -42, -55, - nil, nil, nil ] - -racc_goto_default = [ - nil, nil, nil, nil, nil, 49, nil, nil, 20, nil, - 9, nil, 30, nil, nil, 74, 48, 45, nil, nil, - 47, 50, 51 ] - -racc_reduce_table = [ - 0, 0, :racc_error, - 0, 20, :_reduce_none, - 2, 20, :_reduce_none, - 1, 21, :_reduce_none, - 1, 21, :_reduce_none, - 1, 21, :_reduce_none, - 2, 22, :_reduce_none, - 2, 25, :_reduce_7, - 1, 25, :_reduce_8, - 2, 26, :_reduce_9, - 1, 26, :_reduce_10, - 2, 26, :_reduce_none, 
- 2, 28, :_reduce_12, - 1, 28, :_reduce_13, - 2, 28, :_reduce_none, - 1, 27, :_reduce_15, - 1, 27, :_reduce_16, - 1, 27, :_reduce_17, - 1, 27, :_reduce_18, - 1, 27, :_reduce_19, - 1, 27, :_reduce_20, - 2, 24, :_reduce_none, - 1, 29, :_reduce_22, - 1, 30, :_reduce_23, - 3, 30, :_reduce_none, - 1, 33, :_reduce_25, - 2, 33, :_reduce_none, - 1, 31, :_reduce_27, - 2, 32, :_reduce_none, - 3, 23, :_reduce_29, - 3, 23, :_reduce_30, - 3, 23, :_reduce_31, - 3, 23, :_reduce_32, - 3, 23, :_reduce_33, - 3, 23, :_reduce_34, - 2, 35, :_reduce_none, - 1, 37, :_reduce_36, - 2, 37, :_reduce_none, - 1, 38, :_reduce_38, - 2, 38, :_reduce_none, - 1, 36, :_reduce_40, - 1, 34, :_reduce_41, - 1, 34, :_reduce_none, - 1, 34, :_reduce_none, - 1, 39, :_reduce_none, - 1, 39, :_reduce_none, - 0, 41, :_reduce_none, - 1, 41, :_reduce_47, - 1, 41, :_reduce_48, - 1, 41, :_reduce_49, - 1, 41, :_reduce_50, - 1, 41, :_reduce_51, - 1, 40, :_reduce_52, - 1, 40, :_reduce_53, - 1, 40, :_reduce_54, - 1, 40, :_reduce_55 ] - -racc_reduce_n = 56 - -racc_shift_n = 80 - -racc_token_table = { - false => 0, - :error => 1, - :IDENTIFIER => 2, - :STRING_MULTI => 3, - :STRING_BASIC => 4, - :STRING_LITERAL_MULTI => 5, - :STRING_LITERAL => 6, - :DATETIME => 7, - :INTEGER => 8, - :FLOAT => 9, - :TRUE => 10, - :FALSE => 11, - "[" => 12, - "]" => 13, - "." => 14, - "{" => 15, - "}" => 16, - "," => 17, - "=" => 18 } - -racc_nt_base = 19 - -racc_use_result_var = true - -Racc_arg = [ - racc_action_table, - racc_action_check, - racc_action_default, - racc_action_pointer, - racc_goto_table, - racc_goto_check, - racc_goto_default, - racc_goto_pointer, - racc_nt_base, - racc_reduce_table, - racc_token_table, - racc_shift_n, - racc_reduce_n, - racc_use_result_var ] - -Racc_token_to_s_table = [ - "$end", - "error", - "IDENTIFIER", - "STRING_MULTI", - "STRING_BASIC", - "STRING_LITERAL_MULTI", - "STRING_LITERAL", - "DATETIME", - "INTEGER", - "FLOAT", - "TRUE", - "FALSE", - "\"[\"", - "\"]\"", - "\".\"", - "\"{\"", - "\"}\"", - "\",\"", - "\"=\"", - "$start", - "expressions", - "expression", - "table", - "assignment", - "inline_table", - "table_start", - "table_continued", - "table_identifier", - "table_next", - "inline_table_start", - "inline_continued", - "inline_assignment_key", - "inline_assignment_value", - "inline_next", - "value", - "array", - "start_array", - "array_continued", - "array_next", - "scalar", - "string", - "literal" ] - -Racc_debug_parser = false - -##### State transition tables end ##### - -# reduce 0 omitted - -# reduce 1 omitted - -# reduce 2 omitted - -# reduce 3 omitted - -# reduce 4 omitted - -# reduce 5 omitted - -# reduce 6 omitted - -module_eval(<<'.,.,', 'parser.y', 15) - def _reduce_7(val, _values, result) - @handler.start_(:array_of_tables) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 16) - def _reduce_8(val, _values, result) - @handler.start_(:table) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 19) - def _reduce_9(val, _values, result) - array = @handler.end_(:array_of_tables); @handler.set_context(array, is_array_of_tables: true) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 20) - def _reduce_10(val, _values, result) - array = @handler.end_(:table); @handler.set_context(array) - result - end -.,., - -# reduce 11 omitted - -module_eval(<<'.,.,', 'parser.y', 24) - def _reduce_12(val, _values, result) - array = @handler.end_(:array_of_tables); @handler.set_context(array, is_array_of_tables: true) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 25) - def _reduce_13(val, _values, 
result) - array = @handler.end_(:table); @handler.set_context(array) - result - end -.,., - -# reduce 14 omitted - -module_eval(<<'.,.,', 'parser.y', 29) - def _reduce_15(val, _values, result) - @handler.push(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 30) - def _reduce_16(val, _values, result) - @handler.push(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 31) - def _reduce_17(val, _values, result) - @handler.push(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 32) - def _reduce_18(val, _values, result) - @handler.push(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 33) - def _reduce_19(val, _values, result) - @handler.push(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 34) - def _reduce_20(val, _values, result) - @handler.push(val[0]) - result - end -.,., - -# reduce 21 omitted - -module_eval(<<'.,.,', 'parser.y', 40) - def _reduce_22(val, _values, result) - @handler.start_(:inline) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 43) - def _reduce_23(val, _values, result) - array = @handler.end_(:inline); @handler.push(Hash[*array]) - result - end -.,., - -# reduce 24 omitted - -module_eval(<<'.,.,', 'parser.y', 48) - def _reduce_25(val, _values, result) - array = @handler.end_(:inline) - array.map!.with_index{ |n,i| i.even? ? n.to_sym : n } if @handler.symbolize_keys - @handler.push(Hash[*array]) - - result - end -.,., - -# reduce 26 omitted - -module_eval(<<'.,.,', 'parser.y', 55) - def _reduce_27(val, _values, result) - @handler.push(val[0]) - result - end -.,., - -# reduce 28 omitted - -module_eval(<<'.,.,', 'parser.y', 61) - def _reduce_29(val, _values, result) - @handler.assign(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 62) - def _reduce_30(val, _values, result) - @handler.assign(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 63) - def _reduce_31(val, _values, result) - @handler.assign(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 64) - def _reduce_32(val, _values, result) - @handler.assign(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 65) - def _reduce_33(val, _values, result) - @handler.assign(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 66) - def _reduce_34(val, _values, result) - @handler.assign(val[0]) - result - end -.,., - -# reduce 35 omitted - -module_eval(<<'.,.,', 'parser.y', 72) - def _reduce_36(val, _values, result) - array = @handler.end_(:array); @handler.push(array) - result - end -.,., - -# reduce 37 omitted - -module_eval(<<'.,.,', 'parser.y', 76) - def _reduce_38(val, _values, result) - array = @handler.end_(:array); @handler.push(array) - result - end -.,., - -# reduce 39 omitted - -module_eval(<<'.,.,', 'parser.y', 80) - def _reduce_40(val, _values, result) - @handler.start_(:array) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 83) - def _reduce_41(val, _values, result) - @handler.push(val[0]) - result - end -.,., - -# reduce 42 omitted - -# reduce 43 omitted - -# reduce 44 omitted - -# reduce 45 omitted - -# reduce 46 omitted - -module_eval(<<'.,.,', 'parser.y', 92) - def _reduce_47(val, _values, result) - result = val[0].to_f - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 93) - def _reduce_48(val, _values, result) - result = val[0].to_i - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 94) - def _reduce_49(val, _values, result) - result = true - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 95) - def 
_reduce_50(val, _values, result) - result = false - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 96) - def _reduce_51(val, _values, result) - result = Time.new(*val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 99) - def _reduce_52(val, _values, result) - result = StringUtils.replace_escaped_chars(StringUtils.multiline_replacements(val[0])) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 100) - def _reduce_53(val, _values, result) - result = StringUtils.replace_escaped_chars(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 101) - def _reduce_54(val, _values, result) - result = StringUtils.strip_spaces(val[0]) - result - end -.,., - -module_eval(<<'.,.,', 'parser.y', 102) - def _reduce_55(val, _values, result) - result = val[0] - result - end -.,., - -def _reduce_none(val, _values, result) - val[0] -end - - end # class GeneratedParser - end # module Tomlrb diff --git a/source/toml-parser/tomlrb/handler.rb b/source/toml-parser/tomlrb/handler.rb deleted file mode 100644 index d60b54bc3..000000000 --- a/source/toml-parser/tomlrb/handler.rb +++ /dev/null @@ -1,73 +0,0 @@ -module Tomlrb - class Handler - attr_reader :output, :symbolize_keys - - def initialize(**options) - @output = {} - @current = @output - @stack = [] - @array_names = [] - @symbolize_keys = options[:symbolize_keys] - end - - def set_context(identifiers, is_array_of_tables: false) - @current = @output - - deal_with_array_of_tables(identifiers, is_array_of_tables) do |identifierz| - identifierz.each do |k| - k = k.to_sym if @symbolize_keys - if @current[k].is_a?(Array) - @current[k] << {} if @current[k].empty? - @current = @current[k].last - else - @current[k] ||= {} - @current = @current[k] - end - end - end - end - - def deal_with_array_of_tables(identifiers, is_array_of_tables) - identifiers.map!{|n| n.gsub("\"", '')} - stringified_identifier = identifiers.join('.') - - if is_array_of_tables - @array_names << stringified_identifier - last_identifier = identifiers.pop - elsif @array_names.include?(stringified_identifier) - raise ParseError, 'Cannot define a normal table with the same name as an already established array' - end - - yield(identifiers) - - if is_array_of_tables - last_identifier = last_identifier.to_sym if @symbolize_keys - @current[last_identifier] ||= [] - @current[last_identifier] << {} - @current = @current[last_identifier].last - end - end - - def assign(k) - k = k.to_sym if @symbolize_keys - @current[k] = @stack.pop - end - - def push(o) - @stack << o - end - - def start_(type) - push([type]) - end - - def end_(type) - array = [] - while (value = @stack.pop) != [type] - raise ParseError, 'Unclosed table' unless value - array.unshift(value) - end - array - end - end -end diff --git a/source/toml-parser/tomlrb/parser.rb b/source/toml-parser/tomlrb/parser.rb deleted file mode 100644 index 31771a1ca..000000000 --- a/source/toml-parser/tomlrb/parser.rb +++ /dev/null @@ -1,18 +0,0 @@ -require_relative "generated_parser" - -class Tomlrb::Parser < Tomlrb::GeneratedParser - def initialize(tokenizer, **options) - @tokenizer = tokenizer - @handler = Tomlrb::Handler.new(options) - super() - end - - def next_token - @tokenizer.next_token - end - - def parse - do_parse - @handler - end -end diff --git a/source/toml-parser/tomlrb/parser.y b/source/toml-parser/tomlrb/parser.y deleted file mode 100644 index fcebcac06..000000000 --- a/source/toml-parser/tomlrb/parser.y +++ /dev/null @@ -1,104 +0,0 @@ -class Tomlrb::GeneratedParser -token IDENTIFIER STRING_MULTI 
STRING_BASIC STRING_LITERAL_MULTI STRING_LITERAL DATETIME INTEGER FLOAT TRUE FALSE -rule - expressions - | expressions expression - ; - expression - : table - | assignment - | inline_table - ; - table - : table_start table_continued - ; - table_start - : '[' '[' { @handler.start_(:array_of_tables) } - | '[' { @handler.start_(:table) } - ; - table_continued - : ']' ']' { array = @handler.end_(:array_of_tables); @handler.set_context(array, is_array_of_tables: true) } - | ']' { array = @handler.end_(:table); @handler.set_context(array) } - | table_identifier table_next - ; - table_next - : ']' ']' { array = @handler.end_(:array_of_tables); @handler.set_context(array, is_array_of_tables: true) } - | ']' { array = @handler.end_(:table); @handler.set_context(array) } - | '.' table_continued - ; - table_identifier - : IDENTIFIER { @handler.push(val[0]) } - | STRING_BASIC { @handler.push(val[0]) } - | STRING_LITERAL { @handler.push(val[0]) } - | INTEGER { @handler.push(val[0]) } - | TRUE { @handler.push(val[0]) } - | FALSE { @handler.push(val[0]) } - ; - inline_table - : inline_table_start inline_continued - ; - inline_table_start - : '{' { @handler.start_(:inline) } - ; - inline_continued - : '}' { array = @handler.end_(:inline); @handler.push(Hash[*array]) } - | inline_assignment_key inline_assignment_value inline_next - ; - inline_next - : '}' { - array = @handler.end_(:inline) - array.map!.with_index{ |n,i| i.even? ? n.to_sym : n } if @handler.symbolize_keys - @handler.push(Hash[*array]) - } - | ',' inline_continued - ; - inline_assignment_key - : IDENTIFIER { @handler.push(val[0]) } - ; - inline_assignment_value - : '=' value - ; - assignment - : IDENTIFIER '=' value { @handler.assign(val[0]) } - | STRING_BASIC '=' value { @handler.assign(val[0]) } - | STRING_LITERAL '=' value { @handler.assign(val[0]) } - | INTEGER '=' value { @handler.assign(val[0]) } - | TRUE '=' value { @handler.assign(val[0]) } - | FALSE '=' value { @handler.assign(val[0]) } - ; - array - : start_array array_continued - ; - array_continued - : ']' { array = @handler.end_(:array); @handler.push(array) } - | value array_next - ; - array_next - : ']' { array = @handler.end_(:array); @handler.push(array) } - | ',' array_continued - ; - start_array - : '[' { @handler.start_(:array) } - ; - value - : scalar { @handler.push(val[0]) } - | array - | inline_table - ; - scalar - : string - | literal - ; - literal - | FLOAT { result = val[0].to_f } - | INTEGER { result = val[0].to_i } - | TRUE { result = true } - | FALSE { result = false } - | DATETIME { result = Time.new(*val[0])} - ; - string - : STRING_MULTI { result = StringUtils.replace_escaped_chars(StringUtils.multiline_replacements(val[0])) } - | STRING_BASIC { result = StringUtils.replace_escaped_chars(val[0]) } - | STRING_LITERAL_MULTI { result = StringUtils.strip_spaces(val[0]) } - | STRING_LITERAL { result = val[0] } - ; diff --git a/source/toml-parser/tomlrb/scanner.rb b/source/toml-parser/tomlrb/scanner.rb deleted file mode 100644 index d0f479eef..000000000 --- a/source/toml-parser/tomlrb/scanner.rb +++ /dev/null @@ -1,54 +0,0 @@ -require 'strscan' - -module Tomlrb - class Scanner - COMMENT = /#.*/ - IDENTIFIER = /[A-Za-z0-9_-]+/ - SPACE = /[ \t\r\n]/ - STRING_BASIC = /(["])(?:\\?.)*?\1/ - STRING_MULTI = /"{3}([\s\S]*?"{3,4})/m - STRING_LITERAL = /(['])(?:\\?.)*?\1/ - STRING_LITERAL_MULTI = /'{3}([\s\S]*?'{3})/m - DATETIME = /(-?\d{4})-(\d{2})-(\d{2})(?:(?:t|\s)(\d{2}):(\d{2}):(\d{2}(?:\.\d+)?))?(z|[-+]\d{2}:\d{2})?/i - FLOAT = 
diff --git a/source/toml-parser/tomlrb/string_utils.rb b/source/toml-parser/tomlrb/string_utils.rb
deleted file mode 100644
index 53d27e414..000000000
--- a/source/toml-parser/tomlrb/string_utils.rb
+++ /dev/null
@@ -1,33 +0,0 @@
-module Tomlrb
-  class StringUtils
-
-    SPECIAL_CHARS = {
-      '\\t' => "\t",
-      '\\b' => "\b",
-      '\\f' => "\f",
-      '\\n' => "\n",
-      '\\r' => "\r",
-      '\\"' => '"',
-      '\\\\' => '\\'
-    }.freeze
-
-    def self.multiline_replacements(str)
-      strip_spaces(str).gsub(/\\\n\s+/, '')
-    end
-
-    def self.replace_escaped_chars(str)
-      str.gsub(/\\(u[\da-fA-F]{4}|U[\da-fA-F]{8}|.)/) do |m|
-        if m.size == 2
-          SPECIAL_CHARS[m] || (raise Tomlrb::ParseError.new "Escape sequence #{m} is reserved")
-        else
-          m[2..-1].to_i(16).chr(Encoding::UTF_8)
-        end
-      end
-    end
-
-    def self.strip_spaces(str)
-      str[0] = '' if str[0] == "\n"
-      str
-    end
-  end
-end
diff --git a/source/toml-parser/tomlrb/version.rb b/source/toml-parser/tomlrb/version.rb
deleted file mode 100644
index b72a81b60..000000000
--- a/source/toml-parser/tomlrb/version.rb
+++ /dev/null
@@ -1,3 +0,0 @@
-module Tomlrb
-  VERSION = "1.2.8"
-end
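And the escape handling removed with string_utils.rb, sketched under the same assumption that the deleted module is loaded (single-quoted Ruby literals keep the backslashes, matching what the scanner delivered):

    Tomlrb::StringUtils.replace_escaped_chars('a\tb')     # => "a\tb" with a real tab
    Tomlrb::StringUtils.replace_escaped_chars('\u0041')   # => "A" (four-digit unicode escape)
    Tomlrb::StringUtils.replace_escaped_chars('\q')       # raises Tomlrb::ParseError ("reserved")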
diff --git a/test/e2e/src/core/Dockerfile b/test/e2e/src/core/Dockerfile
index 52bcd7cf8..499ec3edb 100644
--- a/test/e2e/src/core/Dockerfile
+++ b/test/e2e/src/core/Dockerfile
@@ -1,4 +1,6 @@
-FROM python:3.6
+# Default value can be found in the internal wiki. External builds can use a python 3.6 base image.
+ARG PYTHON_BASE_IMAGE=
+FROM ${PYTHON_BASE_IMAGE}
 
 RUN pip install --trusted-host pypi.org --trusted-host files.pythonhosted.org pytest pytest-xdist filelock requests kubernetes adal msrestazure
 
@@ -11,14 +13,9 @@ RUN apt-get update && apt-get -y upgrade && \
     CLI_REPO=$(lsb_release -cs) && \
     echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ ${CLI_REPO} main" \
     > /etc/apt/sources.list.d/azure-cli.list && \
-    apt-get update && \
-    apt-get install -y azure-cli && \
     rm -rf /var/lib/apt/lists/*
 
 RUN python3 -m pip install junit_xml
-
-COPY --from=lachlanevenson/k8s-kubectl:v1.20.5 /usr/local/bin/kubectl /usr/local/bin/kubectl
-
 COPY ./core/e2e_tests.sh /
 COPY ./core/setup_failure_handler.py /
 COPY ./core/pytest.ini /e2etests/
diff --git a/test/scenario/yamls/many-containers-in-pod.yaml b/test/scenario/yamls/many-containers-in-pod.yaml
index ac3871068..986c94e93 100644
--- a/test/scenario/yamls/many-containers-in-pod.yaml
+++ b/test/scenario/yamls/many-containers-in-pod.yaml
@@ -1117,7 +1117,7 @@ spec:
           sleep 1;
         done
       nodeSelector:
-        beta.kubernetes.io/os: linux
+        kubernetes.io/os: linux
       tolerations:
       - key: "node-role.kubernetes.io/master"
         operator: "Equal"
diff --git a/test/scenario/yamls/nested-json.yaml b/test/scenario/yamls/nested-json.yaml
index f881b3dc9..e3c7ac3b0 100644
--- a/test/scenario/yamls/nested-json.yaml
+++ b/test/scenario/yamls/nested-json.yaml
@@ -11,5 +11,5 @@ spec:
     command: ["/bin/sh"]
     args: ["-c", "while true; do echo '{\"container_name\": \"nested-json\", \"pod_name\": \"nested-json\", \"pod_namespace\": \"nested-json\", \"environment\": \"test\", \"logmessage\": { \"msg\": \"hello, world\", \"level\": \"info\"} }'; sleep 30; done"]
   nodeSelector:
-    beta.kubernetes.io/os: linux
+    kubernetes.io/os: linux
   restartPolicy: OnFailure
diff --git a/test/scenario/yamls/simple-json.yaml b/test/scenario/yamls/simple-json.yaml
index 9a9597651..8ab5822d4 100644
--- a/test/scenario/yamls/simple-json.yaml
+++ b/test/scenario/yamls/simple-json.yaml
@@ -11,5 +11,5 @@ spec:
     command: ["/bin/sh"]
     args: ["-c", "while true; do echo '{\"container_name\": \"simple-json\", \"pod_name\": \"simple-json\",\"pod_namespace\": \"simple-json\",\"environment\": \"test\"}'; sleep 30; done"]
   nodeSelector:
-    beta.kubernetes.io/os: linux
+    kubernetes.io/os: linux
  restartPolicy: OnFailure
diff --git a/test/scenario/yamls/two-containers-in-pod.yaml b/test/scenario/yamls/two-containers-in-pod.yaml
index 52d36494c..d892be59d 100644
--- a/test/scenario/yamls/two-containers-in-pod.yaml
+++ b/test/scenario/yamls/two-containers-in-pod.yaml
@@ -24,4 +24,4 @@ spec:
         sleep 1;
         done
   nodeSelector:
-    beta.kubernetes.io/os: linux
+    kubernetes.io/os: linux
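Two remarks on the test changes above. kubernetes.io/os is the GA node label that replaced the long-deprecated beta.kubernetes.io/os, so the nodeSelector updates keep these scenario pods schedulable on current clusters. And because the new ARG in the e2e Dockerfile has an empty default, the base image must now be supplied at build time, e.g. docker build --build-arg PYTHON_BASE_IMAGE=python:3.6 (the exact build context path depends on the repo layout and is not shown here).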