diff --git a/.buildkite/auditbeat/auditbeat-pipeline.yml b/.buildkite/auditbeat/auditbeat-pipeline.yml index 2ac4a8911a11..f5ca39b36456 100644 --- a/.buildkite/auditbeat/auditbeat-pipeline.yml +++ b/.buildkite/auditbeat/auditbeat-pipeline.yml @@ -27,6 +27,33 @@ env: TEST_COVERAGE: "true" steps: + - group: "Auditbeat Check/Update" + key: "auditbeat-check-update" + + steps: + - label: "Run check/update" + command: | + make -C auditbeat check update + make check-no-changes + retry: + automatic: + - limit: 3 + agents: + image: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-beats-ci-with-hooks:latest" + cpu: "4000m" + memory: "8Gi" + useCustomGlobalHooks: true + notify: + - github_commit_status: + context: "auditbeat: check/update" + + - wait: ~ + # with PRs, we want to run mandatory tests only if check/update step succeeds + # for other cases, e.g. merge commits, we want to run mandatory tests (and publish) independently of other tests + # this allows building DRA artifacts even if there is flakiness in check/update step + if: build.env("BUILDKITE_PULL_REQUEST") != "false" + depends_on: "auditbeat-check-update" + - group: "Auditbeat Mandatory Testing" key: "auditbeat-mandatory-tests" diff --git a/.buildkite/filebeat/filebeat-pipeline.yml b/.buildkite/filebeat/filebeat-pipeline.yml index 87f1925a3722..849720d1e2df 100644 --- a/.buildkite/filebeat/filebeat-pipeline.yml +++ b/.buildkite/filebeat/filebeat-pipeline.yml @@ -27,6 +27,32 @@ env: TEST_COVERAGE: "true" steps: + - group: "Filebeat Check/Update" + key: "filebeat-check-update" + steps: + - label: "Run check/update" + command: | + make -C filebeat check update + make check-no-changes + retry: + automatic: + - limit: 3 + agents: + image: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-beats-ci-with-hooks:0.3" + cpu: "4000m" + memory: "8Gi" + useCustomGlobalHooks: true + notify: + - github_commit_status: + context: "filebeat: check/update" + + - wait: ~ + # with PRs, we want to run 
mandatory tests only if check/update step succeeds + # for other cases, e.g. merge commits, we want to run mandatory tests (and publish) independently of other tests + # this allows building DRA artifacts even if there is flakiness in check/update step + if: build.env("BUILDKITE_PULL_REQUEST") != "false" + depends_on: "filebeat-check-update" + - group: "Filebeat Mandatory Tests" key: "filebeat-mandatory-tests" steps: diff --git a/.buildkite/libbeat/pipeline.libbeat.yml b/.buildkite/libbeat/pipeline.libbeat.yml index fd485279858e..7ebe5d76f988 100644 --- a/.buildkite/libbeat/pipeline.libbeat.yml +++ b/.buildkite/libbeat/pipeline.libbeat.yml @@ -16,6 +16,32 @@ env: TEST_COVERAGE: "true" steps: + - group: "libbeat Check/Update" + key: "libbeat-check-update" + steps: + - label: "Run check/update" + command: | + make -C libbeat check update + make check-no-changes + retry: + automatic: + - limit: 3 + agents: + image: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-beats-ci-with-hooks:latest" + cpu: "4000m" + memory: "8Gi" + useCustomGlobalHooks: true + notify: + - github_commit_status: + context: "libbeat: check/update" + + - wait: ~ + # with PRs, we want to run mandatory tests only if check/update step succeeds + # for other cases, e.g. 
merge commits, we want to run mandatory tests (and publish) independently of other tests + # this allows building DRA artifacts even if there is flakiness in check/update step + if: build.env("BUILDKITE_PULL_REQUEST") != "false" + depends_on: "libbeat-check-update" + - group: "Mandatory Tests" key: "mandatory-tests" steps: diff --git a/.buildkite/metricbeat/pipeline.yml b/.buildkite/metricbeat/pipeline.yml index ed9fb14f3d42..a8092887386e 100644 --- a/.buildkite/metricbeat/pipeline.yml +++ b/.buildkite/metricbeat/pipeline.yml @@ -2,7 +2,8 @@ name: "beats-metricbeat" env: - AWS_ARM_INSTANCE_TYPE: "t4g.xlarge" + AWS_ARM_INSTANCE_TYPE: "m6g.xlarge" + AWS_IMAGE_UBUNTU_ARM_64: "platform-ingest-beats-ubuntu-2204-aarch64" GCP_DEFAULT_MACHINE_TYPE: "c2d-highcpu-8" GCP_HI_PERF_MACHINE_TYPE: "c2d-highcpu-16" @@ -10,7 +11,6 @@ env: IMAGE_MACOS_ARM: "generic-13-ventura-arm" IMAGE_MACOS_X86_64: "generic-13-ventura-x64" - IMAGE_UBUNTU_ARM_64: "platform-ingest-beats-ubuntu-2204-aarch64" IMAGE_UBUNTU_X86_64: "family/platform-ingest-beats-ubuntu-2204" IMAGE_WIN_10: "family/platform-ingest-beats-windows-10" IMAGE_WIN_11: "family/platform-ingest-beats-windows-11" @@ -32,10 +32,13 @@ env: steps: - group: "Metricbeat Mandatory Tests" key: "metricbeat-mandatory-tests" + steps: - - label: ":linux: Ubuntu Unit Tests" + - label: ":ubuntu: Metricbeat Unit Tests" key: "mandatory-linux-unit-test" - command: "cd metricbeat && mage build unitTest" + command: | + cd metricbeat + mage build unitTest retry: automatic: - limit: 3 @@ -48,27 +51,20 @@ steps: - "metricbeat/build/*.json" notify: - github_commit_status: - context: "metricbeat: Ubuntu Unit Tests" + context: "metricbeat: Linux x86_64 Unit Tests" - - label: ":go: Go Integration Tests" + - label: ":ubuntu: Metricbeat Go Integration Tests" key: "mandatory-int-test" command: | set -euo pipefail # defines the MODULE env var based on what's changed in a PR source .buildkite/scripts/changesets.sh defineModuleFromTheChangeSet metricbeat - echo 
"~~~ Running tests" - - # TODO move this section to base image / pre-command hook - echo "~~~ Installing kind" - asdf plugin add kind - asdf install kind $ASDF_KIND_VERSION - - .buildkite/deploy/kubernetes/scripts/kind-setup.sh echo "~~~ Running tests" export KUBECONFIG="$$PWD/kubecfg" - cd metricbeat && mage goIntegTest + cd metricbeat + mage goIntegTest retry: automatic: - limit: 3 @@ -83,25 +79,18 @@ steps: - github_commit_status: context: "metricbeat: Go Integration Tests" - - label: ":python: Python Integration Tests" + - label: ":ubuntu: Metricbeat Python Integration Tests" key: "mandatory-python-int-test" command: | set -euo pipefail # defines the MODULE env var based on what's changed in a PR source .buildkite/scripts/changesets.sh defineModuleFromTheChangeSet metricbeat - echo "~~~ Running tests" - - # TODO move this section to base image / pre-command hook - echo "~~~ Installing kind" - asdf plugin add kind - asdf install kind $ASDF_KIND_VERSION - - .buildkite/deploy/kubernetes/scripts/kind-setup.sh echo "~~~ Running tests" export KUBECONFIG="$$PWD/kubecfg" - cd metricbeat && mage pythonIntegTest + cd metricbeat + mage pythonIntegTest retry: automatic: - limit: 3 @@ -116,7 +105,7 @@ steps: - github_commit_status: context: "metricbeat: Python Integration Tests" - - label: ":negative_squared_cross_mark: Cross compile" + - label: ":ubuntu: Metricbeat Crosscompile" key: "mandatory-cross-compile" command: "make -C metricbeat crosscompile" retry: @@ -131,9 +120,9 @@ steps: - "metricbeat/build/*.json" notify: - github_commit_status: - context: "metricbeat: Cross compile" + context: "metricbeat: Crosscompile" - - label: ":windows: Windows 2016 Unit Tests" + - label: ":windows: Metricbeat Win-2016 Unit Tests" command: | Set-Location -Path metricbeat mage build unitTest @@ -152,9 +141,9 @@ steps: - "metricbeat/build/*.json" notify: - github_commit_status: - context: "metricbeat: Windows 2016 Unit Tests" + context: "metricbeat: Win-2016 Unit Tests" - - label: 
":windows: Windows 2022 Unit Tests" + - label: ":windows: Metricbeat Win-2022 Unit Tests" command: | Set-Location -Path metricbeat mage build unitTest @@ -173,13 +162,14 @@ steps: - "metricbeat/build/*.json" notify: - github_commit_status: - context: "metricbeat: Windows 2022 Unit Tests" + context: "metricbeat: Win-2022 Unit Tests" - group: "Metricbeat Extended Windows Tests" key: "metricbeat-extended-win-tests" if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*[Ww]indows.*/ + steps: - - label: ":windows: Windows 10 Unit Tests" + - label: ":windows: Metricbeat Win 10 Unit Tests" command: | Set-Location -Path metricbeat mage build unitTest @@ -198,9 +188,9 @@ steps: - "metricbeat/build/*.json" notify: - github_commit_status: - context: "metricbeat: Extended Windows 10 Unit Tests" + context: "metricbeat: Win 10 Unit Tests" - - label: ":windows: Windows 11 Unit Tests" + - label: ":windows: Metricbeat Win 11 Unit Tests" command: | Set-Location -Path metricbeat mage build unitTest @@ -219,9 +209,9 @@ steps: - "metricbeat/build/*.json" notify: - github_commit_status: - context: "metricbeat: Extended Windows 11 Unit Tests" + context: "metricbeat: Win 11 Unit Tests" - - label: ":windows: Windows 2019 Unit Tests" + - label: ":windows: Metricbeat Win-2019 Unit Tests" command: | Set-Location -Path metricbeat mage build unitTest @@ -240,18 +230,20 @@ steps: - "metricbeat/build/*.json" notify: - github_commit_status: - context: "metricbeat: Extended Windows 2019 Unit Tests" + context: "metricbeat: Win-2019 Unit Tests" - - group: "Metricbeat Extended MacOS Tests" + - group: "Metricbeat Extended macOS Tests" key: "metricbeat-extended-macos-tests" if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ + steps: - - label: ":mac: MacOS x64_64 Unit Tests" + - label: ":mac: Metricbeat macOS x64_64 Unit Tests" key: "extended-macos-x64-64-unit-tests" command: | set -euo pipefail source 
.buildkite/scripts/install_macos_tools.sh - cd metricbeat && mage build unitTest + cd metricbeat + mage build unitTest retry: automatic: - limit: 3 @@ -263,15 +255,17 @@ steps: - "metricbeat/build/*.json" notify: - github_commit_status: - context: "metricbeat: Extended MacOS x86_64 Unit Tests" + context: "metricbeat: macOS x86_64 Unit Tests" - - label: ":mac: MacOS arm64 Unit Tests" + - label: ":mac: Metricbeat macOS arm64 Unit Tests" key: "extended-macos-arm64-unit-tests" - skip: "due to https://github.com/elastic/beats/issues/33035" + skip: "Skipping due to elastic/beats#33035" + # https://github.com/elastic/beats/issues/33035 command: | set -euo pipefail source .buildkite/scripts/install_macos_tools.sh - cd metricbeat && mage build unitTest + cd metricbeat + mage build unitTest retry: automatic: - limit: 3 @@ -283,7 +277,7 @@ steps: - "metricbeat/build/*.json" notify: - github_commit_status: - context: "metricbeat: Extended MacOS arm64 Unit Tests" + context: "metricbeat: macOS arm64 Unit Tests" - wait: ~ # with PRs, we want to run packaging only if mandatory tests succeed @@ -295,9 +289,11 @@ steps: - group: "Metricbeat Packaging" key: "metricbeat-packaging" steps: - - label: ":linux: Packaging Linux" + - label: ":ubuntu: Metricbeat Packaging Linux" key: "packaging-linux" - command: "cd metricbeat && mage package" + command: | + cd metricbeat + mage package retry: automatic: - limit: 3 @@ -314,20 +310,22 @@ steps: - github_commit_status: context: "metricbeat: Packaging Linux" - - label: ":linux: Packaging ARM" + - label: ":ubuntu: Metricbeat Packaging Linux arm64" key: "packaging-arm" - command: "cd metricbeat && mage package" + command: | + cd metricbeat + mage package retry: automatic: - limit: 3 timeout_in_minutes: 20 agents: provider: "aws" - imagePrefix: "${IMAGE_UBUNTU_ARM_64}" + imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" instanceType: "${AWS_ARM_INSTANCE_TYPE}" env: PLATFORMS: "linux/arm64" PACKAGES: "docker" notify: - github_commit_status: 
- context: "metricbeat: Packaging Linux ARM" + context: "metricbeat: Packaging Linux arm64" diff --git a/.buildkite/packetbeat/pipeline.packetbeat.yml b/.buildkite/packetbeat/pipeline.packetbeat.yml index c9a5c9f06f2a..40e5f0f4b6fb 100644 --- a/.buildkite/packetbeat/pipeline.packetbeat.yml +++ b/.buildkite/packetbeat/pipeline.packetbeat.yml @@ -25,6 +25,32 @@ env: TEST_COVERAGE: "true" steps: + - group: "Packetbeat Check/Update" + key: "packetbeat-check-update" + steps: + - label: "Run check/update" + command: | + make -C packetbeat check update + make check-no-changes + retry: + automatic: + - limit: 3 + agents: + image: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-beats-ci-with-hooks:latest" + cpu: "4000m" + memory: "8Gi" + useCustomGlobalHooks: true + notify: + - github_commit_status: + context: "packetbeat: check/update" + + - wait: ~ + # with PRs, we want to run mandatory tests only if check/update step succeeds + # for other cases, e.g. merge commits, we want to run mandatory tests (and publish) independently of other tests + # this allows building DRA artifacts even if there is flakiness in check/update step + if: build.env("BUILDKITE_PULL_REQUEST") != "false" + depends_on: "packetbeat-check-update" + - group: "packetbeat Mandatory Tests" key: "packetbeat-mandatory-tests" steps: diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 52768481bff9..712b6200697d 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -116,6 +116,7 @@ steps: - .buildkite/x-pack/pipeline.xpack.filebeat.yml - .buildkite/scripts - .buildkite/hooks/ + - .buildkite/deploy/docker/docker-compose.yml #OSS - go.mod - pytest.ini @@ -219,6 +220,7 @@ steps: - .buildkite/x-pack/pipeline.xpack.metricbeat.yml - .buildkite/scripts - .buildkite/hooks/ + - .buildkite/deploy/docker/docker-compose.yml #OSS - go.mod - pytest.ini diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index 55affae41289..b2cbb06e70ff --- 
a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -5,7 +5,7 @@ "pipelineSlug": "beats", "allow_org_users": true, "allowed_repo_permissions": ["admin", "write"], - "allowed_list": ["dependabot[bot]", "mergify[bot]"], + "allowed_list": ["dependabot[bot]", "mergify[bot]", "github-actions[bot]"], "set_commit_status": true, "build_on_commit": true, "build_on_comment": true, @@ -21,7 +21,7 @@ "pipelineSlug": "beats-xpack-elastic-agent", "allow_org_users": true, "allowed_repo_permissions": ["admin", "write"], - "allowed_list": ["dependabot[bot]", "mergify[bot]"], + "allowed_list": ["dependabot[bot]", "mergify[bot]", "github-actions[bot]"], "set_commit_status": true, "build_on_commit": true, "build_on_comment": true, diff --git a/.buildkite/scripts/changesets.sh b/.buildkite/scripts/changesets.sh index 7e79d2d31647..1ab80edc585c 100644 --- a/.buildkite/scripts/changesets.sh +++ b/.buildkite/scripts/changesets.sh @@ -15,7 +15,7 @@ definePattern() { } defineExclusions() { - exclude="^$beatPath\/module\/(.*(?- + build.pull_request.id == null || (build.creator.name == 'elasticmachine' && build.pull_request.id != null) + repository: elastic/beats + cancel_intermediate_builds: true + cancel_intermediate_builds_branch_filter: "!main !8.*" + skip_intermediate_builds: true + skip_intermediate_builds_branch_filter: "!main !8.*" + env: + # TODO set to true once https://github.com/elastic/ingest-dev/issues/3001 has been resolved + ELASTIC_PR_COMMENTS_ENABLED: "false" + teams: + ingest-fp: + access_level: MANAGE_BUILD_AND_READ + everyone: + access_level: BUILD_AND_READ + + --- # yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/rre.schema.json apiVersion: backstage.io/v1alpha1 diff --git a/dev-tools/kubernetes/filebeat/Dockerfile.debug b/dev-tools/kubernetes/filebeat/Dockerfile.debug index ccd70822f3f0..38a16d72813f 100644 --- a/dev-tools/kubernetes/filebeat/Dockerfile.debug +++ 
b/dev-tools/kubernetes/filebeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.21.10 as builder +FROM golang:1.21.11 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/dev-tools/kubernetes/heartbeat/Dockerfile.debug b/dev-tools/kubernetes/heartbeat/Dockerfile.debug index 2b24c9452865..d8b83e68719b 100644 --- a/dev-tools/kubernetes/heartbeat/Dockerfile.debug +++ b/dev-tools/kubernetes/heartbeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.21.10 as builder +FROM golang:1.21.11 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/dev-tools/kubernetes/metricbeat/Dockerfile.debug b/dev-tools/kubernetes/metricbeat/Dockerfile.debug index f76069d22157..5ab57fd73561 100644 --- a/dev-tools/kubernetes/metricbeat/Dockerfile.debug +++ b/dev-tools/kubernetes/metricbeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.21.10 as builder +FROM golang:1.21.11 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/filebeat/docs/howto/migrate-to-filestream.asciidoc b/filebeat/docs/howto/migrate-to-filestream.asciidoc index 30057fab7251..55c05a5b46d8 100644 --- a/filebeat/docs/howto/migrate-to-filestream.asciidoc +++ b/filebeat/docs/howto/migrate-to-filestream.asciidoc @@ -234,6 +234,13 @@ The example configuration shown earlier needs to be adjusted as well: |backoff.max |=== +=== Step 4 + +The events produced by `filestream` input with `take_over: true` contain a `take_over` tag. +You can filter on this tag in Kibana and see the events which came from a filestream in the "take_over" mode. + +Once you start receiving events with this tag, you can remove `take_over: true` and restart the fileinput again. 
+ === If something went wrong If for whatever reason you'd like to revert the configuration after running the migrated configuration diff --git a/filebeat/tests/system/test_crawler.py b/filebeat/tests/system/test_crawler.py index 2bea57223fe8..39f0d4541240 100644 --- a/filebeat/tests/system/test_crawler.py +++ b/filebeat/tests/system/test_crawler.py @@ -485,7 +485,7 @@ def test_tail_files(self): self.wait_until( lambda: self.log_contains( "Start next scan"), - max_timeout=5) + max_timeout=10) with open(testfile, 'a') as f: # write additional lines @@ -596,7 +596,7 @@ def test_encodings(self): # run filebeat filebeat = self.start_beat() self.wait_until(lambda: self.output_has(lines=len(encodings)), - max_timeout=15) + max_timeout=25) # write another line in all files for _, enc_py, text in encodings: diff --git a/filebeat/tests/system/test_harvester.py b/filebeat/tests/system/test_harvester.py index 3b40f3a6730d..9099abc699e0 100644 --- a/filebeat/tests/system/test_harvester.py +++ b/filebeat/tests/system/test_harvester.py @@ -858,6 +858,6 @@ def test_debug_reader(self): # 13 on unix, 14 on windows. 
self.wait_until(lambda: self.log_contains(re.compile( - 'Matching null byte found at offset (13|14)')), max_timeout=5) + 'Matching null byte found at offset (13|14)')), max_timeout=10) filebeat.check_kill_and_wait() diff --git a/filebeat/tests/system/test_registrar.py b/filebeat/tests/system/test_registrar.py index 53af186dbf9b..b08eda2777cb 100644 --- a/filebeat/tests/system/test_registrar.py +++ b/filebeat/tests/system/test_registrar.py @@ -252,9 +252,11 @@ def test_registry_file_update_permissions(self): self.assertEqual(self.file_permissions(os.path.join(registry_path, "log.json")), "0o600") + registry_home = "a/b/c/d/registry_x" + registry_path = os.path.join(registry_home, "filebeat") self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", - registry_home="a/b/c/registry_x", + registry_home=registry_home, registry_file_permissions=0o640 ) @@ -266,7 +268,7 @@ def test_registry_file_update_permissions(self): # the logging and actual writing the file. Seems to happen on Windows. 
self.wait_until( lambda: self.has_registry(registry_path), - max_timeout=1) + max_timeout=10) # Wait a moment to make sure registry is completely written time.sleep(1) @@ -950,7 +952,7 @@ def test_restart_state(self): path=os.path.abspath(self.working_dir) + "/log/*", close_inactive="200ms", ignore_older="2000ms", - clean_inactive="3s", + clean_inactive="10s", ) filebeat = self.start_beat() @@ -976,7 +978,7 @@ def test_restart_state_reset(self): self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", clean_inactive="10s", - ignore_older="5s" + ignore_older="9s" ) os.mkdir(self.working_dir + "/log/") @@ -1003,7 +1005,7 @@ def test_restart_state_reset(self): self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/test2.log", clean_inactive="10s", - ignore_older="5s", + ignore_older="9s", ) filebeat = self.start_beat(output="filebeat2.log") @@ -1137,7 +1139,7 @@ def test_restart_state_reset_ttl_no_clean_inactive(self): self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/test.log", clean_inactive="10s", - ignore_older="5s" + ignore_older="9s" ) os.mkdir(self.working_dir + "/log/") diff --git a/filebeat/tests/system/test_shutdown.py b/filebeat/tests/system/test_shutdown.py index 8f18337435f5..8e7818377309 100644 --- a/filebeat/tests/system/test_shutdown.py +++ b/filebeat/tests/system/test_shutdown.py @@ -27,7 +27,13 @@ def test_shutdown(self): ) for i in range(1, 5): proc = self.start_beat(logging_args=["-e", "-v"]) - time.sleep(.5) + + # Flaky on MacOS, see https://github.com/elastic/beats/issues/39613#issuecomment-2158812325 + # we need to wait a bit longer for filebeat to start + if platform.system() == "Darwin": + time.sleep(10) + else: + time.sleep(.5) proc.check_kill_and_wait() @unittest.skip("Skipped as flaky: https://github.com/elastic/beats/issues/14647") diff --git a/go.mod b/go.mod index d77097340676..5204045ddd9b 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module 
github.com/elastic/beats/v7 go 1.21.0 -toolchain go1.21.10 +toolchain go1.21.11 require ( cloud.google.com/go/bigquery v1.55.0 @@ -74,7 +74,7 @@ require ( github.com/elastic/elastic-agent-client/v7 v7.11.0 github.com/elastic/go-concert v0.2.0 github.com/elastic/go-libaudit/v2 v2.5.0 - github.com/elastic/go-licenser v0.4.1 + github.com/elastic/go-licenser v0.4.2 github.com/elastic/go-lookslike v1.0.1 github.com/elastic/go-lumber v0.1.2-0.20220819171948-335fde24ea0f github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595 @@ -150,7 +150,7 @@ require ( go.elastic.co/ecszap v1.0.2 go.elastic.co/go-licence-detector v0.6.0 go.etcd.io/bbolt v1.3.6 - go.uber.org/atomic v1.11.0 + go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 golang.org/x/crypto v0.22.0 diff --git a/go.sum b/go.sum index 3f0ce0bf1fc3..845774c1f890 100644 --- a/go.sum +++ b/go.sum @@ -568,8 +568,9 @@ github.com/elastic/go-elasticsearch/v8 v8.14.0 h1:1ywU8WFReLLcxE1WJqii3hTtbPUE2h github.com/elastic/go-elasticsearch/v8 v8.14.0/go.mod h1:WRvnlGkSuZyp83M2U8El/LGXpCjYLrvlkSgkAH4O5I4= github.com/elastic/go-libaudit/v2 v2.5.0 h1:5OK919QRnGtcjVBz3n/cs5F42im1mPlVTA9TyIn2K54= github.com/elastic/go-libaudit/v2 v2.5.0/go.mod h1:AjlnhinP+kKQuUJoXLVrqxBM8uyhQmkzoV6jjsCFP4Q= -github.com/elastic/go-licenser v0.4.1 h1:1xDURsc8pL5zYT9R29425J3vkHdt4RT5TNEMeRN48x4= github.com/elastic/go-licenser v0.4.1/go.mod h1:V56wHMpmdURfibNBggaSBfqgPxyT1Tldns1i87iTEvU= +github.com/elastic/go-licenser v0.4.2 h1:bPbGm8bUd8rxzSswFOqvQh1dAkKGkgAmrPxbUi+Y9+A= +github.com/elastic/go-licenser v0.4.2/go.mod h1:W8eH6FaZDR8fQGm+7FnVa7MxI1b/6dAqxz+zPB8nm5c= github.com/elastic/go-lookslike v1.0.1 h1:qVieyn6i/kx4xntar1cEB0qrGHVGNCX5KC8czAaTW/0= github.com/elastic/go-lookslike v1.0.1/go.mod h1:iSXdN6mmM0UVwbup8BY39Tyb51Dd+nX3eBsi5EgBAEo= 
github.com/elastic/go-lumber v0.1.2-0.20220819171948-335fde24ea0f h1:TsPpU5EAwlt7YZoupKlxZ093qTZYdGou3EhfTF1U0B4= diff --git a/heartbeat/Dockerfile b/heartbeat/Dockerfile index f8765612d4bb..0a1e0501ab4f 100644 --- a/heartbeat/Dockerfile +++ b/heartbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.10 +FROM golang:1.21.11 RUN \ apt-get update \ diff --git a/libbeat/autodiscover/providers/kubernetes/pod.go b/libbeat/autodiscover/providers/kubernetes/pod.go index b11faac4931d..c5f9c721eb90 100644 --- a/libbeat/autodiscover/providers/kubernetes/pod.go +++ b/libbeat/autodiscover/providers/kubernetes/pod.go @@ -100,9 +100,9 @@ func NewPodEventer(uuid uuid.UUID, cfg *conf.C, client k8s.Interface, publish fu if metaConf.Node.Enabled() || config.Hints.Enabled() { options := kubernetes.WatchOptions{ - SyncTimeout: config.SyncPeriod, - Node: config.Node, - Namespace: config.Namespace, + SyncTimeout: config.SyncPeriod, + Node: config.Node, + HonorReSyncs: true, } nodeWatcher, err = kubernetes.NewNamedWatcher("node", client, &kubernetes.Node{}, options, nil) if err != nil { @@ -112,20 +112,24 @@ func NewPodEventer(uuid uuid.UUID, cfg *conf.C, client k8s.Interface, publish fu if metaConf.Namespace.Enabled() || config.Hints.Enabled() { namespaceWatcher, err = kubernetes.NewNamedWatcher("namespace", client, &kubernetes.Namespace{}, kubernetes.WatchOptions{ - SyncTimeout: config.SyncPeriod, + SyncTimeout: config.SyncPeriod, + Namespace: config.Namespace, + HonorReSyncs: true, }, nil) if err != nil { logger.Errorf("couldn't create watcher for %T due to error %+v", &kubernetes.Namespace{}, err) } } - // Resource is Pod so we need to create watchers for Replicasets and Jobs that it might belongs to + // Resource is Pod, so we need to create watchers for Replicasets and Jobs that it might belong to // in order to be able to retrieve 2nd layer Owner metadata like in case of: // Deployment -> Replicaset -> Pod // CronJob -> job -> Pod if metaConf.Deployment { replicaSetWatcher, 
err = kubernetes.NewNamedWatcher("resource_metadata_enricher_rs", client, &kubernetes.ReplicaSet{}, kubernetes.WatchOptions{ - SyncTimeout: config.SyncPeriod, + SyncTimeout: config.SyncPeriod, + Namespace: config.Namespace, + HonorReSyncs: true, }, nil) if err != nil { logger.Errorf("Error creating watcher for %T due to error %+v", &kubernetes.ReplicaSet{}, err) @@ -133,7 +137,9 @@ func NewPodEventer(uuid uuid.UUID, cfg *conf.C, client k8s.Interface, publish fu } if metaConf.CronJob { jobWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_job", client, &kubernetes.Job{}, kubernetes.WatchOptions{ - SyncTimeout: config.SyncPeriod, + SyncTimeout: config.SyncPeriod, + Namespace: config.Namespace, + HonorReSyncs: true, }, nil) if err != nil { logger.Errorf("Error creating watcher for %T due to error %+v", &kubernetes.Job{}, err) diff --git a/libbeat/autodiscover/providers/kubernetes/service.go b/libbeat/autodiscover/providers/kubernetes/service.go index ba62dda9c47c..e9e71c921bd8 100644 --- a/libbeat/autodiscover/providers/kubernetes/service.go +++ b/libbeat/autodiscover/providers/kubernetes/service.go @@ -74,8 +74,9 @@ func NewServiceEventer(uuid uuid.UUID, cfg *conf.C, client k8s.Interface, publis if metaConf.Namespace.Enabled() || config.Hints.Enabled() { namespaceWatcher, err = kubernetes.NewNamedWatcher("namespace", client, &kubernetes.Namespace{}, kubernetes.WatchOptions{ - SyncTimeout: config.SyncPeriod, - Namespace: config.Namespace, + SyncTimeout: config.SyncPeriod, + Namespace: config.Namespace, + HonorReSyncs: true, }, nil) if err != nil { return nil, fmt.Errorf("couldn't create watcher for %T due to error %w", &kubernetes.Namespace{}, err) diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index bad3f87b38d5..3dc94738d127 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -8,6 +8,7 @@ This section summarizes the changes in each release. 
Also read <> for more detail about changes that affect upgrade. +* <> * <> * <> * <> diff --git a/libbeat/docs/shared-autodiscover.asciidoc b/libbeat/docs/shared-autodiscover.asciidoc index 1e1ea567b7cc..83d44b498ac2 100644 --- a/libbeat/docs/shared-autodiscover.asciidoc +++ b/libbeat/docs/shared-autodiscover.asciidoc @@ -140,10 +140,7 @@ The `kubernetes` autodiscover provider has the following configuration settings: `node`:: (Optional) Specify the node to scope {beatname_lc} to in case it cannot be accurately detected, as when running {beatname_lc} in host network mode. -`namespace`:: (Optional) Select the namespace from which to collect the - metadata. If it is not set, the processor collects metadata from all - namespaces. It is unset by default. The namespace configuration only applies to - kubernetes resources that are namespace scoped. +`namespace`:: (Optional) Select the namespace from which to collect the events from the resources. If it is not set, the provider collects them from all namespaces. It is unset by default. The namespace configuration only applies to kubernetes resources that are namespace scoped and if `unique` field is set to `false`. `cleanup_timeout`:: (Optional) Specify the time of inactivity before stopping the running configuration for a container, ifeval::["{beatname_lc}"=="filebeat"] @@ -196,7 +193,7 @@ Example: `unique`:: (Optional) Defaults to `false`. Marking an autodiscover provider as unique results into making the provider to enable the provided templates only when it will gain the leader lease. - This setting can only be combined with `cluster` scope. When `unique` is enabled enabled, `resource` + This setting can only be combined with `cluster` scope. When `unique` is enabled, `resource` and `add_resource_metadata` settings are not taken into account. `leader_lease`:: (Optional) Defaults to +{beatname_lc}-cluster-leader+. This will be name of the lock lease. 
One can monitor the status of the lease with `kubectl describe lease beats-cluster-leader`. diff --git a/libbeat/docs/shared/opendashboards.asciidoc b/libbeat/docs/shared/opendashboards.asciidoc index 7e73fbc8f4bd..1b9425049984 100644 --- a/libbeat/docs/shared/opendashboards.asciidoc +++ b/libbeat/docs/shared/opendashboards.asciidoc @@ -13,7 +13,7 @@ include::{libbeat-dir}/tab-widgets/open-kibana-widget.asciidoc[] -- . In the side navigation, click *Discover*. To see {beatname_uc} data, make -sure the predefined +{beatname_lc}-*+ index pattern is selected. +sure the predefined +{beatname_lc}-*+ data view is selected. + -- TIP: If you don’t see data in {kib}, try changing the time filter to a larger diff --git a/libbeat/docs/version.asciidoc b/libbeat/docs/version.asciidoc index cb37384bb30f..03efb1aa4190 100644 --- a/libbeat/docs/version.asciidoc +++ b/libbeat/docs/version.asciidoc @@ -1,6 +1,6 @@ :stack-version: 8.15.0 :doc-branch: main -:go-version: 1.21.10 +:go-version: 1.21.11 :release-state: unreleased :python: 3.7 :docker: 1.12 diff --git a/metricbeat/Dockerfile b/metricbeat/Dockerfile index ce43c81c312b..de48ddeff9d0 100644 --- a/metricbeat/Dockerfile +++ b/metricbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.10 +FROM golang:1.21.11 COPY --from=docker:26.0.0-alpine3.19 /usr/local/bin/docker /usr/local/bin/ RUN \ diff --git a/metricbeat/docs/running-on-docker.asciidoc b/metricbeat/docs/running-on-docker.asciidoc index 72cfed9757de..26ba27658b8f 100644 --- a/metricbeat/docs/running-on-docker.asciidoc +++ b/metricbeat/docs/running-on-docker.asciidoc @@ -21,6 +21,7 @@ docker run \ --mount type=bind,source=/var/run/dbus/system_bus_socket,target=/hostfs/var/run/dbus/system_bus_socket,readonly \ <4> --env DBUS_SYSTEM_BUS_ADDRESS='unix:path=/hostfs/var/run/dbus/system_bus_socket' \ <4> --net=host \ <5> + --cgroupns=host \ <6> {dockerimage} -e -system.hostfs=/hostfs ---- @@ -45,6 +46,7 @@ both require access to dbus. 
Mount the dbus socket and set the `DBUS_SYSTEM_BUS_ to make this file contain the host's network devices is to use the `--net=host` flag. This is due to Linux namespacing; simply bind mounting the host's `/proc` to `/hostfs/proc` is not sufficient. +<6> Runs the container using the host's cgroup namespace, instead of a private namespace. While this is optional, <> may produce more correct cgroup metrics when running in host mode. NOTE: The special filesystems +/proc+ and +/sys+ are only available if the host system is running Linux. Attempts to bind-mount these filesystems will diff --git a/metricbeat/module/http/_meta/Dockerfile b/metricbeat/module/http/_meta/Dockerfile index 0a5646a9d75c..b6517fea093f 100644 --- a/metricbeat/module/http/_meta/Dockerfile +++ b/metricbeat/module/http/_meta/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.10 +FROM golang:1.21.11 COPY test/main.go main.go diff --git a/metricbeat/module/nats/_meta/Dockerfile b/metricbeat/module/nats/_meta/Dockerfile index f3cab807dfd5..f443a51dd3ae 100644 --- a/metricbeat/module/nats/_meta/Dockerfile +++ b/metricbeat/module/nats/_meta/Dockerfile @@ -2,7 +2,7 @@ ARG NATS_VERSION=2.0.4 FROM nats:$NATS_VERSION # build stage -FROM golang:1.21.10 AS build-env +FROM golang:1.21.11 AS build-env RUN apt-get install git mercurial gcc RUN git clone https://github.com/nats-io/nats.go.git /nats-go RUN cd /nats-go/examples/nats-bench && git checkout tags/v1.10.0 && go build . 
diff --git a/metricbeat/module/vsphere/_meta/Dockerfile b/metricbeat/module/vsphere/_meta/Dockerfile index 9dea6777c572..6301d44f4e2a 100644 --- a/metricbeat/module/vsphere/_meta/Dockerfile +++ b/metricbeat/module/vsphere/_meta/Dockerfile @@ -1,5 +1,5 @@ ARG VSPHERE_GOLANG_VERSION -FROM golang:1.21.10 +FROM golang:1.21.11 RUN apt-get install curl git RUN go install github.com/vmware/govmomi/vcsim@v0.30.4 diff --git a/packetbeat/Dockerfile b/packetbeat/Dockerfile index 912cd87cc43c..22c50eeb4305 100644 --- a/packetbeat/Dockerfile +++ b/packetbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.10 +FROM golang:1.21.11 RUN \ apt-get update \ diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 5b6dd6ffd009..05ba6d0a25cf 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.15.0-9bce1e63-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.15.0-d4efed09-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.15.0-9bce1e63-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.15.0-d4efed09-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.15.0-9bce1e63-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.15.0-d4efed09-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" diff --git a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc index 794a51de0819..c55c80952a4c 100644 --- 
a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc @@ -120,7 +120,7 @@ characters. This only applies to non-JSON logs. See <<_encoding_3>>. ==== `decoding` The file decoding option is used to specify a codec that will be used to -decode the file contents. This can apply to any file stream data. +decode the file contents. This can apply to any file stream data. An example config is shown below: [source,yaml] @@ -131,17 +131,17 @@ An example config is shown below: Currently supported codecs are given below:- 1. <>: This codec decodes parquet compressed data streams. - + [id="attrib-decoding-parquet"] [float] ==== `the parquet codec` The `parquet` codec is used to decode parquet compressed data streams. Only enabling the codec will use the default codec options. The parquet codec supports -two sub attributes which can make parquet decoding more efficient. The `batch_size` attribute and +two sub attributes which can make parquet decoding more efficient. The `batch_size` attribute and the `process_parallel` attribute. The `batch_size` attribute can be used to specify the number of -records to read from the parquet stream at a time. By default the `batch size` is set to `1` and -`process_parallel` is set to `false`. If the `process_parallel` attribute is set to `true` then functions -which read multiple columns will read those columns in parallel from the parquet stream with a +records to read from the parquet stream at a time. By default the `batch size` is set to `1` and +`process_parallel` is set to `false`. If the `process_parallel` attribute is set to `true` then functions +which read multiple columns will read those columns in parallel from the parquet stream with a number of readers equal to the number of columns. Setting `process_parallel` to `true` will greatly increase the rate of processing at the cost of increased memory usage. Having a larger `batch_size` also helps to increase the rate of processing. 
An example config is shown below: @@ -162,6 +162,8 @@ value can be assigned the name of the field or `.[]`. This setting will be able the messages under the group value into separate events. For example, CloudTrail logs are in JSON format and events are found under the JSON object "Records". +NOTE: When using `expand_event_list_from_field`, `content_type` config parameter has to be set to `application/json`. + ["source","json"] ---- { diff --git a/x-pack/filebeat/input/http_endpoint/handler.go b/x-pack/filebeat/input/http_endpoint/handler.go index b799248a9350..4f3fdd550aa4 100644 --- a/x-pack/filebeat/input/http_endpoint/handler.go +++ b/x-pack/filebeat/input/http_endpoint/handler.go @@ -18,13 +18,13 @@ import ( "sort" "strconv" "strings" + "sync/atomic" "time" "github.com/google/cel-go/cel" "github.com/google/cel-go/checker/decls" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" - "go.uber.org/atomic" "go.uber.org/zap" "go.uber.org/zap/zapcore" "google.golang.org/protobuf/types/known/structpb" @@ -53,8 +53,8 @@ type handler struct { publish func(beat.Event) log *logp.Logger validator apiValidator - txBaseID string // Random value to make transaction IDs unique. - txIDCounter *atomic.Uint64 // Transaction ID counter that is incremented for each request. + txBaseID string // Random value to make transaction IDs unique. + txIDCounter atomic.Uint64 // Transaction ID counter that is incremented for each request. 
reqLogger *zap.Logger host, scheme string @@ -290,7 +290,7 @@ func (h *handler) logRequest(txID string, r *http.Request, status int, respBody } func (h *handler) nextTxID() string { - count := h.txIDCounter.Inc() + count := h.txIDCounter.Add(1) return h.formatTxID(count) } diff --git a/x-pack/filebeat/input/http_endpoint/input.go b/x-pack/filebeat/input/http_endpoint/input.go index 6737a9b9aa06..7f0440deb601 100644 --- a/x-pack/filebeat/input/http_endpoint/input.go +++ b/x-pack/filebeat/input/http_endpoint/input.go @@ -24,7 +24,6 @@ import ( "github.com/rcrowley/go-metrics" "go.elastic.co/ecszap" - "go.uber.org/atomic" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -327,10 +326,9 @@ func (s *server) getErr() error { func newHandler(ctx context.Context, c config, prg *program, pub func(beat.Event), log *logp.Logger, metrics *inputMetrics) http.Handler { h := &handler{ - ctx: ctx, - log: log, - txBaseID: newID(), - txIDCounter: atomic.NewUint64(0), + ctx: ctx, + log: log, + txBaseID: newID(), publish: pub, metrics: metrics, diff --git a/x-pack/filebeat/input/httpjson/policy.go b/x-pack/filebeat/input/httpjson/policy.go index 0c671cb85bbb..43360c1ed0ff 100644 --- a/x-pack/filebeat/input/httpjson/policy.go +++ b/x-pack/filebeat/input/httpjson/policy.go @@ -91,6 +91,12 @@ func (p *Policy) CustomRetryPolicy(ctx context.Context, resp *http.Response, err // errors and may relate to outages on the server side. This will catch // invalid response codes as well, like 0 and 999. 
if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) { + defer func() { + if resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } + }() return true, nil } diff --git a/x-pack/filebeat/input/httpjson/request.go b/x-pack/filebeat/input/httpjson/request.go index 3e63f0267162..b15f3db51b10 100644 --- a/x-pack/filebeat/input/httpjson/request.go +++ b/x-pack/filebeat/input/httpjson/request.go @@ -676,6 +676,7 @@ func (r *requester) processChainPaginationEvents(ctx context.Context, trCtx *tra if err != nil { return -1, fmt.Errorf("failed to collect response: %w", err) } + // store data according to response type if i == len(r.requestFactories)-1 && len(ids) != 0 { finalResps = append(finalResps, httpResp) @@ -702,12 +703,6 @@ func (r *requester) processChainPaginationEvents(ctx context.Context, trCtx *tra n += p.eventCount() } - defer func() { - if httpResp != nil && httpResp.Body != nil { - httpResp.Body.Close() - } - }() - return n, nil } diff --git a/x-pack/filebeat/input/internal/httplog/roundtripper.go b/x-pack/filebeat/input/internal/httplog/roundtripper.go index ce68147a2a7d..9e60cb60942f 100644 --- a/x-pack/filebeat/input/internal/httplog/roundtripper.go +++ b/x-pack/filebeat/input/internal/httplog/roundtripper.go @@ -15,9 +15,9 @@ import ( "net/http" "net/http/httputil" "strconv" + "sync/atomic" "time" - "go.uber.org/atomic" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -36,22 +36,21 @@ type contextKey string // responses to the provided logger. Transaction creation is logged to log. 
func NewLoggingRoundTripper(next http.RoundTripper, logger *zap.Logger, maxBodyLen int, log *logp.Logger) *LoggingRoundTripper { return &LoggingRoundTripper{ - transport: next, - maxBodyLen: maxBodyLen, - txLog: logger, - txBaseID: newID(), - txIDCounter: atomic.NewUint64(0), - log: log, + transport: next, + maxBodyLen: maxBodyLen, + txLog: logger, + txBaseID: newID(), + log: log, } } // LoggingRoundTripper is an http.RoundTripper that logs requests and responses. type LoggingRoundTripper struct { transport http.RoundTripper - maxBodyLen int // The maximum length of a body. Longer bodies will be truncated. - txLog *zap.Logger // Destination logger. - txBaseID string // Random value to make transaction IDs unique. - txIDCounter *atomic.Uint64 // Transaction ID counter that is incremented for each request. + maxBodyLen int // The maximum length of a body. Longer bodies will be truncated. + txLog *zap.Logger // Destination logger. + txBaseID string // Random value to make transaction IDs unique. + txIDCounter atomic.Uint64 // Transaction ID counter that is incremented for each request. log *logp.Logger } @@ -220,7 +219,7 @@ func (rt *LoggingRoundTripper) TxID() string { // nextTxID returns the next transaction.id value. It increments the internal // request counter. 
func (rt *LoggingRoundTripper) nextTxID() string { - count := rt.txIDCounter.Inc() + count := rt.txIDCounter.Add(1) return rt.formatTxID(count) } diff --git a/x-pack/functionbeat/Dockerfile b/x-pack/functionbeat/Dockerfile index 3b3a785a0ca5..d1c4d5af3cdf 100644 --- a/x-pack/functionbeat/Dockerfile +++ b/x-pack/functionbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.10 +FROM golang:1.21.11 RUN \ apt-get update \ diff --git a/x-pack/functionbeat/manager/aws/event_stack_poller_test.go b/x-pack/functionbeat/manager/aws/event_stack_poller_test.go index fe7473d0f3da..3679f07fb547 100644 --- a/x-pack/functionbeat/manager/aws/event_stack_poller_test.go +++ b/x-pack/functionbeat/manager/aws/event_stack_poller_test.go @@ -7,6 +7,7 @@ package aws import ( "context" "strconv" + "sync/atomic" "testing" "time" @@ -14,7 +15,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/cloudformation" "github.com/stretchr/testify/assert" - "go.uber.org/atomic" "github.com/elastic/elastic-agent-libs/logp" ) @@ -29,7 +29,7 @@ func (m *mockEventHandler) sync(event types.StackEvent) bool { if m.skipCount.Load() >= m.skipEvents { return false } - m.skipCount.Inc() + m.skipCount.Add(1) return true } diff --git a/x-pack/metricbeat/module/oracle/_meta/kibana/7/dashboard/862e2c20-9bf0-11e9-a61b-f742ed613c57.json b/x-pack/metricbeat/module/oracle/_meta/kibana/7/dashboard/862e2c20-9bf0-11e9-a61b-f742ed613c57.json index f093d13a3e45..216971d16b51 100644 --- a/x-pack/metricbeat/module/oracle/_meta/kibana/7/dashboard/862e2c20-9bf0-11e9-a61b-f742ed613c57.json +++ b/x-pack/metricbeat/module/oracle/_meta/kibana/7/dashboard/862e2c20-9bf0-11e9-a61b-f742ed613c57.json @@ -46,7 +46,7 @@ }, "panelIndex": "2", "panelRefName": "panel_2", - "title": "Tablespace Total Size", + "title": "Tablespace Max Total Size", "type": "visualization", "version": "8.0.0-SNAPSHOT" }, diff --git a/x-pack/metricbeat/module/oracle/_meta/kibana/7/visualization/05acae50-9bf0-11e9-a61b-f742ed613c57.json 
b/x-pack/metricbeat/module/oracle/_meta/kibana/7/visualization/05acae50-9bf0-11e9-a61b-f742ed613c57.json index 603fcc02ec2f..27de5af579a2 100644 --- a/x-pack/metricbeat/module/oracle/_meta/kibana/7/visualization/05acae50-9bf0-11e9-a61b-f742ed613c57.json +++ b/x-pack/metricbeat/module/oracle/_meta/kibana/7/visualization/05acae50-9bf0-11e9-a61b-f742ed613c57.json @@ -4,7 +4,7 @@ "kibanaSavedObjectMeta": { "searchSourceJSON": {} }, - "title": "Tablespace Total Size [Metricbeat Oracle]", + "title": "Tablespace Max Total Size [Metricbeat Oracle]", "uiStateJSON": {}, "version": 1, "visState": { @@ -24,13 +24,40 @@ "fill": 0.5, "formatter": "bytes", "id": "61ca57f1-469d-11e7-af02-69e470af7417", - "label": "Tablespace total size", + "label": "Tablespace max total size", "line_width": 1, "metrics": [ { - "field": "oracle.tablespace.space.total.bytes", + "agg_with": "avg", + "field": "oracle.tablespace.space.used.bytes", "id": "61ca57f2-469d-11e7-af02-69e470af7417", - "type": "avg" + "type": "max" + }, + { + "agg_with": "avg", + "colors": [ + "#68BC00" + ], + "field": "oracle.tablespace.space.free.bytes", + "id": "e04e8f40-24cd-4066-b12c-da0db0ff73d4", + "type": "max" + }, + { + "id": "2cf57800-8b54-41fa-a877-159b49699a50", + "script": "params.used_bytes + params.free_bytes", + "type": "math", + "variables": [ + { + "field": "61ca57f2-469d-11e7-af02-69e470af7417", + "id": "631a44d5-d18a-4743-bea0-6f61930fd65f", + "name": "used_bytes" + }, + { + "field": "e04e8f40-24cd-4066-b12c-da0db0ff73d4", + "id": "c255d24c-3a29-4879-b999-77af43d97c6b", + "name": "free_bytes" + } + ] } ], "point_size": 1, @@ -48,7 +75,7 @@ "type": "timeseries", "use_kibana_indexes": false }, - "title": "Tablespace Total Size [Metricbeat Oracle]", + "title": "Tablespace Max Total Size [Metricbeat Oracle]", "type": "metrics" } }, diff --git a/x-pack/metricbeat/module/oracle/tablespace/data.go b/x-pack/metricbeat/module/oracle/tablespace/data.go index 30c86d766dad..82f93a060b1e 100644 --- 
a/x-pack/metricbeat/module/oracle/tablespace/data.go +++ b/x-pack/metricbeat/module/oracle/tablespace/data.go @@ -16,8 +16,9 @@ import ( // extract is the E of a ETL processing. Gets the data files, used/free space and temp free space data that is fetch // by doing queries to Oracle -func (m *MetricSet) extract(ctx context.Context, extractor tablespaceExtractMethods) (out *extractedData, err error) { - out = &extractedData{} +func (m *MetricSet) extract(ctx context.Context, extractor tablespaceExtractMethods) (*extractedData, error) { + out := &extractedData{} + var err error if out.dataFiles, err = extractor.dataFilesData(ctx); err != nil { return nil, fmt.Errorf("error getting data_files: %w", err) @@ -31,23 +32,23 @@ func (m *MetricSet) extract(ctx context.Context, extractor tablespaceExtractMeth return nil, fmt.Errorf("error getting free space data: %w", err) } - return + return out, nil } // transform is the T of an ETL (refer to the 'extract' method above if you need to see the origin). Transforms the data // to create a Kibana/Elasticsearch friendly JSON. Data from Oracle is pretty fragmented by design so a lot of data // was necessary. 
Data is organized by Tablespace entity (Tablespaces might contain one or more data files) -func (m *MetricSet) transform(in *extractedData) (out map[string]mapstr.M) { - out = make(map[string]mapstr.M, 0) +func (m *MetricSet) transform(in *extractedData) map[string]mapstr.M { + out := make(map[string]mapstr.M, 0) - for _, dataFile := range in.dataFiles { - m.addDataFileData(&dataFile, out) + for i := range in.dataFiles { + m.addDataFileData(&in.dataFiles[i], out) } m.addUsedAndFreeSpaceData(in.freeSpace, out) m.addTempFreeSpaceData(in.tempFreeSpace, out) - return + return out } func (m *MetricSet) extractAndTransform(ctx context.Context) ([]mb.Event, error) { @@ -78,7 +79,7 @@ func (m *MetricSet) addTempFreeSpaceData(tempFreeSpaces []tempFreeSpace, out map name := val.(string) if name == "TEMP" { for _, tempFreeSpaceTable := range tempFreeSpaces { - oracle.SetSqlValueWithParentKey(m.Logger(), out, key, "space.total.bytes", &oracle.Int64Value{NullInt64: tempFreeSpaceTable.TablespaceSize}) + oracle.SetSqlValueWithParentKey(m.Logger(), out, key, "space.total.bytes", &oracle.Int64Value{NullInt64: tempFreeSpaceTable.TotalSpaceBytes}) oracle.SetSqlValueWithParentKey(m.Logger(), out, key, "space.used.bytes", &oracle.Int64Value{NullInt64: tempFreeSpaceTable.UsedSpaceBytes}) oracle.SetSqlValueWithParentKey(m.Logger(), out, key, "space.free.bytes", &oracle.Int64Value{NullInt64: tempFreeSpaceTable.FreeSpace}) } @@ -101,6 +102,7 @@ func (m *MetricSet) addUsedAndFreeSpaceData(freeSpaces []usedAndFreeSpace, out m if name == freeSpaceTable.TablespaceName { oracle.SetSqlValueWithParentKey(m.Logger(), out, key, "space.free.bytes", &oracle.Int64Value{NullInt64: freeSpaceTable.TotalFreeBytes}) oracle.SetSqlValueWithParentKey(m.Logger(), out, key, "space.used.bytes", &oracle.Int64Value{NullInt64: freeSpaceTable.TotalUsedBytes}) + oracle.SetSqlValueWithParentKey(m.Logger(), out, key, "space.total.bytes", &oracle.Int64Value{NullInt64: freeSpaceTable.TotalSpaceBytes}) } } } diff --git 
a/x-pack/metricbeat/module/oracle/tablespace/data_test.go b/x-pack/metricbeat/module/oracle/tablespace/data_test.go index 02e4bdd4528c..9c6990a25984 100644 --- a/x-pack/metricbeat/module/oracle/tablespace/data_test.go +++ b/x-pack/metricbeat/module/oracle/tablespace/data_test.go @@ -12,13 +12,13 @@ import ( "github.com/stretchr/testify/assert" ) -var expectedResults = []string{`{"data_file":{"id":18,"name":"/u02/app/oracle/oradata/ORCLCDB/orclpdb1/sysaux01.dbf","online_status":"ONLINE","size":{"bytes":9999990,"free":{"bytes":99999994},"max":{"bytes":9999994}},"status":"AVAILABLE"},"name":"SYSAUX","space":{"free":{"bytes":9999},"used":{"bytes":9991}}}`, - `{"data_file":{"id":181,"name":"/u02/app/oracle/oradata/ORCLCDB/orclpdb1/sysaux02.dbf","online_status":"ONLINE","size":{"bytes":9999991,"free":{"bytes":99999995},"max":{"bytes":9999995}},"status":"AVAILABLE"},"name":"SYSAUX","space":{"free":{"bytes":9999},"used":{"bytes":9991}}}`, - `{"data_file":{"id":182,"name":"/u02/app/oracle/oradata/ORCLCDB/orclpdb1/sysaux03.dbf","online_status":"ONLINE","size":{"bytes":9999992,"free":{"bytes":99999996},"max":{"bytes":9999996}},"status":"AVAILABLE"},"name":"SYSAUX","space":{"free":{"bytes":9999},"used":{"bytes":9991}}}`, - `{"data_file":{"id":18,"name":"/u02/app/oracle/oradata/ORCLCDB/orclpdb1/system01.dbf","online_status":"ONLINE","size":{"bytes":999990,"free":{"bytes":9999994},"max":{"bytes":9999994}},"status":"AVAILABLE"},"name":"SYSTEM","space":{"free":{"bytes":9990},"used":{"bytes":9991}}}`, +var expectedResults = []string{`{"data_file":{"id":18,"name":"/u02/app/oracle/oradata/ORCLCDB/orclpdb1/sysaux01.dbf","online_status":"ONLINE","size":{"bytes":9999990,"free":{"bytes":99999994},"max":{"bytes":9999994}},"status":"AVAILABLE"},"name":"SYSAUX","space":{"free":{"bytes":9999},"total":{"bytes":99999},"used":{"bytes":9991}}}`, + 
`{"data_file":{"id":181,"name":"/u02/app/oracle/oradata/ORCLCDB/orclpdb1/sysaux02.dbf","online_status":"ONLINE","size":{"bytes":9999991,"free":{"bytes":99999995},"max":{"bytes":9999995}},"status":"AVAILABLE"},"name":"SYSAUX","space":{"free":{"bytes":9999},"total":{"bytes":99999},"used":{"bytes":9991}}}`, + `{"data_file":{"id":182,"name":"/u02/app/oracle/oradata/ORCLCDB/orclpdb1/sysaux03.dbf","online_status":"ONLINE","size":{"bytes":9999992,"free":{"bytes":99999996},"max":{"bytes":9999996}},"status":"AVAILABLE"},"name":"SYSAUX","space":{"free":{"bytes":9999},"total":{"bytes":99999},"used":{"bytes":9991}}}`, + `{"data_file":{"id":18,"name":"/u02/app/oracle/oradata/ORCLCDB/orclpdb1/system01.dbf","online_status":"ONLINE","size":{"bytes":999990,"free":{"bytes":9999994},"max":{"bytes":9999994}},"status":"AVAILABLE"},"name":"SYSTEM","space":{"free":{"bytes":9990},"total":{"bytes":99999},"used":{"bytes":9991}}}`, `{"data_file":{"id":18,"name":"/u02/app/oracle/oradata/ORCLCDB/orclpdb1/temp012017-03-02_07-54-38-075-AM.dbf","online_status":"ONLINE","size":{"bytes":999991,"free":{"bytes":9999994},"max":{"bytes":9999994}},"status":"AVAILABLE"},"name":"TEMP","space":{"free":{"bytes":99999},"total":{"bytes":99999},"used":{"bytes":99999}}}`, - `{"data_file":{"id":18,"name":"/u02/app/oracle/oradata/ORCLCDB/orclpdb1/undotbs01.dbf","online_status":"ONLINE","size":{"bytes":999992,"free":{"bytes":9999994},"max":{"bytes":9999994}},"status":"AVAILABLE"},"name":"UNDOTBS1","space":{"free":{"bytes":9999},"used":{"bytes":9991}}}`, - `{"data_file":{"id":18,"name":"/u02/app/oracle/oradata/ORCLCDB/orclpdb1/users01.dbf","online_status":"ONLINE","size":{"bytes":999993,"free":{"bytes":9999994},"max":{"bytes":9999994}},"status":"AVAILABLE"},"name":"USERS","space":{"free":{"bytes":9999},"used":{"bytes":9991}}}`} + 
`{"data_file":{"id":18,"name":"/u02/app/oracle/oradata/ORCLCDB/orclpdb1/undotbs01.dbf","online_status":"ONLINE","size":{"bytes":999992,"free":{"bytes":9999994},"max":{"bytes":9999994}},"status":"AVAILABLE"},"name":"UNDOTBS1","space":{"free":{"bytes":9999},"total":{"bytes":99999},"used":{"bytes":9991}}}`, + `{"data_file":{"id":18,"name":"/u02/app/oracle/oradata/ORCLCDB/orclpdb1/users01.dbf","online_status":"ONLINE","size":{"bytes":999993,"free":{"bytes":9999994},"max":{"bytes":9999994}},"status":"AVAILABLE"},"name":"USERS","space":{"free":{"bytes":9999},"total":{"bytes":99999},"used":{"bytes":9991}}}`} var notExpectedEvents = []string{`{}`, `{"foo":"bar"}`} diff --git a/x-pack/metricbeat/module/oracle/tablespace/mocks_test.go b/x-pack/metricbeat/module/oracle/tablespace/mocks_test.go index 12348236bccc..9f5800166af1 100644 --- a/x-pack/metricbeat/module/oracle/tablespace/mocks_test.go +++ b/x-pack/metricbeat/module/oracle/tablespace/mocks_test.go @@ -77,16 +77,16 @@ func (h happyDataFiles) dataFilesData(_ context.Context) ([]dataFile, error) { type happyTempFreeSpaceData struct{} func (happyTempFreeSpaceData) tempFreeSpaceData(_ context.Context) ([]tempFreeSpace, error) { - return []tempFreeSpace{{TablespaceName: "TEMP", TablespaceSize: sql.NullInt64{Valid: true, Int64: 99999}, UsedSpaceBytes: sql.NullInt64{Valid: true, Int64: 99999}, FreeSpace: sql.NullInt64{Int64: 99999, Valid: true}}}, nil + return []tempFreeSpace{{TablespaceName: "TEMP", TotalSpaceBytes: sql.NullInt64{Valid: true, Int64: 99999}, UsedSpaceBytes: sql.NullInt64{Valid: true, Int64: 99999}, FreeSpace: sql.NullInt64{Int64: 99999, Valid: true}}}, nil } type happyFreeSpaceData struct{} func (happyFreeSpaceData) usedAndFreeSpaceData(_ context.Context) ([]usedAndFreeSpace, error) { return []usedAndFreeSpace{ - {TablespaceName: "SYSTEM", TotalFreeBytes: sql.NullInt64{Int64: 9990, Valid: true}, TotalUsedBytes: sql.NullInt64{Int64: 9991, Valid: true}}, - {TablespaceName: "SYSAUX", TotalFreeBytes: 
sql.NullInt64{Int64: 9999, Valid: true}, TotalUsedBytes: sql.NullInt64{Int64: 9991, Valid: true}}, - {TablespaceName: "UNDOTBS1", TotalFreeBytes: sql.NullInt64{Int64: 9999, Valid: true}, TotalUsedBytes: sql.NullInt64{Int64: 9991, Valid: true}}, - {TablespaceName: "USERS", TotalFreeBytes: sql.NullInt64{Int64: 9999, Valid: true}, TotalUsedBytes: sql.NullInt64{Int64: 9991, Valid: true}}, + {TablespaceName: "SYSTEM", TotalFreeBytes: sql.NullInt64{Int64: 9990, Valid: true}, TotalUsedBytes: sql.NullInt64{Int64: 9991, Valid: true}, TotalSpaceBytes: sql.NullInt64{Int64: 99999, Valid: true}}, + {TablespaceName: "SYSAUX", TotalFreeBytes: sql.NullInt64{Int64: 9999, Valid: true}, TotalUsedBytes: sql.NullInt64{Int64: 9991, Valid: true}, TotalSpaceBytes: sql.NullInt64{Int64: 99999, Valid: true}}, + {TablespaceName: "UNDOTBS1", TotalFreeBytes: sql.NullInt64{Int64: 9999, Valid: true}, TotalUsedBytes: sql.NullInt64{Int64: 9991, Valid: true}, TotalSpaceBytes: sql.NullInt64{Int64: 99999, Valid: true}}, + {TablespaceName: "USERS", TotalFreeBytes: sql.NullInt64{Int64: 9999, Valid: true}, TotalUsedBytes: sql.NullInt64{Int64: 9991, Valid: true}, TotalSpaceBytes: sql.NullInt64{Int64: 99999, Valid: true}}, }, nil } diff --git a/x-pack/metricbeat/module/oracle/tablespace/temp_free_space.go b/x-pack/metricbeat/module/oracle/tablespace/temp_free_space.go index efc8401e2c0b..f4b592d2b75b 100644 --- a/x-pack/metricbeat/module/oracle/tablespace/temp_free_space.go +++ b/x-pack/metricbeat/module/oracle/tablespace/temp_free_space.go @@ -11,31 +11,24 @@ import ( ) type tempFreeSpace struct { - TablespaceName string - TablespaceSize sql.NullInt64 - UsedSpaceBytes sql.NullInt64 - FreeSpace sql.NullInt64 -} - -func (d *tempFreeSpace) hash() string { - return d.TablespaceName -} - -func (d *tempFreeSpace) eventKey() string { - return d.TablespaceName + TablespaceName string + TotalSpaceBytes sql.NullInt64 + UsedSpaceBytes sql.NullInt64 + FreeSpace sql.NullInt64 } func (e *tablespaceExtractor) 
tempFreeSpaceData(ctx context.Context) ([]tempFreeSpace, error) { - rows, err := e.db.QueryContext(ctx, "SELECT TABLESPACE_NAME, TABLESPACE_SIZE, ALLOCATED_SPACE, FREE_SPACE FROM DBA_TEMP_FREE_SPACE") + rows, err := e.db.QueryContext(ctx, `SELECT t.TABLESPACE_NAME, (SELECT SUM(BYTES) FROM DBA_DATA_FILES) + (SELECT SUM(BYTES) FROM DBA_TEMP_FILES) AS TOTAL_SUM, t.ALLOCATED_SPACE, t.FREE_SPACE FROM DBA_TEMP_FREE_SPACE t `) if err != nil { return nil, fmt.Errorf("error executing query: %w", err) } + defer rows.Close() results := make([]tempFreeSpace, 0) for rows.Next() { dest := tempFreeSpace{} - if err = rows.Scan(&dest.TablespaceName, &dest.TablespaceSize, &dest.UsedSpaceBytes, &dest.FreeSpace); err != nil { + if err = rows.Scan(&dest.TablespaceName, &dest.TotalSpaceBytes, &dest.UsedSpaceBytes, &dest.FreeSpace); err != nil { return nil, err } results = append(results, dest) diff --git a/x-pack/metricbeat/module/oracle/tablespace/used_and_free_space.go b/x-pack/metricbeat/module/oracle/tablespace/used_and_free_space.go index b17b249808d2..e327badcf02d 100644 --- a/x-pack/metricbeat/module/oracle/tablespace/used_and_free_space.go +++ b/x-pack/metricbeat/module/oracle/tablespace/used_and_free_space.go @@ -11,30 +11,24 @@ import ( ) type usedAndFreeSpace struct { - TablespaceName string - TotalFreeBytes sql.NullInt64 - TotalUsedBytes sql.NullInt64 -} - -func (d *usedAndFreeSpace) hash() string { - return d.TablespaceName -} - -func (d *usedAndFreeSpace) eventKey() string { - return d.TablespaceName + TablespaceName string + TotalSpaceBytes sql.NullInt64 + TotalFreeBytes sql.NullInt64 + TotalUsedBytes sql.NullInt64 } func (e *tablespaceExtractor) usedAndFreeSpaceData(ctx context.Context) ([]usedAndFreeSpace, error) { - rows, err := e.db.QueryContext(ctx, "SELECT b.tablespace_name, tbs_size used, a.free_space free FROM (SELECT tablespace_name, sum(bytes) AS free_space FROM dba_free_space GROUP BY tablespace_name) a, (SELECT tablespace_name, sum(bytes) AS tbs_size FROM 
dba_data_files GROUP BY tablespace_name) b WHERE a.tablespace_name(+)=b.tablespace_name") + rows, err := e.db.QueryContext(ctx, `SELECT b.tablespace_name, (b.tbs_size - NVL(a.free_space, 0)) AS used, NVL(a.free_space, 0) AS free, (SELECT SUM(bytes) FROM DBA_DATA_FILES) + (SELECT SUM(bytes) FROM DBA_TEMP_FILES) AS total_sum FROM (SELECT tablespace_name, SUM(bytes) AS free_space FROM DBA_FREE_SPACE GROUP BY tablespace_name) a RIGHT JOIN (SELECT tablespace_name, SUM(bytes) AS tbs_size FROM DBA_DATA_FILES GROUP BY tablespace_name) b ON a.tablespace_name = b.tablespace_name`) if err != nil { return nil, fmt.Errorf("error executing query: %w", err) } + defer rows.Close() results := make([]usedAndFreeSpace, 0) for rows.Next() { dest := usedAndFreeSpace{} - if err = rows.Scan(&dest.TablespaceName, &dest.TotalUsedBytes, &dest.TotalFreeBytes); err != nil { + if err = rows.Scan(&dest.TablespaceName, &dest.TotalUsedBytes, &dest.TotalFreeBytes, &dest.TotalSpaceBytes); err != nil { return nil, err } results = append(results, dest) diff --git a/x-pack/metricbeat/module/stan/_meta/Dockerfile b/x-pack/metricbeat/module/stan/_meta/Dockerfile index 5023acb7b46a..e7fd7774af83 100644 --- a/x-pack/metricbeat/module/stan/_meta/Dockerfile +++ b/x-pack/metricbeat/module/stan/_meta/Dockerfile @@ -2,7 +2,7 @@ ARG STAN_VERSION=0.15.1 FROM nats-streaming:$STAN_VERSION # build stage -FROM golang:1.21.10 AS build-env +FROM golang:1.21.11 AS build-env RUN apt-get install git mercurial gcc RUN git clone https://github.com/nats-io/stan.go.git /stan-go RUN cd /stan-go/examples/stan-bench && git checkout tags/v0.5.2 && go build . 
diff --git a/x-pack/packetbeat/magefile.go b/x-pack/packetbeat/magefile.go index 357e5e235855..7e2c70d2f84e 100644 --- a/x-pack/packetbeat/magefile.go +++ b/x-pack/packetbeat/magefile.go @@ -173,10 +173,7 @@ func SystemTest(ctx context.Context) error { } func getBucketName() string { - if os.Getenv("BUILDKITE") == "true" { - return "ingest-buildkite-ci" - } - return "obs-ci-cache" + return "ingest-buildkite-ci" } // getNpcapInstaller gets the installer from the Google Cloud Storage service.