diff --git a/.buildkite/auditbeat/auditbeat-pipeline.yml b/.buildkite/auditbeat/auditbeat-pipeline.yml index b65044e7344..b6b915789db 100644 --- a/.buildkite/auditbeat/auditbeat-pipeline.yml +++ b/.buildkite/auditbeat/auditbeat-pipeline.yml @@ -32,6 +32,11 @@ env: RACE_DETECTOR: "true" TEST_COVERAGE: "true" + # Concurrency definition + CONCURRENCY_GROUP: "orka-concurrency-group" + CONCURRENCY_COUNT: 10 + CONCURRENCY_METHOD: eager + steps: - group: "Check/Update" key: "auditbeat-check-update" @@ -296,6 +301,9 @@ steps: source .buildkite/scripts/install_macos_tools.sh cd auditbeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability @@ -322,6 +330,9 @@ steps: source .buildkite/scripts/install_macos_tools.sh cd auditbeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability diff --git a/.buildkite/filebeat/filebeat-pipeline.yml b/.buildkite/filebeat/filebeat-pipeline.yml index 46720357074..92b007cfdf3 100644 --- a/.buildkite/filebeat/filebeat-pipeline.yml +++ b/.buildkite/filebeat/filebeat-pipeline.yml @@ -31,6 +31,11 @@ env: RACE_DETECTOR: "true" TEST_COVERAGE: "true" + # Concurrency definition + CONCURRENCY_GROUP: "orka-concurrency-group" + CONCURRENCY_COUNT: 10 + CONCURRENCY_METHOD: eager + steps: - group: "Check/Update" key: "filebeat-check-update" @@ -215,6 +220,9 @@ steps: source .buildkite/scripts/install_macos_tools.sh cd filebeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability @@ -242,6 +250,9 @@ steps: source .buildkite/scripts/install_macos_tools.sh cd filebeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability diff --git a/.buildkite/heartbeat/heartbeat-pipeline.yml b/.buildkite/heartbeat/heartbeat-pipeline.yml index abdc8f73e33..13b0c8035b4 100644 --- a/.buildkite/heartbeat/heartbeat-pipeline.yml +++ b/.buildkite/heartbeat/heartbeat-pipeline.yml @@ -29,6 +29,11 @@ env: RACE_DETECTOR: "true" TEST_COVERAGE: "true" + # Concurrency definition + CONCURRENCY_GROUP: "orka-concurrency-group" + CONCURRENCY_COUNT: 10 + CONCURRENCY_METHOD: eager + steps: - group: "Check/Update" key: "heartbeat-check-update" @@ -217,6 +222,9 @@ steps: source .buildkite/scripts/install_macos_tools.sh cd heartbeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability @@ -238,6 +246,9 @@ steps: source .buildkite/scripts/install_macos_tools.sh cd heartbeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of 
custom vm images and vendor instability diff --git a/.buildkite/metricbeat/pipeline.yml b/.buildkite/metricbeat/pipeline.yml index 63c1870c158..674e55c5207 100644 --- a/.buildkite/metricbeat/pipeline.yml +++ b/.buildkite/metricbeat/pipeline.yml @@ -34,6 +34,11 @@ env: RACE_DETECTOR: "true" TEST_COVERAGE: "true" + # Concurrency definition + CONCURRENCY_GROUP: "orka-concurrency-group" + CONCURRENCY_COUNT: 10 + CONCURRENCY_METHOD: eager + steps: - group: "Check/Update" key: "metricbeat-check-update" @@ -326,6 +331,9 @@ steps: source .buildkite/scripts/install_macos_tools.sh cd metricbeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability @@ -354,6 +362,9 @@ steps: source .buildkite/scripts/install_macos_tools.sh cd metricbeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability diff --git a/.buildkite/packaging.pipeline.yml b/.buildkite/packaging.pipeline.yml index 07296d3bc3c..ecf2dca5cc0 100644 --- a/.buildkite/packaging.pipeline.yml +++ b/.buildkite/packaging.pipeline.yml @@ -5,7 +5,7 @@ env: ASDF_MAGE_VERSION: 1.15.0 AWS_ARM_INSTANCE_TYPE: "m6g.xlarge" AWS_IMAGE_UBUNTU_ARM_64: "platform-ingest-beats-ubuntu-2204-aarch64" - GCP_DEFAULT_MACHINE_TYPE: "c2d-highcpu-8" + GCP_DEFAULT_MACHINE_TYPE: "c2d-standard-8" IMAGE_UBUNTU_X86_64: "family/platform-ingest-beats-ubuntu-2204" PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" diff --git a/.buildkite/packetbeat/pipeline.packetbeat.yml b/.buildkite/packetbeat/pipeline.packetbeat.yml index 753dd182548..31cdecd4ae0 100644 --- a/.buildkite/packetbeat/pipeline.packetbeat.yml +++ b/.buildkite/packetbeat/pipeline.packetbeat.yml @@ -28,6 +28,11 @@ env: RACE_DETECTOR: "true" TEST_COVERAGE: "true" + # Concurrency definition + CONCURRENCY_GROUP: "orka-concurrency-group" + CONCURRENCY_COUNT: 10 + CONCURRENCY_METHOD: eager + steps: - group: "Check/Update" key: "packetbeat-check-update" @@ -268,6 +273,9 @@ steps: source .buildkite/scripts/install_macos_tools.sh cd packetbeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability @@ -295,6 +303,9 @@ steps: source .buildkite/scripts/install_macos_tools.sh cd packetbeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability diff --git a/.buildkite/x-pack/pipeline.xpack.auditbeat.yml b/.buildkite/x-pack/pipeline.xpack.auditbeat.yml index 88dfb94bfb3..cca15625243 100644 --- a/.buildkite/x-pack/pipeline.xpack.auditbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.auditbeat.yml @@ -31,6 +31,11 @@ env: # Module tests BEAT_PATH: "x-pack/auditbeat" + # Concurrency definition + CONCURRENCY_GROUP: "orka-concurrency-group" + CONCURRENCY_COUNT: 10 + CONCURRENCY_METHOD: eager + steps: - group: "Check/Update" key: "x-pack-auditbeat-check-update" @@ -276,6 +281,9 @@ steps: 
source .buildkite/scripts/install_macos_tools.sh cd x-pack/auditbeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability @@ -302,6 +310,9 @@ steps: source .buildkite/scripts/install_macos_tools.sh cd x-pack/auditbeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability diff --git a/.buildkite/x-pack/pipeline.xpack.filebeat.yml b/.buildkite/x-pack/pipeline.xpack.filebeat.yml index 91425933abe..2dc0c818654 100644 --- a/.buildkite/x-pack/pipeline.xpack.filebeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.filebeat.yml @@ -27,6 +27,11 @@ env: RACE_DETECTOR: "true" TEST_COVERAGE: "true" + # Concurrency definition + CONCURRENCY_GROUP: "orka-concurrency-group" + CONCURRENCY_COUNT: 10 + CONCURRENCY_METHOD: eager + steps: - group: "Check/Update" key: "x-pack-filebeat-check-update" @@ -323,6 +328,9 @@ steps: source .buildkite/scripts/install_macos_tools.sh cd x-pack/filebeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability @@ -349,6 +357,9 @@ steps: source .buildkite/scripts/install_macos_tools.sh cd x-pack/filebeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability diff --git a/.buildkite/x-pack/pipeline.xpack.heartbeat.yml b/.buildkite/x-pack/pipeline.xpack.heartbeat.yml index 30d98bec350..839bfc8a35a 100644 --- a/.buildkite/x-pack/pipeline.xpack.heartbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.heartbeat.yml @@ -32,6 +32,11 @@ env: RACE_DETECTOR: "true" TEST_COVERAGE: "true" + # Concurrency definition + CONCURRENCY_GROUP: "orka-concurrency-group" + CONCURRENCY_COUNT: 10 + CONCURRENCY_METHOD: eager + steps: - group: "Check/Update" key: "x-pack-heartbeat-check-update" @@ -286,6 +291,9 @@ steps: installNodeJsDependencies cd x-pack/heartbeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability @@ -313,6 +321,9 @@ steps: installNodeJsDependencies cd x-pack/heartbeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability diff --git a/.buildkite/x-pack/pipeline.xpack.metricbeat.yml b/.buildkite/x-pack/pipeline.xpack.metricbeat.yml index abf62750451..a074c681a96 100644 --- a/.buildkite/x-pack/pipeline.xpack.metricbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.metricbeat.yml @@ -30,6 +30,11 @@ env: # Module tests BEAT_PATH: "x-pack/metricbeat" + # Concurrency definition + CONCURRENCY_GROUP: "orka-concurrency-group" + CONCURRENCY_COUNT: 10 + CONCURRENCY_METHOD: 
eager + steps: - group: "Check/Update" key: "x-pack-metricbeat-check-update" @@ -302,6 +307,9 @@ steps: set -euo pipefail source .buildkite/scripts/install_macos_tools.sh cd x-pack/metricbeat && mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability @@ -329,6 +337,9 @@ steps: set -euo pipefail source .buildkite/scripts/install_macos_tools.sh cd x-pack/metricbeat && mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability diff --git a/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml b/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml index 9c397f95d79..c06f473c000 100644 --- a/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml @@ -24,6 +24,11 @@ env: RACE_DETECTOR: "true" TEST_COVERAGE: "true" + # Concurrency definition + CONCURRENCY_GROUP: "orka-concurrency-group" + CONCURRENCY_COUNT: 10 + CONCURRENCY_METHOD: eager + steps: - group: "Check/Update" key: "x-pack-osquerybeat-check-update" @@ -225,6 +230,9 @@ steps: set -euo pipefail source .buildkite/scripts/install_macos_tools.sh cd x-pack/osquerybeat && mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability @@ -243,6 +251,9 @@ steps: set -euo pipefail source .buildkite/scripts/install_macos_tools.sh cd x-pack/osquerybeat && mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability diff --git a/.buildkite/x-pack/pipeline.xpack.packetbeat.yml b/.buildkite/x-pack/pipeline.xpack.packetbeat.yml index 09279478de7..0bd0c546f2e 100644 --- a/.buildkite/x-pack/pipeline.xpack.packetbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.packetbeat.yml @@ -28,6 +28,11 @@ env: RACE_DETECTOR: "true" TEST_COVERAGE: "true" + # Concurrency definition + CONCURRENCY_GROUP: "orka-concurrency-group" + CONCURRENCY_COUNT: 10 + CONCURRENCY_METHOD: eager + steps: - group: "Check/Update" key: "x-pack-packetbeat-check-update" @@ -381,6 +386,9 @@ steps: source .buildkite/scripts/install_macos_tools.sh cd x-pack/packetbeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability @@ -408,6 +416,9 @@ steps: source .buildkite/scripts/install_macos_tools.sh cd x-pack/packetbeat mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" retry: automatic: - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability diff --git a/.github/workflows/bump-elastic-stack-snapshot.yml b/.github/workflows/bump-elastic-stack-snapshot.yml index e828643d7d5..804ef8226ed 100644 --- 
a/.github/workflows/bump-elastic-stack-snapshot.yml +++ b/.github/workflows/bump-elastic-stack-snapshot.yml @@ -32,7 +32,7 @@ jobs: - uses: actions/checkout@v4 - name: Install Updatecli in the runner - uses: updatecli/updatecli-action@6b8881a17fc8038e884ec94ff72a49e8e8a4069f # v0.76.1 + uses: updatecli/updatecli-action@704a64517239e0993c5e3bf6749a063b8f950d9f # v0.76.1 - name: Run Updatecli in Apply mode run: updatecli --experimental apply --config .github/workflows/updatecli.d/bump-elastic-stack-snapshot.yml --values .github/workflows/updatecli.d/values.d/scm.yml diff --git a/.github/workflows/bump-golang.yml b/.github/workflows/bump-golang.yml index 3a68d4400c6..29ed18ade6f 100644 --- a/.github/workflows/bump-golang.yml +++ b/.github/workflows/bump-golang.yml @@ -23,7 +23,7 @@ jobs: - uses: actions/checkout@v4 - name: Install Updatecli in the runner - uses: updatecli/updatecli-action@6b8881a17fc8038e884ec94ff72a49e8e8a4069f # v0.76.1 + uses: updatecli/updatecli-action@704a64517239e0993c5e3bf6749a063b8f950d9f # v0.76.1 - name: Run Updatecli in Apply mode run: updatecli --experimental apply --config .github/workflows/updatecli.d/${{ matrix.file }} --values .github/workflows/updatecli.d/values.d/scm.yml diff --git a/.github/workflows/updatecli.d/bump-golang.yml b/.github/workflows/updatecli.d/bump-golang.yml index 17235535060..f03c5471e63 100644 --- a/.github/workflows/updatecli.d/bump-golang.yml +++ b/.github/workflows/updatecli.d/bump-golang.yml @@ -87,7 +87,7 @@ targets: scmid: githubConfig kind: file spec: - content: 'go {{ source "gomod" }}' + content: 'go {{ source "latestGoVersion" }}' file: go.mod matchpattern: 'go \d+.\d+.\d+' update-go-version: diff --git a/.go-version b/.go-version index 229a27c6f20..d28b1eb8f3f 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.22.8 +1.22.9 diff --git a/.golangci.yml b/.golangci.yml index 936215ea909..757e68f7627 100755 --- a/.golangci.yml +++ b/.golangci.yml @@ -152,7 +152,7 @@ linters-settings: gosimple: # Select the Go version to target. The default is '1.13'. - go: "1.22.8" + go: "1.22.9" nakedret: # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 @@ -170,19 +170,19 @@ linters-settings: staticcheck: # Select the Go version to target. The default is '1.13'. - go: "1.22.8" + go: "1.22.9" checks: ["all"] stylecheck: # Select the Go version to target. The default is '1.13'. - go: "1.22.8" + go: "1.22.9" # Disabled: # ST1005: error strings should not be capitalized checks: ["all", "-ST1005"] unused: # Select the Go version to target. The default is '1.13'. - go: "1.22.8" + go: "1.22.9" gosec: excludes: diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 5c273f7bfb1..f981cf60400 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -3,6 +3,145 @@ :issue: https://github.com/elastic/beats/issues/ :pull: https://github.com/elastic/beats/pull/ +[[release-notes-8.16.0]] +=== Beats version 8.16.0 +https://github.com/elastic/beats/compare/v8.15.4\...v8.16.0[View commits] + +==== Breaking changes + +*Affecting all Beats* + +- Fix FQDN being lowercased when used as `host.hostname`. {issue}39993[39993] +- Beats won't log start up information when running under the Elastic Agent. {pull}40390[40390] +- Filebeat now needs `dup3`, `faccessat2`, `prctl` and `setrlimit` syscalls to run the journald input. If this input is not being used, the syscalls are not needed. All Beats have those syscalls allowed now because the default seccomp policy is global to all Beats. 
{pull}40061[40061]
+- Beats will rate limit the logs about errors when indexing events on Elasticsearch, logging a summary every 10s. The logs sent to the event log are unchanged. {issue}40157[40157]
+
+*Filebeat*
+
+- Filebeat, when running with Elastic-Agent, reports status for Filestream input. {pull}40121[40121]
+- Added support for hyphens in extension keys in `decode_cef` Filebeat processor. {pull}40427[40427]
+- Journald: removed configuration options `include_matches.or`, `include_matches.and`, `backoff`, `max_backoff`, `cursor_seek_fallback`. {pull}40061[40061]
+- Journald: `include_matches.match` now behaves in the same way as matchers in `journalctl`. Users should carefully update their input configuration. {pull}40061[40061]
+- Journald: `seek` and `since` behaviour have been simplified: if there is a cursor (state), `seek` and `since` are ignored and the cursor is used. {pull}40061[40061]
+- Redis: Added replication role as a field to submitted slowlogs.
+- Added `container.image.name` to `journald` Filebeat input's Docker-specific translated fields. {pull}40450[40450]
+- Remove deprecated awscloudwatch field from Filebeat. {pull}41089[41089]
+- The performance of ingesting SQS data with the S3 input has improved by up to 60x for queues with many small events. The `max_number_of_messages` config for SQS mode is now ignored, as the new design no longer needs a manual cap on messages. Instead, use `number_of_workers` to scale ingestion rate in both S3 and SQS modes. The increased efficiency may increase network bandwidth consumption, which can be throttled by lowering `number_of_workers`. It may also increase the number of events stored in memory, which can be throttled by lowering the configured size of the internal queue. A tuning sketch follows this section. {pull}40699[40699]
+
+*Metricbeat*
+
+- Add support for specifying a custom endpoint for GCP service clients. {issue}40848[40848] {pull}40918[40918]
+
+==== Bugfixes
+
+*Auditbeat*
+
+- Request status from a separate socket to avoid data congestion. {pull}41207[41207]
+
+*Filebeat*
+
+- Fix crashes in the journald input. {pull}40061[40061]
+- Fix long filepaths in diagnostics exceeding max path limits on Windows. {pull}40909[40909]
+- Fix a bug in the Salesforce input to only handle responses with a 200 status code. {pull}41015[41015]
+- Fixed failed job handling and removed false-positive error logs in the GCS input. {pull}41142[41142]
+- Bump github.com/elastic/go-sfdc dependency used by x-pack/filebeat/input/salesforce. {pull}41192[41192]
+- Journald input can now read events from all boots. {issue}41083[41083] {pull}41244[41244]
+- Fix errors in SQS host resolution in the `aws-s3` input when using custom (non-AWS) endpoints. {pull}41504[41504]
+
+*Metricbeat*
+
+- Add GCP 'instance_id' resource label in ECS cloud fields. {issue}40033[40033] {pull}40062[40062]
+- Remove excessive info-level logs in cgroups setup. {pull}40491[40491]
+- Fix http server helper SSL config. {pull}39405[39405]
+
+==== Added
+
+*Filebeat*
+
+- Implement Elastic Agent status and health reporting for Netflow Filebeat input. {pull}40080[40080]
+- Add SSL and username support for Redis input; the input now includes support for Redis 6.0+. {pull}40111[40111]
+- Add scaling up support for Netflow input. {issue}37761[37761] {pull}40122[40122]
+- Update CEL mito extensions to v1.15.0. {pull}40294[40294]
+- Improve logging in Okta Entity Analytics provider. {issue}40106[40106] {pull}40347[40347]
+- Document `winlog` input. {issue}40074[40074] {pull}40462[40462]
+- Added retry logic to websocket connections in the streaming input. {issue}40271[40271] {pull}40601[40601]
+- Disable event normalization for netflow input. {pull}40635[40635]
+- Allow attribute selection in the Active Directory entity analytics provider. {issue}40482[40482] {pull}40662[40662]
+- Improve error quality when a CEL program does not correctly return an events array. {pull}40580[40580]
+- Added support for Microsoft Entra ID RBAC authentication. {issue}40434[40434] {pull}40879[40879]
+- Add `use_kubeadm` config option for filebeat (both filebeat.input and autodiscovery) in order to toggle kubeadm-config API requests. {pull}40301[40301]
+- Make HTTP library function inclusion non-conditional in CEL input. {pull}40912[40912]
+- Add support for CrowdStrike streaming API to the streaming input. {issue}40264[40264] {pull}40838[40838]
+- Add support to CEL for reading host environment variables. {issue}40762[40762] {pull}40779[40779]
+- Add CSV decoder to awss3 input. {pull}40896[40896]
+- Change request trace logging to include headers instead of complete request. {pull}41072[41072]
+- Improved GCS input documentation. {pull}41143[41143]
+- Add CSV decoding capability to azureblobstorage input. {pull}40978[40978]
+- Add CSV decoding capability to gcs input. {pull}40979[40979]
+- Add support to source AWS cloudwatch logs from linked accounts. {pull}41188[41188]
+- Journald input now supports filtering by facilities. {pull}41061[41061]
+- Add support to include AWS cloudwatch linked accounts when using log_group_name_prefix to define log group names. {pull}41206[41206]
+
+*Heartbeat*
+
+- Add journey duration to synthetics browser events. {pull}40230[40230]
+
+*Metricbeat*
+
+- Add new metrics for datastore and minor changes to overall vSphere metrics. {pull}40766[40766]
+- Add new metricset datastorecluster for vSphere module. {pull}40634[40634] {pull}40694[40694]
+- Add AWS Cloudwatch capability to retrieve tags from AWS/ApiGateway resources. {pull}40755[40755]
+- Add new metrics for the vSphere Virtualmachine metricset. {pull}40485[40485]
+- Add `metrics_count` to Prometheus module if `metrics_count: true` is set. {pull}40411[40411]
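For reviewers of the 8.16.0 notes above, the SQS/S3 rework changes which knobs matter. A minimal sketch of the resulting tuning surface, assuming the documented `aws-s3` input options and the standard Beats memory-queue setting; the queue URL and numbers are placeholders, not taken from this changeset:

```yaml
filebeat.inputs:
  - type: aws-s3
    # SQS mode: placeholder queue URL.
    queue_url: "https://sqs.us-east-1.amazonaws.com/123456789012/example-queue"
    # Per the 40699 rework, number_of_workers scales ingestion in both S3 and
    # SQS modes; max_number_of_messages is now ignored in SQS mode. Lower this
    # value to throttle network bandwidth consumption.
    number_of_workers: 5

# Shrinking the internal memory queue caps how many events are held in memory
# at once (example value only).
queue.mem:
  events: 2048
```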
+
+
+[[release-notes-8.15.4]]
+=== Beats version 8.15.4
+https://github.com/elastic/beats/compare/v8.15.3\...v8.15.4[View commits]
+
+==== Breaking changes
+
+*Osquerybeat*
+
+- Disable `allow_unsafe` osquery configuration. {pull}40130[40130]
+
+==== Bugfixes
+
+*Affecting all Beats*
+
+- Fix issue where old data could be saved in the memory queue after acknowledgment, increasing memory use. {pull}41356[41356]
+
+*Filebeat*
+
+- Log bad handshake details when websocket connection fails. {pull}41300[41300]
+- Improve modification time handling for entities and entity deletion logic in the Active Directory entityanalytics input. {pull}41179[41179]
+- Fix double encoding of `client_secret` in the Entity Analytics input's Azure Active Directory provider. {pull}41393[41393]
+- The azure-eventhub input now correctly reports its status to the Elastic Agent on fatal errors. {pull}41469[41469]
+
+*Metricbeat*
+
+- Fix Kubernetes metadata sometimes not being present after startup. {pull}41216[41216]
+
+*Winlogbeat*
+
+- Fix truncated windows event log message. {pull}41327[41327]
+
+==== Added
+
+*Affecting all Beats*
+
+- Replace Ubuntu 20.04 with 24.04 for Docker base images.
{issue}40743[40743] {pull}40942[40942] +- Reduce memory consumption of k8s autodiscovery and the `add_kubernetes_metadata` processor when Deployment metadata is enabled. + +*Heartbeat* + +- Add monitor status reporter under managed mode. {pull}41077[41077] + +*Metricbeat* + +- Only watch metadata for ReplicaSets in metricbeat k8s module. {pull}41289[41289] + + [[release-notes-8.15.3]] === Beats version 8.15.3 https://github.com/elastic/beats/compare/v8.15.2\...v8.15.3[View commits] @@ -85,7 +224,7 @@ https://github.com/elastic/beats/compare/v8.15.0\...v8.15.1[View commits] *Affecting all Beats* -- Beats Docker images do not log to stderr by default. The workaround is to pass the CLI flag `-e` or to set `logging.to_stderr: true` in the configuration file. +- Beats Docker images do not log to stderr by default. The workaround is to pass the CLI flag `-e` or to set `logging.to_stderr: true` in the configuration file. - Beats stop publishing data after a network error unless restarted. Avoid upgrading to 8.15.1. Affected Beats log `Get \"https://${ELASTICSEARCH_HOST}:443\": context canceled` repeatedly. {issue}40705{40705} - Memory usage is not correctly limited by the number of events actively in the memory queue, but rather the maximum size of the memory queue regardless of usage. {issue}41355[41355] diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 05345fb5ec0..202784c979e 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -15,6 +15,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Filebeat now needs `dup3`, `faccessat2`, `prctl` and `setrlimit` syscalls to run the journald input. If this input is not being used, the syscalls are not needed. All Beats have those syscalls allowed now because the default seccomp policy is global to all Beats. {pull}40061[40061] - Beats will rate limit the logs about errors when indexing events on Elasticsearch, logging a summary every 10s. The logs sent to the event log is unchanged. {issue}40157[40157] - Drop support for Debian 10 and upgrade statically linked glibc from 2.28 to 2.31 {pull}41402[41402] +- Fix metrics not being ingested, due to "Limit of total fields [10000] has been exceeded while adding new fields [...]". The total fields limit has been increased to 12500. No significant performance impact on Elasticsearch is anticipated. {pull}41640[41640] *Auditbeat* @@ -36,7 +37,6 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix high IO and handling of a corrupted registry log file. {pull}35893[35893] - Enable file ingestion to report detailed status to Elastic Agent {pull}40075[40075] - Filebeat, when running with Elastic-Agent, reports status for Filestream input. {pull}40121[40121] -- Implement Elastic Agent status and health reporting for Winlog Filebeat input. {pull}40163[40163] - Fix filestream's registry GC: registry entries will never be removed if clean_inactive is set to "-1". {pull}40258[40258] - Added `ignore_empty_values` flag in `decode_cef` Filebeat processor. {pull}40268[40268] - Added support for hyphens in extension keys in `decode_cef` Filebeat processor. {pull}40427[40427] @@ -48,6 +48,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Change log.file.path field in awscloudwatch input to nested object. {pull}41099[41099] - Remove deprecated awscloudwatch field from Filebeat. 
{pull}41089[41089] - The performance of ingesting SQS data with the S3 input has improved by up to 60x for queues with many small events. `max_number_of_messages` config for SQS mode is now ignored, as the new design no longer needs a manual cap on messages. Instead, use `number_of_workers` to scale ingestion rate in both S3 and SQS modes. The increased efficiency may increase network bandwidth consumption, which can be throttled by lowering `number_of_workers`. It may also increase number of events stored in memory, which can be throttled by lowering the configured size of the internal queue. {pull}40699[40699] +- Fixes filestream logging the error "filestream input with ID 'ID' already exists, this will lead to data duplication[...]" on Kubernetes when using autodiscover. {pull}41585[41585] - Add kafka compression support for ZSTD. @@ -57,8 +58,6 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Metricbeat* - Setting period for counter cache for Prometheus remote_write at least to 60sec {pull}38553[38553] -- Add support of Graphite series 1.1.0+ tagging extension for statsd module. {pull}39619[39619] -- Allow metricsets to report their status via control v2 protocol. {pull}40025[40025] - Remove fallback to the node limit for the `kubernetes.pod.cpu.usage.limit.pct` and `kubernetes.pod.memory.usage.limit.pct` metrics calculation - Add support for Kibana status metricset in v8 format {pull}40275[40275] - Mark system process metricsets as running if metrics are partially available {pull}40565[40565] @@ -78,11 +77,11 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Packetbeat* +- Expire source port mappings. {pull}41581[41581] *Winlogbeat* - Add "event.category" and "event.type" to Sysmon module for EventIDs 8, 9, 19, 20, 27, 28, 255 {pull}35193[35193] -- Fix truncated windows event log message {pull}41327[41327] *Functionbeat* @@ -111,10 +110,10 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix issue where old data could be saved in the memory queue after acknowledgment, increasing memory use {pull}41356[41356] - Ensure Elasticsearch output can always recover from network errors {pull}40794[40794] - Add `translate_ldap_attribute` processor. {pull}41472[41472] +- Remove unnecessary debug logs during idle connection teardown {issue}40824[40824] *Auditbeat* -- Request status from a separate socket to avoid data congestion {pull}41207[41207] *Filebeat* @@ -142,10 +141,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Updated Websocket input title to align with existing inputs {pull}39006[39006] - Restore netflow input on Windows {pull}39024[39024] - Upgrade azure-event-hubs-go and azure-storage-blob-go dependencies. {pull}38861[38861] -- Fix concurrency/error handling bugs in the AWS S3 input that could drop data and prevent ingestion of large buckets. {pull}39131[39131] -- Fix EntraID query handling. {issue}39419[39419] {pull}39420[39420] - Fix request trace filename handling in http_endpoint input. {pull}39410[39410] -- Fix filestream not correctly tracking the offset of a file when using the `include_message` parser. {pull}39873[39873] {issue}39653[39653] - Upgrade github.com/hashicorp/go-retryablehttp to mitigate CVE-2024-6104 {pull}40036[40036] - Fix for Google Workspace duplicate events issue by adding canonical sorting over fingerprint keys array to maintain key order. 
{pull}40055[40055] {issue}39859[39859] - Fix handling of deeply nested numeric values in HTTP Endpoint CEL programs. {pull}40115[40115] @@ -172,7 +168,8 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Improve modification time handling for entities and entity deletion logic in the Active Directory entityanalytics input. {pull}41179[41179] - Journald input now can read events from all boots {issue}41083[41083] {pull}41244[41244] - Fix double encoding of client_secret in the Entity Analytics input's Azure Active Directory provider {pull}41393[41393] -- Fix errors in SQS host resolution in the `aws-s3` input when using custom (non-AWS) endpoints. {pull}41504[41504] +- Fix aws region in aws-s3 input s3 polling mode. {pull}41572[41572] +- Fix the "No such input type exist: 'salesforce'" error on the Windows/AIX platform. {pull}41664[41664] *Heartbeat* @@ -188,8 +185,6 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix handling of access errors when reading process metrics {pull}39627[39627] - Fix behavior of cgroups path discovery when monitoring the host system from within a container {pull}39627[39627] - Fix issue where beats may report incorrect metrics for its own process when running inside a container {pull}39627[39627] -- Fix for MySQL/Performance - Query failure for MySQL versions below v8.0.1, for performance metric `quantile_95`. {pull}38710[38710] -- Fix Prometheus helper text parser to store each metric family type. {pull}39743[39743] - Normalize AWS RDS CPU Utilization values before making the metadata API call. {pull}39664[39664] - Fix behavior of pagetypeinfo metrics {pull}39985[39985] - Fix query logic for temp and non-temp tablespaces in Oracle module. {issue}38051[38051] {pull}39787[39787] @@ -206,6 +201,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix http server helper SSL config. {pull}39405[39405] - Fix Kubernetes metadata sometimes not being present after startup {pull}41216[41216] - Do not report non-existant 0 values for RSS metrics in docker/memory {pull}41449[41449] +- Log Cisco Meraki `getDevicePerformanceScores` errors without stopping metrics collection. {pull}41622[41622] *Osquerybeat* @@ -239,6 +235,8 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Replace Ubuntu 20.04 with 24.04 for Docker base images {issue}40743[40743] {pull}40942[40942] - Reduce memory consumption of k8s autodiscovery and the add_kubernetes_metadata processor when Deployment metadata is enabled - Add `lowercase` processor. {issue}22254[22254] {pull}41424[41424] +- Add `uppercase` processor. {issue}22254[22254] {pull}41535[41535] +- Replace `compress/gzip` with https://github.com/klauspost/compress/gzip library for gzip compression {pull}41584[41584] *Auditbeat* @@ -280,14 +278,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Parse more fields from Elasticsearch slowlogs {pull}38295[38295] - added benchmark input {pull}37437[37437] - added benchmark input and discard output {pull}37437[37437] -- Ensure all responses sent by HTTP Endpoint are HTML-escaped. {pull}39329[39329] - Update CEL mito extensions to v1.11.0 to improve type checking. {pull}39460[39460] -- Improve logging of request and response with request trace logging in error conditions. {pull}39455[39455] -- Implement Elastic Agent status and health reporting for CEL Filebeat input. 
{pull}39209[39209] -- Add HTTP metrics to CEL input. {issue}39501[39501] {pull}39503[39503] -- Add default user-agent to CEL HTTP requests. {issue}39502[39502] {pull}39587[39587] -- Improve reindexing support in security module pipelines. {issue}38224[38224] {pull}39588[39588] -- Make HTTP Endpoint input GA. {issue}38979[38979] {pull}39410[39410] - Update CEL mito extensions to v1.12.2. {pull}39755[39755] - Add support for base64-encoded HMAC headers to HTTP Endpoint. {pull}39655[39655] - Add user group membership support to Okta entity analytics provider. {issue}39814[39814] {pull}39815[39815] @@ -330,21 +321,24 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Add support to include AWS cloudwatch linked accounts when using log_group_name_prefix to define log group names. {pull}41206[41206] - Improved Azure Blob Storage input documentation. {pull}41252[41252] - Make ETW input GA. {pull}41389[41389] +- Added input metrics to GCS input. {issue}36640[36640] {pull}41505[41505] - Add support for Okta entity analytics provider to collect role and factor data for users. {pull}41460[41460] +- Add support for Journald in the System module. {pull}41555[41555] +- Add ability to remove request trace logs from http_endpoint input. {pull}40005[40005] +- Add ability to remove request trace logs from entityanalytics input. {pull}40004[40004] *Auditbeat* *Libbeat* +- enrich events with EC2 tags in add_cloud_metadata processor {pull}41477[41477] *Heartbeat* - Added status to monitor run log report. - Upgrade node to latest LTS v18.20.3. {pull}40038[40038] -- Add journey duration to synthetics browser events. {pull}40230[40230] -- Add monitor status reporter under managed mode. {pull}41077[41077] *Metricbeat* @@ -379,7 +373,9 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Added Palo Alto Networks module {pull}40686[40686] - Restore docker.network.in.* and docker.network.out.* fields in docker module {pull}40968[40968] - Add `id` field to all the vSphere metricsets. {pull}41097[41097] +- Bump aerospike-client-go to version v7.7.1 and add support for basic auth in Aerospike module {pull}41233[41233] - Only watch metadata for ReplicaSets in metricbeat k8s module {pull}41289[41289] +- Add support for region/zone for Vertex AI service in GCP module {pull}41551[41551] *Metricbeat* @@ -393,6 +389,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Winlogbeat* - Add handling for missing `EvtVarType`s in experimental api. {issue}19337[19337] {pull}41418[41418] +- Implement exclusion range support for event_id. {issue}38623[38623] {pull}41639[41639] *Functionbeat* @@ -441,3 +438,9 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] + + + + + + diff --git a/NOTICE.txt b/NOTICE.txt index f33fb7667c4..5e7a5bd65b9 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1865,11 +1865,11 @@ SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 -Version: v4.6.0 +Version: v4.8.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4@v4.6.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4@v4.8.0/LICENSE.txt: MIT License @@ -2806,12 +2806,12 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- -Dependency : github.com/aerospike/aerospike-client-go -Version: v1.27.1-0.20170612174108-0f3b54da6bdc +Dependency : github.com/aerospike/aerospike-client-go/v7 +Version: v7.7.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aerospike/aerospike-client-go@v1.27.1-0.20170612174108-0f3b54da6bdc/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/aerospike/aerospike-client-go/v7@v7.7.1/LICENSE: Apache License @@ -3002,7 +3002,7 @@ Contents of probable licence file $GOMODCACHE/github.com/aerospike/aerospike-cli same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2014-2016 Aerospike, Inc. + Copyright 2014-2020 Aerospike, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12800,12 +12800,12 @@ Contents of probable licence file $GOMODCACHE/github.com/dolmen-go/contextio@v0. 
-------------------------------------------------------------------------------- -Dependency : github.com/andrewkroh/goja +Dependency : github.com/elastic/goja Version: v0.0.0-20190128172624-dd2ac4456e20 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/andrewkroh/goja@v0.0.0-20190128172624-dd2ac4456e20/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/goja@v0.0.0-20190128172624-dd2ac4456e20/LICENSE: Copyright (c) 2016 Dmitry Panov @@ -13443,11 +13443,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.17.1 +Version: v0.17.3 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.17.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.17.3/LICENSE: Apache License Version 2.0, January 2004 @@ -13654,11 +13654,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-l -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-system-metrics -Version: v0.11.1 +Version: v0.11.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-system-metrics@v0.11.1/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-system-metrics@v0.11.4/LICENSE.txt: Apache License Version 2.0, January 2004 @@ -21070,6 +21070,320 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/klauspost/compress +Version: v1.17.9 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/klauspost/compress@v1.17.9/LICENSE: + +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/lib/pq Version: v1.10.3 @@ -23268,15 +23582,15 @@ SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/tklauser/go-sysconf -Version: v0.3.10 +Version: v0.3.12 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/tklauser/go-sysconf@v0.3.10/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/tklauser/go-sysconf@v0.3.12/LICENSE: BSD 3-Clause License -Copyright (c) 2018-2021, Tobias Klauser +Copyright (c) 2018-2022, Tobias Klauser All rights reserved. Redistribution and use in source and binary forms, with or without @@ -48249,320 +48563,6 @@ SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/klauspost/compress -Version: v1.17.9 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/klauspost/compress@v1.17.9/LICENSE: - -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2019 Klaus Post. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. 
- * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------- - -Files: gzhttp/* - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016-2017 The New York Times Company - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ------------------- - -Files: s2/cmd/internal/readahead/* - -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------- -Files: snappy/* -Files: internal/snapref/* - -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------ - -Files: s2/cmd/internal/filepathx/* - -Copyright 2016 The filepathx Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -------------------------------------------------------------------------------- Dependency : github.com/klauspost/cpuid/v2 Version: v2.2.5 @@ -52576,6 +52576,449 @@ DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/shirou/gopsutil/v4 +Version: v4.24.7 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/shirou/gopsutil/v4@v4.24.7/LICENSE: + +gopsutil is distributed under BSD license reproduced below. + +Copyright (c) 2014, WAKAYAMA Shirou +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the gopsutil authors nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +------- +internal/common/binary.go in the gopsutil is copied and modified from golang/encoding/binary.go. + + + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- +Dependency : github.com/shoenig/go-m1cpu +Version: v0.1.6 +Licence type (autodetected): MPL-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/shoenig/go-m1cpu@v0.1.6/LICENSE: + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. 
+ +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. 
under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. 
Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. 
This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + + + -------------------------------------------------------------------------------- Dependency : github.com/shoenig/test Version: v1.7.1 @@ -53166,11 +53609,11 @@ SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/tklauser/numcpus -Version: v0.4.0 +Version: v0.6.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/tklauser/numcpus@v0.4.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/tklauser/numcpus@v0.6.1/LICENSE: Apache License @@ -54182,11 +54625,11 @@ SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/yuin/gopher-lua -Version: v0.0.0-20170403160031-b402f3114ec7 +Version: v1.1.1 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/yuin/gopher-lua@v0.0.0-20170403160031-b402f3114ec7/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/yuin/gopher-lua@v1.1.1/LICENSE: The MIT License (MIT) @@ -54213,11 +54656,11 @@ SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : github.com/yusufpapurcu/wmi -Version: v1.2.2 +Version: v1.2.4 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/yusufpapurcu/wmi@v1.2.2/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/yusufpapurcu/wmi@v1.2.4/LICENSE: The MIT License (MIT) diff --git a/auditbeat/Dockerfile b/auditbeat/Dockerfile index 2241aa16ad1..a6dae96e09b 100644 --- a/auditbeat/Dockerfile +++ b/auditbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.8 +FROM golang:1.22.9 RUN \ apt-get update \ diff --git a/dev-tools/kubernetes/filebeat/Dockerfile.debug b/dev-tools/kubernetes/filebeat/Dockerfile.debug index dd5b128caca..be9229eff20 100644 --- a/dev-tools/kubernetes/filebeat/Dockerfile.debug +++ b/dev-tools/kubernetes/filebeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.22.8 as builder +FROM golang:1.22.9 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/dev-tools/kubernetes/heartbeat/Dockerfile.debug b/dev-tools/kubernetes/heartbeat/Dockerfile.debug index 193516f058a..48ac0852d11 100644 --- a/dev-tools/kubernetes/heartbeat/Dockerfile.debug +++ b/dev-tools/kubernetes/heartbeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.22.8 as builder +FROM golang:1.22.9 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/dev-tools/kubernetes/metricbeat/Dockerfile.debug b/dev-tools/kubernetes/metricbeat/Dockerfile.debug index a8c567b9da9..2eef906bd1c 100644 --- a/dev-tools/kubernetes/metricbeat/Dockerfile.debug +++ b/dev-tools/kubernetes/metricbeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.22.8 as builder +FROM golang:1.22.9 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/filebeat/docs/include/use-journald.asciidoc b/filebeat/docs/include/use-journald.asciidoc new file mode 100644 index 00000000000..acfc8b2d94a --- /dev/null +++ b/filebeat/docs/include/use-journald.asciidoc @@ -0,0 +1,4 @@ +*`var.use_journald`*:: + +A boolean that, when set to `true`, will read logs from Journald. When +Journald is used, all events contain the tag `journald`. \ No newline at end of file diff --git a/filebeat/docs/modules/system.asciidoc b/filebeat/docs/modules/system.asciidoc index 1866f2d5c25..f0fa06b8c4f 100644 --- a/filebeat/docs/modules/system.asciidoc +++ b/filebeat/docs/modules/system.asciidoc @@ -64,11 +64,13 @@ include::../include/config-option-intro.asciidoc[] ==== `syslog` fileset settings include::../include/var-paths.asciidoc[] +include::../include/use-journald.asciidoc[] [float] ==== `auth` fileset settings include::../include/var-paths.asciidoc[] +include::../include/use-journald.asciidoc[] *`var.tags`*:: diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml index 14e9f276fb4..a8fc37fbbe8 100644 --- a/filebeat/filebeat.reference.yml +++ b/filebeat/filebeat.reference.yml @@ -21,6 +21,9 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: + # Use journald to collect system logs + #var.use_journald: false + # Input configuration (advanced). Any input configuration option # can be added under this section. #input: @@ -33,6 +36,9 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS.
#var.paths: + # Use journald to collect auth logs + #var.use_journald: false + # Input configuration (advanced). Any input configuration option # can be added under this section. #input: diff --git a/filebeat/input/v2/compat/compat.go b/filebeat/input/v2/compat/compat.go index 416b6628e19..fde3f279233 100644 --- a/filebeat/input/v2/compat/compat.go +++ b/filebeat/input/v2/compat/compat.go @@ -26,6 +26,7 @@ import ( "fmt" "sync" + "github.com/gofrs/uuid/v5" "github.com/mitchellh/hashstructure" v2 "github.com/elastic/beats/v7/filebeat/input/v2" @@ -73,12 +74,19 @@ } func RunnerFactory( } func (f *factory) CheckConfig(cfg *conf.C) error { - _, err := f.loader.Configure(cfg) + // Only check the config; to avoid potential side effects (ID duplication), + // change the ID. + checkCfg, err := f.generateCheckConfig(cfg) + if err != nil { + f.log.Warnw(fmt.Sprintf("input V2 factory.CheckConfig failed to clone config before checking it. Original config will be checked, it might trigger an input duplication warning: %v", err), "original_config", conf.DebugString(cfg, true)) + checkCfg = cfg + } + _, err = f.loader.Configure(checkCfg) if err != nil { return fmt.Errorf("runner factory could not check config: %w", err) } - if err = f.loader.Delete(cfg); err != nil { + if err = f.loader.Delete(checkCfg); err != nil { return fmt.Errorf( "runner factory failed to delete an input after config check: %w", err) @@ -176,3 +184,28 @@ func configID(config *conf.C) (string, error) { return fmt.Sprintf("%16X", id), nil } + +func (f *factory) generateCheckConfig(config *conf.C) (*conf.C, error) { + // copy the config so it's safe to change it + testCfg, err := conf.NewConfigFrom(config) + if err != nil { + return nil, fmt.Errorf("failed to create new config: %w", err) + } + + // try to override the `id` field; if it fails, give up + inputID, err := testCfg.String("id", -1) + if err != nil { + return nil, fmt.Errorf("failed to get 'id': %w", err) + } + + id, err := uuid.NewV4() + if err != nil { + return nil, fmt.Errorf("failed to generate check config id: %w", err) + } + err = testCfg.SetString("id", -1, inputID+"-"+id.String()) + if err != nil { + return nil, fmt.Errorf("failed to set 'id': %w", err) + } + + return testCfg, nil +} diff --git a/filebeat/input/v2/compat/compat_test.go b/filebeat/input/v2/compat/compat_test.go index c5092583c0e..554701cdae8 100644 --- a/filebeat/input/v2/compat/compat_test.go +++ b/filebeat/input/v2/compat/compat_test.go @@ -18,6 +18,8 @@ package compat import ( + "errors" + "fmt" "sync" "testing" @@ -62,6 +64,72 @@ func TestRunnerFactory_CheckConfig(t *testing.T) { assert.Equal(t, 0, countRun) }) + t.Run("does not cause input ID duplication", func(t *testing.T) { + log := logp.NewLogger("test") + var countConfigure, countTest, countRun int + var runWG sync.WaitGroup + var ids = map[string]int{} + var idsMu sync.Mutex + + // setup + plugins := inputest.SinglePlugin("test", &inputest.MockInputManager{ + OnConfigure: func(cfg *conf.C) (v2.Input, error) { + idsMu.Lock() + defer idsMu.Unlock() + id, err := cfg.String("id", -1) + assert.NoError(t, err, "OnConfigure: could not get 'id' from config") + idsCount := ids[id] + ids[id] = idsCount + 1 + + countConfigure++ + return &inputest.MockInput{ + OnTest: func(_ v2.TestContext) error { countTest++; return nil }, + OnRun: func(_ v2.Context, _ beat.PipelineConnector) error { + runWG.Done() + countRun++ + return nil + }, + }, nil + }, + }) + loader := inputest.MustNewTestLoader(t, plugins, "type", "test") +
factory := RunnerFactory(log, beat.Info{}, loader.Loader) + + inputID := "filestream-kubernetes-pod-aee2af1c6365ecdd72416f44aab49cd8bdc7522ab008c39784b7fd9d46f794a4" + inputCfg := fmt.Sprintf(` +id: %s +parsers: + - container: null +paths: + - /var/log/containers/*aee2af1c6365ecdd72416f44aab49cd8bdc7522ab008c39784b7fd9d46f794a4.log +prospector: + scanner: + symlinks: true +take_over: true +type: test +`, inputID) + + runner, err := factory.Create(nil, conf.MustNewConfigFrom(inputCfg)) + require.NoError(t, err, "could not create input") + + runWG.Add(1) + runner.Start() + defer runner.Stop() + // wait for the input to be running + runWG.Wait() + + err = factory.CheckConfig(conf.MustNewConfigFrom(inputCfg)) + require.NoError(t, err, "unexpected error when calling CheckConfig") + + // validate: CheckConfig configured the input again, but did not test or run it + assert.Equal(t, 2, countConfigure, "OnConfigure should be called only 2 times") + assert.Equal(t, 0, countTest, "OnTest should not have been called") + assert.Equal(t, 1, countRun, "OnRun should be called only once") + idsMu.Lock() + assert.Equal(t, 1, ids[inputID]) + idsMu.Unlock() + }) + t.Run("fail if input type is unknown to loader", func(t *testing.T) { log := logp.NewLogger("test") plugins := inputest.SinglePlugin("test", inputest.ConstInputManager(nil)) @@ -118,3 +186,48 @@ func TestRunnerFactory_CreateAndRun(t *testing.T) { assert.Error(t, err) }) } + +func TestGenerateCheckConfig(t *testing.T) { + tcs := []struct { + name string + cfg *conf.C + want *conf.C + wantErr error + assertCfg func(t assert.TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool + }{ + { + name: "id is present", + cfg: conf.MustNewConfigFrom("id: some-id"), + assertCfg: assert.NotEqual, + }, + { + name: "absent id", + cfg: conf.MustNewConfigFrom(""), + wantErr: errors.New("failed to get 'id'"), + assertCfg: func(t assert.TestingT, _ interface{}, got interface{}, msgAndArgs ...interface{}) bool { + return assert.Nil(t, got, msgAndArgs...) + }, + }, + { + name: "invalid config", + cfg: nil, + wantErr: errors.New("failed to create new config"), + assertCfg: func(t assert.TestingT, _ interface{}, got interface{}, msgAndArgs ...interface{}) bool { + return assert.Nil(t, got, msgAndArgs...)
+ }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + f := factory{} + + got, err := f.generateCheckConfig(tc.cfg) + if tc.wantErr != nil { + assert.ErrorContains(t, err, tc.wantErr.Error()) + } + + tc.assertCfg(t, tc.cfg, got) + }) + } +} diff --git a/filebeat/input/winlog/input.go b/filebeat/input/winlog/input.go index 945dd0e3476..ab925cbdd3c 100644 --- a/filebeat/input/winlog/input.go +++ b/filebeat/input/winlog/input.go @@ -26,7 +26,6 @@ import ( input "github.com/elastic/beats/v7/filebeat/input/v2" cursor "github.com/elastic/beats/v7/filebeat/input/v2/input-cursor" "github.com/elastic/beats/v7/libbeat/feature" - "github.com/elastic/beats/v7/libbeat/management/status" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/go-concert/ctxtool" "github.com/elastic/go-concert/timed" @@ -40,10 +39,6 @@ type eventlogRunner struct{} const pluginName = "winlog" -const channelNotFoundError = "Encountered channel not found error when opening Windows Event Log" -const eventLogReadingError = "Error occurred while reading from Windows Event Log" -const resetError = "Error resetting Windows Event Log handle" - // Plugin create a stateful input Plugin collecting logs from Windows Event Logs. func Plugin(log *logp.Logger, store cursor.StateStore) input.Plugin { return input.Plugin{ @@ -104,7 +99,6 @@ func (eventlogRunner) Run( // Flag used to detect repeat "channel not found" errors, eliminating log spam. channelNotFoundErrDetected := false - ctx.UpdateStatus(status.Running, "") runLoop: for { @@ -115,9 +109,6 @@ runLoop: evtCheckpoint := initCheckpoint(log, cursor) openErr := api.Open(evtCheckpoint) - // Mark the input running. - // Status will be changed to "Degraded" if any error are encountered during opening/reading - ctx.UpdateStatus(status.Running, "") switch { case eventlog.IsRecoverable(openErr): @@ -126,16 +117,14 @@ runLoop: continue case !api.IsFile() && eventlog.IsChannelNotFound(openErr): if !channelNotFoundErrDetected { - log.Errorw(channelNotFoundError, "error", openErr) + log.Errorw("Encountered channel not found error when opening Windows Event Log", "error", openErr) } else { - log.Debugw(channelNotFoundError, "error", openErr) + log.Debugw("Encountered channel not found error when opening Windows Event Log", "error", openErr) } - ctx.UpdateStatus(status.Degraded, fmt.Sprintf("%s: %v", channelNotFoundError, openErr)) channelNotFoundErrDetected = true _ = timed.Wait(cancelCtx, 5*time.Second) continue case openErr != nil: - ctx.UpdateStatus(status.Degraded, fmt.Sprintf("failed to open Windows Event Log channel %q: %v", api.Channel(), openErr)) return fmt.Errorf("failed to open Windows Event Log channel %q: %w", api.Channel(), openErr) } channelNotFoundErrDetected = false @@ -148,16 +137,14 @@ runLoop: if eventlog.IsRecoverable(err) { log.Errorw("Encountered recoverable error when reading from Windows Event Log", "error", err) if resetErr := api.Reset(); resetErr != nil { - log.Errorw(resetError, "error", resetErr) - ctx.UpdateStatus(status.Degraded, fmt.Sprintf("%s: %v", resetError, resetErr)) + log.Errorw("Error resetting Windows Event Log handle", "error", resetErr) } continue runLoop } if !api.IsFile() && eventlog.IsChannelNotFound(err) { log.Errorw("Encountered channel not found error when reading from Windows Event Log", "error", err) if resetErr := api.Reset(); resetErr != nil { - log.Errorw(resetError, "error", resetErr) - ctx.UpdateStatus(status.Degraded, fmt.Sprintf("%s: %v", 
resetError, resetErr)) + log.Errorw("Error resetting Windows Event Log handle", "error", resetErr) } continue runLoop } @@ -173,8 +160,7 @@ runLoop: return nil } - log.Errorw(eventLogReadingError, "error", err) - ctx.UpdateStatus(status.Degraded, fmt.Sprintf("%s: %v", eventLogReadingError, err)) + log.Errorw("Error occurred while reading from Windows Event Log", "error", err) return err } if len(records) == 0 { @@ -187,7 +173,6 @@ if err := publisher.Publish(event, record.Offset); err != nil { // Publisher indicates disconnect when returning an error. // stop trying to publish records and quit - ctx.UpdateStatus(status.Degraded, fmt.Sprintf("Error occurred while publishing from winlog: %v", err)) return err } } diff --git a/filebeat/module/system/README.md b/filebeat/module/system/README.md new file mode 100644 index 00000000000..2471264cfcf --- /dev/null +++ b/filebeat/module/system/README.md @@ -0,0 +1,14 @@ +# Journald tests (Debian 12) +The tests for the journald input (currently only used for Debian 12 +testing) require journal files (test files ending in `.journal`); those +files are generated using `systemd-journal-remote` (see the [Journald +input README.md](../../input/journald/README.md) for more details). + +The sources for those journal files are the `.export` files in the test +folder. Those files are the raw output of `journalctl -o export`. They +are added here because the journal file format changes with different +versions of journald, which can cause `journalctl` to fail to read +them, leading to test failures. So if tests start failing because +`journalctl` cannot read the journal files as expected, new ones can +easily be generated with the same version of journalctl used on CI +and the original dataset. diff --git a/filebeat/module/system/_meta/config.reference.yml b/filebeat/module/system/_meta/config.reference.yml index 3c7a0b43d49..7536f213639 100644 --- a/filebeat/module/system/_meta/config.reference.yml +++ b/filebeat/module/system/_meta/config.reference.yml @@ -7,6 +7,9 @@ # Filebeat will choose the paths depending on your OS. #var.paths: + # Use journald to collect system logs + #var.use_journald: false + # Input configuration (advanced). Any input configuration option # can be added under this section. #input: @@ -19,6 +22,9 @@ # Filebeat will choose the paths depending on your OS. #var.paths: + # Use journald to collect auth logs + #var.use_journald: false + # Input configuration (advanced). Any input configuration option # can be added under this section. #input: diff --git a/filebeat/module/system/_meta/config.yml b/filebeat/module/system/_meta/config.yml index c1fe882374d..00856cbe016 100644 --- a/filebeat/module/system/_meta/config.yml +++ b/filebeat/module/system/_meta/config.yml @@ -7,6 +7,9 @@ # Filebeat will choose the paths depending on your OS. #var.paths: + # Use journald to collect system logs + #var.use_journald: false + # Authorization logs auth: enabled: false @@ -14,3 +17,6 @@ # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS.
#var.paths: + + # Use journald to collect auth logs + #var.use_journald: false diff --git a/filebeat/module/system/_meta/docs.asciidoc b/filebeat/module/system/_meta/docs.asciidoc index 6d9209eafe2..b510e010434 100644 --- a/filebeat/module/system/_meta/docs.asciidoc +++ b/filebeat/module/system/_meta/docs.asciidoc @@ -57,11 +57,13 @@ include::../include/config-option-intro.asciidoc[] ==== `syslog` fileset settings include::../include/var-paths.asciidoc[] +include::../include/use-journald.asciidoc[] [float] ==== `auth` fileset settings include::../include/var-paths.asciidoc[] +include::../include/use-journald.asciidoc[] *`var.tags`*:: diff --git a/filebeat/module/system/auth/config/auth.yml b/filebeat/module/system/auth/config/auth.yml index 466b55078af..7e7cc6bf4fd 100644 --- a/filebeat/module/system/auth/config/auth.yml +++ b/filebeat/module/system/auth/config/auth.yml @@ -1,14 +1,22 @@ +{{ if .use_journald }} +type: journald +id: system-auth +facilities: + - 4 + - 10 +{{ else }} type: log paths: {{ range $i, $path := .paths }} - {{$path}} {{ end }} exclude_files: [".gz$"] - multiline: pattern: "^\\s" match: after +{{ end }} +# Common configuration processors: - add_locale: ~ diff --git a/filebeat/module/system/auth/ingest/entrypoint.yml b/filebeat/module/system/auth/ingest/entrypoint.yml new file mode 100644 index 00000000000..75ed68b6f9a --- /dev/null +++ b/filebeat/module/system/auth/ingest/entrypoint.yml @@ -0,0 +1,9 @@ +description: Entrypoint Pipeline for system/auth Filebeat module +processors: + - pipeline: + if: ctx?.input?.type == "journald" + name: '{< IngestPipeline "journald" >}' + + - pipeline: + if: ctx?.input?.type == "log" + name: '{< IngestPipeline "files" >}' diff --git a/filebeat/module/system/auth/ingest/pipeline.yml b/filebeat/module/system/auth/ingest/files.yml similarity index 88% rename from filebeat/module/system/auth/ingest/pipeline.yml rename to filebeat/module/system/auth/ingest/files.yml index c89ef94b28a..39611f484a8 100644 --- a/filebeat/module/system/auth/ingest/pipeline.yml +++ b/filebeat/module/system/auth/ingest/files.yml @@ -18,18 +18,9 @@ processors: TIMESTAMP: (?:%{TIMESTAMP_ISO8601}|%{SYSLOGTIMESTAMP}) patterns: - '^%{TIMESTAMP:system.auth.timestamp} %{SYSLOGHOST:host.hostname}? %{DATA:process.name}(?:\[%{POSINT:process.pid:long}\])?:%{SPACE}%{GREEDYMULTILINE:_temp.message}$' - - grok: + - pipeline: description: Grok specific auth messages. - tag: grok-specific-messages - field: _temp.message - ignore_missing: true - patterns: - - '^%{DATA:system.auth.ssh.event} %{DATA:system.auth.ssh.method} for (invalid user)?%{DATA:user.name} from %{IPORHOST:source.address} port %{NUMBER:source.port:long} ssh2(: %{GREEDYDATA:system.auth.ssh.signature})?' - - '^%{DATA:system.auth.ssh.event} user %{DATA:user.name} from %{IPORHOST:source.address}' - - '^Did not receive identification string from %{IPORHOST:system.auth.ssh.dropped_ip}' - - '^%{DATA:user.name} :( %{DATA:system.auth.sudo.error} ;)? TTY=%{DATA:system.auth.sudo.tty} ; PWD=%{DATA:system.auth.sudo.pwd} ; USER=%{DATA:system.auth.sudo.user} ; COMMAND=%{GREEDYDATA:system.auth.sudo.command}' - - '^new group: name=%{DATA:group.name}, GID=%{NUMBER:group.id}' - - '^new user: name=%{DATA:user.name}, UID=%{NUMBER:user.id}, GID=%{NUMBER:group.id}, home=%{DATA:system.auth.useradd.home}, shell=%{DATA:system.auth.useradd.shell}$' + name: '{< IngestPipeline "grok-auth-messages" >}' on_failure: - rename: description: Leave the unmatched content in message. 
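The `auth.yml` template above switches the fileset between two input types: when `var.use_journald` is true it renders a `journald` input limited to syslog facilities 4 (auth) and 10 (authpriv); otherwise it keeps the existing `log` input. Below is a minimal sketch of what enabling the new variable could look like in a `modules.d/system.yml`-style configuration; the module, fileset, and `var.use_journald` names come from this patch, while the surrounding layout is illustrative:

```yaml
# Sketch only: enable journald collection for both system filesets.
- module: system
  syslog:
    enabled: true
    # Read system logs from journald instead of log files.
    var.use_journald: true
  auth:
    enabled: true
    # Read auth logs from journald; per the new docs include,
    # these events carry the `journald` tag.
    var.use_journald: true
```

Events collected this way enter the new `entrypoint` ingest pipeline, which routes each document to the `journald` or `files` pipeline based on `input.type`.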
diff --git a/filebeat/module/system/auth/ingest/grok-auth-messages.yml b/filebeat/module/system/auth/ingest/grok-auth-messages.yml
new file mode 100644
index 00000000000..fc09abbff5e
--- /dev/null
+++ b/filebeat/module/system/auth/ingest/grok-auth-messages.yml
@@ -0,0 +1,14 @@
+description: Pipeline for grokking specific auth messages, shared by the files and journald pipelines of the system/auth Filebeat module
+processors:
+  - grok:
+      description: Grok specific auth messages.
+      tag: grok-specific-messages
+      field: _temp.message
+      ignore_missing: true
+      patterns:
+        - '^%{DATA:system.auth.ssh.event} %{DATA:system.auth.ssh.method} for (invalid user)?%{DATA:user.name} from %{IPORHOST:source.address} port %{NUMBER:source.port:long} ssh2(: %{GREEDYDATA:system.auth.ssh.signature})?'
+        - '^%{DATA:system.auth.ssh.event} user %{DATA:user.name} from %{IPORHOST:source.address}'
+        - '^Did not receive identification string from %{IPORHOST:system.auth.ssh.dropped_ip}'
+        - '^%{DATA:user.name} :( %{DATA:system.auth.sudo.error} ;)? TTY=%{DATA:system.auth.sudo.tty} ; PWD=%{DATA:system.auth.sudo.pwd} ; USER=%{DATA:system.auth.sudo.user} ; COMMAND=%{GREEDYDATA:system.auth.sudo.command}'
+        - '^new group: name=%{DATA:group.name}, GID=%{NUMBER:group.id}'
+        - '^new user: name=%{DATA:user.name}, UID=%{NUMBER:user.id}, GID=%{NUMBER:group.id}, home=%{DATA:system.auth.useradd.home}, shell=%{DATA:system.auth.useradd.shell}$'
diff --git a/filebeat/module/system/auth/ingest/journald.yml b/filebeat/module/system/auth/ingest/journald.yml
new file mode 100644
index 00000000000..c6f84f9af13
--- /dev/null
+++ b/filebeat/module/system/auth/ingest/journald.yml
@@ -0,0 +1,205 @@
+description: Journald Pipeline for system/auth Filebeat module
+processors:
+  - set:
+      field: event.ingested
+      copy_from: _ingest.timestamp
+  - rename:
+      field: "journald.process.name"
+      target_field: process.name
+  - set:
+      field: "process.pid"
+      copy_from: "journald.pid"
+      ignore_failure: true
+  - rename:
+      field: message
+      target_field: _temp.message
+  - pipeline:
+      description: Grok specific auth messages.
+      name: '{< IngestPipeline "grok-auth-messages" >}'
+      ignore_failure: true
+  - rename:
+      field: _temp.message
+      target_field: message
+  - grok:
+      description: Grok usernames from PAM messages.
+      tag: grok-pam-users
+      field: message
+      ignore_missing: true
+      ignore_failure: true
+      patterns:
+        - 'for user %{QUOTE}?%{DATA:_temp.foruser}%{QUOTE}?
by %{QUOTE}?%{DATA:_temp.byuser}%{QUOTE}?(?:\(uid=%{NUMBER:_temp.byuid}\))?$' + - 'for user %{QUOTE}?%{DATA:_temp.foruser}%{QUOTE}?$' + - 'by user %{QUOTE}?%{DATA:_temp.byuser}%{QUOTE}?$' + - '%{BOUNDARY} user %{QUOTE}%{DATA:_temp.user}%{QUOTE}' + pattern_definitions: + QUOTE: "['\"]" + BOUNDARY: "(?- + if (ctx.system.auth.ssh.event == "Accepted") { + ctx.event.type = ["info"]; + ctx.event.category = ["authentication", "session"]; + ctx.event.action = "ssh_login"; + ctx.event.outcome = "success"; + } else if (ctx.system.auth.ssh.event == "Invalid" || ctx.system.auth.ssh.event == "Failed") { + ctx.event.type = ["info"]; + ctx.event.category = ["authentication"]; + ctx.event.action = "ssh_login"; + ctx.event.outcome = "failure"; + } + - append: + field: event.category + value: iam + if: ctx.process?.name != null && ['groupadd', 'groupdel', 'groupmod', 'useradd', 'userdel', 'usermod'].contains(ctx.process.name) + - set: + field: event.outcome + value: success + if: ctx.process?.name != null && (ctx.message == null || !ctx.message.contains("fail")) && ['groupadd', 'groupdel', 'groupmod', 'useradd', 'userdel', 'usermod'].contains(ctx.process.name) + - set: + field: event.outcome + value: failure + if: ctx.process?.name != null && (ctx.message != null && ctx.message.contains("fail")) && ['groupadd', 'groupdel', 'groupmod', 'useradd', 'userdel', 'usermod'].contains(ctx.process.name) + - append: + field: event.type + value: user + if: ctx.process?.name != null && ['useradd', 'userdel', 'usermod'].contains(ctx.process.name) + - append: + field: event.type + value: group + if: ctx.process?.name != null && ['groupadd', 'groupdel', 'groupmod'].contains(ctx.process.name) + - append: + field: event.type + value: creation + if: ctx.process?.name != null && ['useradd', 'groupadd'].contains(ctx.process.name) + - append: + field: event.type + value: deletion + if: ctx.process?.name != null && ['userdel', 'groupdel'].contains(ctx.process.name) + - append: + field: event.type + value: change + if: ctx.process?.name != null && ['usermod', 'groupmod'].contains(ctx.process.name) + - append: + field: related.user + value: "{{{ user.name }}}" + allow_duplicates: false + if: ctx.user?.name != null && ctx.user?.name != '' + - append: + field: related.user + value: "{{{ user.effective.name }}}" + allow_duplicates: false + if: ctx.user?.effective?.name != null && ctx.user?.effective?.name != '' + - append: + field: related.ip + value: "{{{ source.ip }}}" + allow_duplicates: false + if: ctx.source?.ip != null && ctx.source?.ip != '' + - append: + field: related.hosts + value: "{{{ host.hostname }}}" + allow_duplicates: false + if: ctx.host?.hostname != null && ctx.host?.hostname != '' + - set: + field: ecs.version + value: 8.0.0 + - remove: + field: event.original + if: "ctx?.tags == null || !(ctx.tags.contains('preserve_original_event'))" + ignore_failure: true + ignore_missing: true + - remove: + description: Remove the extra fields added by the Journald input + ignore_missing: true + field: + - journald + - process.thread + - syslog + - systemd + - message_id +on_failure: + - set: + field: error.message + value: '{{{ _ingest.on_failure_message }}}' diff --git a/filebeat/module/system/auth/manifest.yml b/filebeat/module/system/auth/manifest.yml index bf1a3623cf1..5fe9ef9be8b 100644 --- a/filebeat/module/system/auth/manifest.yml +++ b/filebeat/module/system/auth/manifest.yml @@ -12,6 +12,13 @@ var: os.windows: [] - name: tags default: [] + - name: use_journald + default: false + +ingest_pipeline: + - 
ingest/entrypoint.yml + - ingest/files.yml + - ingest/journald.yml + - ingest/grok-auth-messages.yml -ingest_pipeline: ingest/pipeline.yml input: config/auth.yml diff --git a/filebeat/module/system/auth/test/debian-12.export b/filebeat/module/system/auth/test/debian-12.export new file mode 100644 index 00000000000..583416f6c7b Binary files /dev/null and b/filebeat/module/system/auth/test/debian-12.export differ diff --git a/filebeat/module/system/auth/test/debian-12.journal b/filebeat/module/system/auth/test/debian-12.journal new file mode 100644 index 00000000000..3195198e604 Binary files /dev/null and b/filebeat/module/system/auth/test/debian-12.journal differ diff --git a/filebeat/module/system/auth/test/debian-12.journal-expected.json b/filebeat/module/system/auth/test/debian-12.journal-expected.json new file mode 100644 index 00000000000..2ef69b76b22 --- /dev/null +++ b/filebeat/module/system/auth/test/debian-12.journal-expected.json @@ -0,0 +1,383 @@ +[ + { + "event.action": "ssh_login", + "event.category": [ + "authentication", + "session" + ], + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": "system", + "event.outcome": "success", + "event.timezone": "-02:00", + "event.type": [ + "info" + ], + "fileset.name": "auth", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "journald", + "log.syslog.facility.code": 4, + "log.syslog.priority": 6, + "message": "Accepted publickey for vagrant from 10.0.2.2 port 48274 ssh2: ED25519 SHA256:k1kjhwoH/H3w31MbGOIGd7qxrkSQJnoAN0eYJVHDmmI", + "process.args": [ + "\"sshd: vagrant [priv]\"" + ], + "process.args_count": 1, + "process.command_line": "\"sshd: vagrant [priv]\"", + "process.name": "sshd", + "process.pid": 26538, + "related.hosts": [ + "vagrant-debian-12" + ], + "related.ip": [ + "10.0.2.2" + ], + "related.user": [ + "vagrant" + ], + "service.type": "system", + "source.address": "10.0.2.2", + "source.ip": "10.0.2.2", + "source.port": 48274, + "system.auth.ssh.event": "Accepted", + "system.auth.ssh.method": "publickey", + "system.auth.ssh.signature": "ED25519 SHA256:k1kjhwoH/H3w31MbGOIGd7qxrkSQJnoAN0eYJVHDmmI", + "user.group.id": "0", + "user.id": "0", + "user.name": "vagrant" + }, + { + "event.action": "ssh_login", + "event.category": [ + "authentication", + "session" + ], + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": "system", + "event.outcome": "success", + "event.timezone": "-02:00", + "event.type": [ + "info" + ], + "fileset.name": "auth", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "journald", + "log.syslog.facility.code": 4, + "log.syslog.priority": 6, + "message": "Accepted password for vagrant from 192.168.42.119 port 55310 ssh2", + "process.args": [ + "\"sshd: vagrant [priv]\"" + ], + "process.args_count": 1, + "process.command_line": "\"sshd: vagrant [priv]\"", + "process.name": "sshd", + "process.pid": 1710, + "related.hosts": [ + "vagrant-debian-12" + ], + "related.ip": [ + "192.168.42.119" + ], + "related.user": [ + "vagrant" + ], + "service.type": "system", + "source.address": "192.168.42.119", + "source.ip": "192.168.42.119", + "source.port": 55310, + "system.auth.ssh.event": "Accepted", + "system.auth.ssh.method": "password", + "user.group.id": "0", + "user.id": "0", + "user.name": "vagrant" + }, + { + "event.action": "ssh_login", + "event.category": [ + "authentication" + ], + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": 
"system", + "event.outcome": "failure", + "event.timezone": "-02:00", + "event.type": [ + "info" + ], + "fileset.name": "auth", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "journald", + "log.syslog.facility.code": 4, + "log.syslog.priority": 6, + "message": "Invalid user test from 192.168.42.119 port 48890", + "process.args": [ + "\"sshd: unknown [priv]\"" + ], + "process.args_count": 1, + "process.command_line": "\"sshd: unknown [priv]\"", + "process.name": "sshd", + "process.pid": 1721, + "related.hosts": [ + "vagrant-debian-12" + ], + "related.ip": [ + "192.168.42.119" + ], + "related.user": [ + "test" + ], + "service.type": "system", + "source.address": "192.168.42.119", + "source.ip": "192.168.42.119", + "system.auth.ssh.event": "Invalid", + "user.group.id": "0", + "user.id": "0", + "user.name": "test" + }, + { + "event.action": "ssh_login", + "event.category": [ + "authentication" + ], + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": "system", + "event.outcome": "failure", + "event.timezone": "-02:00", + "event.type": [ + "info" + ], + "fileset.name": "auth", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "journald", + "log.syslog.facility.code": 4, + "log.syslog.priority": 6, + "message": "Failed password for root from 192.168.42.119 port 46632 ssh2", + "process.args": [ + "\"sshd: root [priv]\"" + ], + "process.args_count": 1, + "process.command_line": "\"sshd: root [priv]\"", + "process.name": "sshd", + "process.pid": 1723, + "related.hosts": [ + "vagrant-debian-12" + ], + "related.ip": [ + "192.168.42.119" + ], + "related.user": [ + "root" + ], + "service.type": "system", + "source.address": "192.168.42.119", + "source.ip": "192.168.42.119", + "source.port": 46632, + "system.auth.ssh.event": "Failed", + "system.auth.ssh.method": "password", + "user.group.id": "0", + "user.id": "0", + "user.name": "root" + }, + { + "event.action": "ssh_login", + "event.category": [ + "authentication" + ], + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": "system", + "event.outcome": "failure", + "event.timezone": "-02:00", + "event.type": [ + "info" + ], + "fileset.name": "auth", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "journald", + "log.syslog.facility.code": 4, + "log.syslog.priority": 6, + "message": "Failed password for root from 192.168.42.119 port 46632 ssh2", + "process.args": [ + "\"sshd: root [priv]\"" + ], + "process.args_count": 1, + "process.command_line": "\"sshd: root [priv]\"", + "process.name": "sshd", + "process.pid": 1723, + "related.hosts": [ + "vagrant-debian-12" + ], + "related.ip": [ + "192.168.42.119" + ], + "related.user": [ + "root" + ], + "service.type": "system", + "source.address": "192.168.42.119", + "source.ip": "192.168.42.119", + "source.port": 46632, + "system.auth.ssh.event": "Failed", + "system.auth.ssh.method": "password", + "user.group.id": "0", + "user.id": "0", + "user.name": "root" + }, + { + "event.action": "ssh_login", + "event.category": [ + "authentication" + ], + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": "system", + "event.outcome": "failure", + "event.timezone": "-02:00", + "event.type": [ + "info" + ], + "fileset.name": "auth", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "journald", + "log.syslog.facility.code": 4, + 
"log.syslog.priority": 6, + "message": "Failed password for root from 192.168.42.119 port 46632 ssh2", + "process.args": [ + "\"sshd: root [priv]\"" + ], + "process.args_count": 1, + "process.command_line": "\"sshd: root [priv]\"", + "process.name": "sshd", + "process.pid": 1723, + "related.hosts": [ + "vagrant-debian-12" + ], + "related.ip": [ + "192.168.42.119" + ], + "related.user": [ + "root" + ], + "service.type": "system", + "source.address": "192.168.42.119", + "source.ip": "192.168.42.119", + "source.port": 46632, + "system.auth.ssh.event": "Failed", + "system.auth.ssh.method": "password", + "user.group.id": "0", + "user.id": "0", + "user.name": "root" + }, + { + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": "system", + "event.timezone": "-02:00", + "fileset.name": "auth", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "journald", + "log.syslog.facility.code": 10, + "log.syslog.priority": 5, + "message": " vagrant : TTY=pts/2 ; PWD=/home/vagrant ; USER=root ; COMMAND=/usr/bin/emacs /etc/ssh/sshd_config", + "process.args": [ + "sudo", + "emacs", + "/etc/ssh/sshd_config" + ], + "process.args_count": 3, + "process.command_line": "sudo emacs /etc/ssh/sshd_config", + "process.name": "sudo", + "process.pid": 1582, + "related.hosts": [ + "vagrant-debian-12" + ], + "related.user": [ + " vagrant", + "root" + ], + "service.type": "system", + "system.auth.sudo.command": "/usr/bin/emacs /etc/ssh/sshd_config", + "system.auth.sudo.pwd": "/home/vagrant", + "system.auth.sudo.tty": "pts/2", + "system.auth.sudo.user": "root", + "user.effective.name": "root", + "user.group.id": "1000", + "user.id": "1000", + "user.name": " vagrant" + }, + { + "event.category": [ + "iam" + ], + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": "system", + "event.outcome": "success", + "event.timezone": "-02:00", + "event.type": [ + "creation", + "group" + ], + "fileset.name": "auth", + "group.id": "1001", + "group.name": "test", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "journald", + "log.syslog.facility.code": 10, + "log.syslog.priority": 6, + "message": "new group: name=test, GID=1001", + "process.args": [ + "/sbin/groupadd", + "-g", + "1001", + "test" + ], + "process.args_count": 4, + "process.command_line": "/sbin/groupadd -g 1001 test", + "process.name": "groupadd", + "process.pid": 1743, + "related.hosts": [ + "vagrant-debian-12" + ], + "service.type": "system", + "user.effective.group.id": "0", + "user.effective.id": "0", + "user.id": "1000" + }, + { + "event.dataset": "system.auth", + "event.kind": "event", + "event.module": "system", + "event.timezone": "-02:00", + "fileset.name": "auth", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "journald", + "log.syslog.facility.code": 4, + "log.syslog.priority": 6, + "message": "Session 8 logged out. 
Waiting for processes to exit.", + "process.args": [ + "/lib/systemd/systemd-logind" + ], + "process.args_count": 1, + "process.command_line": "/lib/systemd/systemd-logind", + "process.name": "systemd-logind", + "process.pid": 316, + "related.hosts": [ + "vagrant-debian-12" + ], + "service.type": "system", + "user.group.id": "0", + "user.id": "0" + } +] \ No newline at end of file diff --git a/filebeat/module/system/syslog/config/syslog.yml b/filebeat/module/system/syslog/config/syslog.yml index e7f238d8af8..90fdd719b9f 100644 --- a/filebeat/module/system/syslog/config/syslog.yml +++ b/filebeat/module/system/syslog/config/syslog.yml @@ -1,4 +1,22 @@ +{{ if .use_journald }} +type: journald +id: system-syslog +facilities: + - 0 + - 1 + - 2 + - 3 + - 5 + - 6 + - 7 + - 8 + - 9 + - 11 + - 12 + - 15 +{{ else }} type: log +id: system-syslog paths: {{ range $i, $path := .paths }} - {{$path}} @@ -7,6 +25,9 @@ exclude_files: [".gz$"] multiline: pattern: "^\\s" match: after +{{ end }} + +# Common configuration processors: - add_locale: ~ - add_fields: diff --git a/filebeat/module/system/syslog/ingest/entrypoint.yml b/filebeat/module/system/syslog/ingest/entrypoint.yml new file mode 100644 index 00000000000..42a0f4ebb82 --- /dev/null +++ b/filebeat/module/system/syslog/ingest/entrypoint.yml @@ -0,0 +1,9 @@ +description: Entrypoint Pipeline for system/syslog Filebeat module +processors: + - pipeline: + if: ctx?.input?.type == "journald" + name: '{< IngestPipeline "journald" >}' + + - pipeline: + if: ctx?.input?.type == "log" + name: '{< IngestPipeline "files" >}' diff --git a/filebeat/module/system/syslog/ingest/pipeline.yml b/filebeat/module/system/syslog/ingest/files.yml similarity index 100% rename from filebeat/module/system/syslog/ingest/pipeline.yml rename to filebeat/module/system/syslog/ingest/files.yml diff --git a/filebeat/module/system/syslog/ingest/journald.yml b/filebeat/module/system/syslog/ingest/journald.yml new file mode 100644 index 00000000000..38b87dc8e0a --- /dev/null +++ b/filebeat/module/system/syslog/ingest/journald.yml @@ -0,0 +1,34 @@ +description: Journald Pipeline for system/syslog Filebeat module +processors: + - set: + field: event.ingested + copy_from: _ingest.timestamp + - set: + field: "process.pid" + copy_from: "journald.pid" + ignore_failure: true + - set: + field: "process.name" + copy_from: "journald.process.name" + ignore_failure: true + - set: + field: event.kind + value: event + - append: + field: related.hosts + value: "{{host.hostname}}" + if: "ctx.host?.hostname != null && ctx.host?.hostname != ''" + allow_duplicates: false + - remove: + description: Remove the extra fields added by the Journald input + ignore_missing: true + field: + - journald + - process.thread + - syslog + - systemd + - message_id +on_failure: + - set: + field: error.message + value: '{{ _ingest.on_failure_message }}' diff --git a/filebeat/module/system/syslog/manifest.yml b/filebeat/module/system/syslog/manifest.yml index 39a34e56ca3..a53715ceb7b 100644 --- a/filebeat/module/system/syslog/manifest.yml +++ b/filebeat/module/system/syslog/manifest.yml @@ -8,6 +8,12 @@ var: os.darwin: - /var/log/system.log* os.windows: [] + - name: use_journald + default: false + +ingest_pipeline: + - ingest/entrypoint.yml + - ingest/files.yml + - ingest/journald.yml -ingest_pipeline: ingest/pipeline.yml input: config/syslog.yml diff --git a/filebeat/module/system/syslog/test/debian-12.export b/filebeat/module/system/syslog/test/debian-12.export new file mode 100644 index 00000000000..780bd46990e Binary 
files /dev/null and b/filebeat/module/system/syslog/test/debian-12.export differ diff --git a/filebeat/module/system/syslog/test/debian-12.journal b/filebeat/module/system/syslog/test/debian-12.journal new file mode 100644 index 00000000000..f4c01a22c3f Binary files /dev/null and b/filebeat/module/system/syslog/test/debian-12.journal differ diff --git a/filebeat/module/system/syslog/test/debian-12.journal-expected.json b/filebeat/module/system/syslog/test/debian-12.journal-expected.json new file mode 100644 index 00000000000..b75cce10fc8 --- /dev/null +++ b/filebeat/module/system/syslog/test/debian-12.journal-expected.json @@ -0,0 +1,62 @@ +[ + { + "event.dataset": "system.syslog", + "event.kind": "event", + "event.module": "system", + "event.timezone": "-02:00", + "fileset.name": "syslog", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "journald", + "log.syslog.facility.code": 3, + "log.syslog.priority": 6, + "message": "Stopped target getty.target - Login Prompts.", + "process.args": [ + "/sbin/init" + ], + "process.args_count": 1, + "process.command_line": "/sbin/init", + "process.name": "systemd", + "process.pid": 1, + "related.hosts": [ + "vagrant-debian-12" + ], + "service.type": "system", + "user.group.id": "0", + "user.id": "0" + }, + { + "event.dataset": "system.syslog", + "event.kind": "event", + "event.module": "system", + "event.timezone": "-02:00", + "fileset.name": "syslog", + "host.hostname": "vagrant-debian-12", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "journald", + "log.syslog.facility.code": 0, + "log.syslog.priority": 6, + "message": "Console: switching to colour frame buffer device 160x50", + "related.hosts": [ + "vagrant-debian-12" + ], + "service.type": "system" + }, + { + "event.dataset": "system.syslog", + "event.kind": "event", + "event.module": "system", + "event.timezone": "-02:00", + "fileset.name": "syslog", + "host.hostname": "bookworm", + "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", + "input.type": "journald", + "log.syslog.facility.code": 0, + "log.syslog.priority": 6, + "message": "thermal_sys: Registered thermal governor 'power_allocator'", + "related.hosts": [ + "bookworm" + ], + "service.type": "system" + } +] \ No newline at end of file diff --git a/filebeat/modules.d/system.yml.disabled b/filebeat/modules.d/system.yml.disabled index 1302c6374da..fc4debca3bf 100644 --- a/filebeat/modules.d/system.yml.disabled +++ b/filebeat/modules.d/system.yml.disabled @@ -10,6 +10,9 @@ # Filebeat will choose the paths depending on your OS. #var.paths: + # Use journald to collect system logs + #var.use_journald: false + # Authorization logs auth: enabled: false @@ -17,3 +20,6 @@ # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. 
#var.paths: + + # Use journald to collect auth logs + #var.use_journald: false diff --git a/filebeat/tests/system/test_modules.py b/filebeat/tests/system/test_modules.py index db8022b372f..f5e7bdc2c39 100644 --- a/filebeat/tests/system/test_modules.py +++ b/filebeat/tests/system/test_modules.py @@ -196,7 +196,7 @@ def run_on_file(self, module, fileset, test_file, cfgfile): cmd.append("{module}.{fileset}.var.use_journald=true".format( module=module, fileset=fileset)) cmd.append("-M") - cmd.append("{module}.{fileset}.input.journald.paths=[{test_file}]".format( + cmd.append("{module}.{fileset}.input.paths=[{test_file}]".format( module=module, fileset=fileset, test_file=test_file)) else: cmd.append("-M") diff --git a/go.mod b/go.mod index e3178b6f5d0..0ae36e088b8 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/elastic/beats/v7 -go 1.22.0 +go 1.22.9 require ( cloud.google.com/go/bigquery v1.62.0 @@ -20,7 +20,6 @@ require ( github.com/PaesslerAG/jsonpath v0.1.1 github.com/Shopify/sarama v1.27.0 github.com/StackExchange/wmi v1.2.1 - github.com/aerospike/aerospike-client-go v1.27.1-0.20170612174108-0f3b54da6bdc github.com/akavel/rsrc v0.8.0 // indirect github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77 // indirect github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 @@ -169,13 +168,14 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/consumption/armconsumption v1.1.0 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.6.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/costmanagement/armcostmanagement v1.1.1 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor v0.8.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 github.com/Azure/azure-storage-blob-go v0.15.0 github.com/Azure/go-autorest/autorest/adal v0.9.24 + github.com/aerospike/aerospike-client-go/v7 v7.7.1 github.com/apache/arrow/go/v14 v14.0.2 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.13 @@ -191,8 +191,8 @@ require ( github.com/elastic/bayeux v1.0.5 github.com/elastic/ebpfevents v0.6.0 github.com/elastic/elastic-agent-autodiscover v0.9.0 - github.com/elastic/elastic-agent-libs v0.17.1 - github.com/elastic/elastic-agent-system-metrics v0.11.1 + github.com/elastic/elastic-agent-libs v0.17.3 + github.com/elastic/elastic-agent-system-metrics v0.11.4 github.com/elastic/go-elasticsearch/v8 v8.14.0 github.com/elastic/go-quark v0.2.0 github.com/elastic/go-sfdc v0.0.0-20241010131323-8e176480d727 @@ -210,13 +210,14 @@ require ( github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.5.0 github.com/icholy/digest v0.1.22 + github.com/klauspost/compress v1.17.9 github.com/meraki/dashboard-api-go/v3 v3.0.9 github.com/otiai10/copy v1.12.0 github.com/pierrec/lz4/v4 v4.1.18 github.com/pkg/xattr v0.4.9 
github.com/prometheus/prometheus v0.54.1 github.com/shirou/gopsutil/v3 v3.22.10 - github.com/tklauser/go-sysconf v0.3.10 + github.com/tklauser/go-sysconf v0.3.12 github.com/xdg-go/scram v1.1.2 github.com/zyedidia/generic v1.2.1 go.elastic.co/apm/module/apmelasticsearch/v2 v2.6.0 @@ -334,7 +335,6 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/klauspost/asmfmt v1.3.2 // indirect - github.com/klauspost/compress v1.17.9 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/kortschak/utter v1.5.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect @@ -365,15 +365,17 @@ require ( github.com/prometheus/client_golang v1.20.2 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/sergi/go-diff v1.3.1 // indirect + github.com/shirou/gopsutil/v4 v4.24.7 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/tklauser/numcpus v0.4.0 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect github.com/vishvananda/netlink v1.2.1-beta.2 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect - github.com/yusufpapurcu/wmi v1.2.2 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect go.elastic.co/fastjson v1.1.0 // indirect go.opencensus.io v0.24.0 // indirect @@ -406,7 +408,7 @@ require ( github.com/dlclark/regexp2 v1.4.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/moby/term v0.5.0 // indirect - github.com/yuin/gopher-lua v0.0.0-20170403160031-b402f3114ec7 // indirect + github.com/yuin/gopher-lua v1.1.1 // indirect gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect ) @@ -415,8 +417,7 @@ replace ( github.com/Shopify/sarama => github.com/elastic/sarama v1.19.1-0.20220310193331-ebc2b0d8eef3 github.com/apoydence/eachers => github.com/poy/eachers v0.0.0-20181020210610-23942921fe77 //indirect, see https://github.com/elastic/beats/pull/29780 for details. 
- github.com/dop251/goja => github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 - github.com/dop251/goja_nodejs => github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6 + github.com/dop251/goja => github.com/elastic/goja v0.0.0-20190128172624-dd2ac4456e20 github.com/fsnotify/fsevents => github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 github.com/fsnotify/fsnotify => github.com/elastic/fsnotify v1.6.1-0.20240920222514-49f82bdbc9e3 github.com/google/gopacket => github.com/elastic/gopacket v1.1.20-0.20241002174017-e8c5fda595e6 diff --git a/go.sum b/go.sum index 93bce761422..56c592bb507 100644 --- a/go.sum +++ b/go.sum @@ -59,8 +59,8 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xP github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1 h1:0f6XnzroY1yCQQwxGf/n/2xlaBF02Qhof2as99dGNsY= github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1/go.mod h1:vMGz6NOUGJ9h5ONl2kkyaqq5E0g7s4CHNSrXN5fl8UY= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.6.0 h1:AAIdAyPkFff6XTct2lQCxOWN/+LnA41S7kIkzKaMbyE= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.6.0/go.mod h1:noQIdW75SiQFB3mSFJBr4iRRH83S9skaFiBv4C0uEs0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0 h1:0nGmzwBv5ougvzfGPCO2ljFRHvun57KpNrVCMrlk0ns= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0/go.mod h1:gYq8wyDgv6JLhGbAU6gg8amCPgQWRE+aCvrV2gyzdfs= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/costmanagement/armcostmanagement v1.1.1 h1:ehSLdbLah6kk6HTVc6e/lrbmbz7MMbpNxkOd3OYlhB0= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/costmanagement/armcostmanagement v1.1.1/go.mod h1:Am1cUioOk0HdZIsjpXJkQ4RIeQbwYsW6LkNIc5z/5XY= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.2.0 h1:+dggnR89/BIIlRlQ6d19dkhhdd/mQUiQbXhyHUFiB4w= @@ -132,16 +132,14 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWso github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/aerospike/aerospike-client-go v1.27.1-0.20170612174108-0f3b54da6bdc h1:9iW/Fbn/R/nyUOiqo6AgwBe8uirqUIoTGF3vKG8qjoc= -github.com/aerospike/aerospike-client-go v1.27.1-0.20170612174108-0f3b54da6bdc/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= +github.com/aerospike/aerospike-client-go/v7 v7.7.1 h1:lcskBtPZYe6ESObhIEQEp4XO1axYZpaFD3ie4iwr6tg= +github.com/aerospike/aerospike-client-go/v7 v7.7.1/go.mod h1:STlBtOkKT8nmp7iD+sEkr/JGEOu+4e2jGlNN0Jiu2a4= github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw= github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= 
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA= github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= -github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 h1:7rj9qZ63knnVo2ZeepYHvHuRdG76f3tRUTdIQDzRBeI= -github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20/go.mod h1:cI59GRkC2FRaFYtgbYEqMlgnnfvAwXzjojyZKXwklNg= github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs= github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= @@ -342,10 +340,10 @@ github.com/elastic/elastic-agent-autodiscover v0.9.0 h1:+iWIKh0u3e8I+CJa3FfWe9h0 github.com/elastic/elastic-agent-autodiscover v0.9.0/go.mod h1:5iUxLHhVdaGSWYTveSwfJEY4RqPXTG13LPiFoxcpFd4= github.com/elastic/elastic-agent-client/v7 v7.15.0 h1:nDB7v8TBoNuD6IIzC3z7Q0y+7bMgXoT2DsHfolO2CHE= github.com/elastic/elastic-agent-client/v7 v7.15.0/go.mod h1:6h+f9QdIr3GO2ODC0Y8+aEXRwzbA5W4eV4dd/67z7nI= -github.com/elastic/elastic-agent-libs v0.17.1 h1:1MXoc1eHGE8hCdVJ9+qiGiZAGeHzT2QBVVzD/oxwqeU= -github.com/elastic/elastic-agent-libs v0.17.1/go.mod h1:5CR02awPrBr+tfmjBBK+JI+dMmHNQjpVY24J0wjbC7M= -github.com/elastic/elastic-agent-system-metrics v0.11.1 h1:BxViQHnqxvvi/65rj3mGwG6Eto6ldFCTnuDTUJnakaU= -github.com/elastic/elastic-agent-system-metrics v0.11.1/go.mod h1:3QiMu9wTKJFvpCN+5klgGqasTMNKJbgY3xcoN1KQXJk= +github.com/elastic/elastic-agent-libs v0.17.3 h1:q79P05dhQkc5REzieVkkD9oRKrnptKY4MC6Typ+d8bc= +github.com/elastic/elastic-agent-libs v0.17.3/go.mod h1:5CR02awPrBr+tfmjBBK+JI+dMmHNQjpVY24J0wjbC7M= +github.com/elastic/elastic-agent-system-metrics v0.11.4 h1:Z/8CML5RKvGpi6/QUFok1K3EriBAv2kUAXnsk8hCifk= +github.com/elastic/elastic-agent-system-metrics v0.11.4/go.mod h1:TTW2ysv78uHBQ68hG8TXiaX1m6f29ZHgGWb8XONYsU8= github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 h1:cWPqxlPtir4RoQVCpGSRXmLqjEHpJKbR60rxh1nQZY4= @@ -381,6 +379,8 @@ github.com/elastic/go-ucfg v0.8.8 h1:54KIF/2zFKfl0MzsSOCGOsZ3O2bnjFQJ0nDJcLhviyk github.com/elastic/go-ucfg v0.8.8/go.mod h1:4E8mPOLSUV9hQ7sgLEJ4bvt0KhMuDJa8joDT2QGAEKA= github.com/elastic/go-windows v1.0.2 h1:yoLLsAsV5cfg9FLhZ9EXZ2n2sQFKeDYrHenkcivY4vI= github.com/elastic/go-windows v1.0.2/go.mod h1:bGcDpBzXgYSqM0Gx3DM4+UxFj300SZLixie9u9ixLM8= +github.com/elastic/goja v0.0.0-20190128172624-dd2ac4456e20 h1:bVZ3kDKa8Tqw9qvNrD91MwJMW6alg4Wn31l1TQ6RlTY= +github.com/elastic/goja v0.0.0-20190128172624-dd2ac4456e20/go.mod h1:A1DWjF89MFVnxzmzTaMF7CwVy9PDem7DalMkm8RIMoY= github.com/elastic/gopacket v1.1.20-0.20241002174017-e8c5fda595e6 h1:VgOx6omXIMKozR+R4HhQRT9q1Irm/h13DLtSkejoAJY= github.com/elastic/gopacket v1.1.20-0.20241002174017-e8c5fda595e6/go.mod h1:riddUzxTSBpJXk3qBHtYr4qOhFhT6k/1c0E3qkQjQpA= github.com/elastic/gosigar v0.14.3 
h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= @@ -836,6 +836,10 @@ github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/shirou/gopsutil/v3 v3.22.10 h1:4KMHdfBRYXGF9skjDWiL4RA2N+E8dRdodU/bOZpPoVg= github.com/shirou/gopsutil/v3 v3.22.10/go.mod h1:QNza6r4YQoydyCfo6rH0blGfKahgibh4dQmV5xdFkQk= +github.com/shirou/gopsutil/v4 v4.24.7 h1:V9UGTK4gQ8HvcnPKf6Zt3XHyQq/peaekfxpJ2HSocJk= +github.com/shirou/gopsutil/v4 v4.24.7/go.mod h1:0uW/073rP7FYLOkvxolUQM5rMOLTNmRXnFKafpb71rw= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= @@ -872,10 +876,12 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= -github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tommyers-elastic/dashboard-api-go/v3 v3.0.0-20240913150833-a945473a8f25 h1:o24r+NDexzdlwgqI0Dglq2I/cdONYRACikcUmYmovtQ= github.com/tommyers-elastic/dashboard-api-go/v3 v3.0.0-20240913150833-a945473a8f25/go.mod h1:COGDRzuD05ZS/zp0lDCTDFhx6kAuuNdhDjY0y2ifi5o= github.com/tsg/go-daemon v0.0.0-20200207173439-e704b93fd89b h1:X/8hkb4rQq3+QuOxpJK7gWmAXmZucF0EI1s1BfBLq6U= @@ -908,10 +914,11 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yuin/gopher-lua v0.0.0-20170403160031-b402f3114ec7 h1:0gYLpmzecnaDCoeWxSfEJ7J1b6B/67+NV++4HKQXx+Y= -github.com/yuin/gopher-lua v0.0.0-20170403160031-b402f3114ec7/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= -github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= +github.com/yuin/gopher-lua 
v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= diff --git a/heartbeat/Dockerfile b/heartbeat/Dockerfile index a30644dd04a..4b763fcdd23 100644 --- a/heartbeat/Dockerfile +++ b/heartbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.8 +FROM golang:1.22.9 RUN \ apt-get update \ diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index 94cece60fb8..993560ec288 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -8,6 +8,8 @@ This section summarizes the changes in each release. Also read <> for more detail about changes that affect upgrade. +* <> +* <> * <> * <> * <> diff --git a/libbeat/docs/version.asciidoc b/libbeat/docs/version.asciidoc index 468ad95c9ce..165f72f91b0 100644 --- a/libbeat/docs/version.asciidoc +++ b/libbeat/docs/version.asciidoc @@ -1,6 +1,6 @@ :stack-version: 9.0.0 :doc-branch: main -:go-version: 1.22.8 +:go-version: 1.22.9 :release-state: unreleased :python: 3.7 :docker: 1.12 diff --git a/libbeat/esleg/eslegclient/enc.go b/libbeat/esleg/eslegclient/enc.go index 27e409f9172..79e6c430a38 100644 --- a/libbeat/esleg/eslegclient/enc.go +++ b/libbeat/esleg/eslegclient/enc.go @@ -19,11 +19,13 @@ package eslegclient import ( "bytes" - "compress/gzip" + "io" "net/http" "time" + "github.com/klauspost/compress/gzip" + "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/outputs/codec" "github.com/elastic/elastic-agent-libs/mapstr" diff --git a/libbeat/internal/testutil/util.go b/libbeat/internal/testutil/util.go index 51a811587e5..a44becb7008 100644 --- a/libbeat/internal/testutil/util.go +++ b/libbeat/internal/testutil/util.go @@ -21,9 +21,13 @@ package testutil import ( "flag" + "fmt" "math/rand" "testing" "time" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/elastic-agent-libs/mapstr" ) var ( @@ -37,5 +41,32 @@ func SeedPRNG(t *testing.T) { } t.Logf("reproduce test with `go test ... -seed %v`", seed) - rand.Seed(seed) + rand.New(rand.NewSource(seed)) +} + +func GenerateEvents(numEvents, fieldsPerLevel, depth int) []beat.Event { + events := make([]beat.Event, numEvents) + for i := 0; i < numEvents; i++ { + event := &beat.Event{Fields: mapstr.M{}} + generateFields(event, fieldsPerLevel, depth) + events[i] = *event + } + return events +} + +func generateFields(event *beat.Event, fieldsPerLevel, depth int) { + if depth == 0 { + return + } + + for j := 1; j <= fieldsPerLevel; j++ { + var key string + for d := 1; d <= depth; d++ { + key += fmt.Sprintf("level%dfield%d", d, j) + key += "." 
+		}
+		// key ends with a trailing "." appended by the loop above; trim it
+		// so the deepest level is stored under a real key segment.
+		event.Fields.Put(key[:len(key)-1], "value")
+	}
+}
diff --git a/libbeat/outputs/elasticsearch/client_test.go b/libbeat/outputs/elasticsearch/client_test.go
index abda06a02ee..f6322383cb5 100644
--- a/libbeat/outputs/elasticsearch/client_test.go
+++ b/libbeat/outputs/elasticsearch/client_test.go
@@ -40,6 +40,7 @@ import (
 	"github.com/elastic/beats/v7/libbeat/common"
 	"github.com/elastic/beats/v7/libbeat/esleg/eslegclient"
 	"github.com/elastic/beats/v7/libbeat/idxmgmt"
+	"github.com/elastic/beats/v7/libbeat/internal/testutil"
 	"github.com/elastic/beats/v7/libbeat/outputs"
 	"github.com/elastic/beats/v7/libbeat/outputs/outest"
 	"github.com/elastic/beats/v7/libbeat/outputs/outil"
@@ -713,6 +714,83 @@ func BenchmarkCollectPublishFailAll(b *testing.B) {
 	}
 }
 
+func BenchmarkPublish(b *testing.B) {
+	tests := []struct {
+		Name   string
+		Events []beat.Event
+	}{
+		{
+			Name:   "5 events",
+			Events: testutil.GenerateEvents(50, 5, 3),
+		},
+		{
+			Name:   "50 events",
+			Events: testutil.GenerateEvents(500, 5, 3),
+		},
+		{
+			Name:   "500 events",
+			Events: testutil.GenerateEvents(500, 5, 3),
+		},
+	}
+
+	levels := []int{1, 4, 7, 9}
+
+	requestCount := 0
+
+	// start a mock HTTP server
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		assert.Equal(b, "testing value", r.Header.Get("X-Test"))
+		// from the documentation: https://golang.org/pkg/net/http/
+		// For incoming requests, the Host header is promoted to the
+		// Request.Host field and removed from the Header map.
+		assert.Equal(b, "myhost.local", r.Host)
+
+		var response string
+		if r.URL.Path == "/" {
+			response = `{ "version": { "number": "7.6.0" } }`
+		} else {
+			response = `{"items":[{"index":{}},{"index":{}},{"index":{}}]}`
+		}
+		fmt.Fprintln(w, response)
+		requestCount++
+	}))
+	defer ts.Close()
+
+	// Indexing to _bulk api
+	for _, test := range tests {
+		for _, l := range levels {
+			b.Run(fmt.Sprintf("%s with compression level %d", test.Name, l), func(b *testing.B) {
+				client, err := NewClient(
+					clientSettings{
+						connection: eslegclient.ConnectionSettings{
+							URL: ts.URL,
+							Headers: map[string]string{
+								"host":   "myhost.local",
+								"X-Test": "testing value",
+							},
+							CompressionLevel: l,
+						},
+					},
+					nil,
+				)
+				assert.NoError(b, err)
+				batch := encodeBatch(client, outest.NewBatch(test.Events...))
+
+				// It uses gzip encoder internally for encoding data
+				b.ResetTimer()
+				for i := 0; i < b.N; i++ {
+					err := client.Publish(context.Background(), batch)
+					assert.NoError(b, err)
+				}
+			})
+		}
+	}
+}
+
 func TestClientWithHeaders(t *testing.T) {
 	requestCount := 0
 	// start a mock HTTP server
diff --git a/libbeat/outputs/otelconsumer/otelconsumer.go b/libbeat/outputs/otelconsumer/otelconsumer.go
index cad11ab1442..ca5da5308e5 100644
--- a/libbeat/outputs/otelconsumer/otelconsumer.go
+++ b/libbeat/outputs/otelconsumer/otelconsumer.go
@@ -20,12 +20,14 @@ package otelconsumer
 import (
 	"context"
 	"fmt"
+	"strings"
 	"time"
 
 	"github.com/elastic/beats/v7/libbeat/beat"
 	"github.com/elastic/beats/v7/libbeat/outputs"
 	"github.com/elastic/beats/v7/libbeat/publisher"
 	"github.com/elastic/elastic-agent-libs/config"
+	"github.com/elastic/elastic-agent-libs/logp"
 	"github.com/elastic/elastic-agent-libs/mapstr"
 
 	"go.opentelemetry.io/collector/consumer"
@@ -42,6 +44,7 @@ type otelConsumer struct {
 	observer     outputs.Observer
 	logsConsumer consumer.Logs
 	beatInfo     beat.Info
+	log          *logp.Logger
 }
 
 func makeOtelConsumer(_ outputs.IndexManager, beat
beat.Info, observer outputs.Observer, cfg *config.C) (outputs.Group, error) {
@@ -50,6 +53,7 @@ func makeOtelConsumer(_ outputs.IndexManager, beat beat.Info, observer outputs.O
 		observer:     observer,
 		logsConsumer: beat.LogConsumer,
 		beatInfo:     beat,
+		log:          logp.NewLogger("otelconsumer"),
 	}
 
 	ocConfig := defaultConfig()
@@ -99,6 +103,30 @@ func (out *otelConsumer) logsPublish(ctx context.Context, batch publisher.Batch)
 	err := out.logsConsumer.ConsumeLogs(ctx, pLogs)
 	if err != nil {
+		// If the batch is too large, the elasticsearchexporter will
+		// return a 413 error.
+		//
+		// At the moment, the exporter does not support batch splitting
+		// on error, so we do it here.
+		//
+		// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/36163.
+		if strings.Contains(err.Error(), "Request Entity Too Large") {
+			// Try to split the batch into smaller batches and retry.
+			if batch.SplitRetry() {
+				st.BatchSplit()
+				st.RetryableErrors(len(events))
+			} else {
+				// If the batch could not be split, there is no option left but
+				// to drop it and log the error state.
+				batch.Drop()
+				st.PermanentErrors(len(events))
+				out.log.Errorf("the batch is too large to be sent: %v", err)
+			}
+
+			// Don't propagate the error; the batch was either split and retried or dropped.
+			return nil
+		}
+
 		// Permanent errors shouldn't be retried. This typically means
 		// the data cannot be serialized by the exporter that is attached
 		// to the pipeline or when the destination refuses the data because
diff --git a/libbeat/outputs/otelconsumer/otelconsumer_test.go b/libbeat/outputs/otelconsumer/otelconsumer_test.go
index a18bf77e6b8..2751ce7f721 100644
--- a/libbeat/outputs/otelconsumer/otelconsumer_test.go
+++ b/libbeat/outputs/otelconsumer/otelconsumer_test.go
@@ -32,6 +32,7 @@ import (
 	"github.com/elastic/beats/v7/libbeat/beat"
 	"github.com/elastic/beats/v7/libbeat/outputs"
 	"github.com/elastic/beats/v7/libbeat/outputs/outest"
+	"github.com/elastic/elastic-agent-libs/logp"
 	"github.com/elastic/elastic-agent-libs/mapstr"
 )
 
@@ -46,12 +47,15 @@ func TestPublish(t *testing.T) {
 	makeOtelConsumer := func(t *testing.T, consumeFn func(ctx context.Context, ld plog.Logs) error) *otelConsumer {
 		t.Helper()
+		assert.NoError(t, logp.TestingSetup(logp.WithSelectors("otelconsumer")))
+
 		logConsumer, err := consumer.NewLogs(consumeFn)
 		assert.NoError(t, err)
 		consumer := &otelConsumer{
 			observer:     outputs.NewNilObserver(),
 			logsConsumer: logConsumer,
 			beatInfo:     beat.Info{},
+			log:          logp.NewLogger("otelconsumer"),
 		}
 		return consumer
 	}
@@ -86,6 +90,33 @@
 		assert.Equal(t, outest.BatchRetry, batch.Signals[0].Tag)
 	})
 
+	t.Run("split batch on entity too large error", func(t *testing.T) {
+		batch := outest.NewBatch(event1, event2, event3)
+
+		otelConsumer := makeOtelConsumer(t, func(ctx context.Context, ld plog.Logs) error {
+			return errors.New("Request Entity Too Large")
+		})
+
+		err := otelConsumer.Publish(ctx, batch)
+		assert.NoError(t, err)
+		assert.Len(t, batch.Signals, 1)
+		assert.Equal(t, outest.BatchSplitRetry, batch.Signals[0].Tag)
+	})
+
+	t.Run("drop batch if can't split on entity too large error", func(t *testing.T) {
+		batch := outest.NewBatch(event1)
+
+		otelConsumer := makeOtelConsumer(t, func(ctx context.Context, ld plog.Logs) error {
+			return errors.New("Request Entity Too Large")
+		})
+
+		err := otelConsumer.Publish(ctx, batch)
+		assert.NoError(t, err)
+		assert.Len(t, batch.Signals, 2)
+		assert.Equal(t, outest.BatchSplitRetry, batch.Signals[0].Tag)
+		assert.Equal(t,
outest.BatchDrop, batch.Signals[1].Tag)
+	})
+
 	t.Run("drop batch on permanent consumer error", func(t *testing.T) {
 		batch := outest.NewBatch(event1, event2, event3)
diff --git a/libbeat/processors/actions/docs/uppercase.asciidoc b/libbeat/processors/actions/docs/uppercase.asciidoc
new file mode 100644
index 00000000000..b9ede3b35e9
--- /dev/null
+++ b/libbeat/processors/actions/docs/uppercase.asciidoc
@@ -0,0 +1,119 @@
+[[uppercase]]
+=== Uppercase fields in events
+
+++++
+uppercase
+++++
+
+The `uppercase` processor specifies a list of `fields` and `values` to be converted to uppercase. Keys listed in `fields` are matched case-insensitively and converted to uppercase. Entries in `values` identify keys (matched case-sensitively) whose string values are converted to uppercase. This way, keys and values can be selectively converted based on the specified criteria.
+
+
+==== Examples:
+
+1. Default scenario
+
+[source,yaml]
+----
+processors:
+  - uppercase:
+      fields:
+        - "ab.cd"
+      values:
+        - "testKey"
+      ignore_missing: false
+      fail_on_error: true
+      alter_full_field: true
+----
+[source,json]
+----
+// Input
+{
+  "ab": {"cd":"data"},
+  "CD": {"ef":"data"},
+  "testKey": "testvalue"
+}
+
+
+// output
+{
+  "AB": {"CD":"data"},   // `ab.cd` -> `AB.CD`
+  "CD": {"ef":"data"},
+  "testKey": "TESTVALUE" // `testvalue` -> `TESTVALUE` is uppercased
+}
+----
+
+[start=2]
+2. When `alter_full_field` is false (applicable only for fields)
+
+[source,yaml]
+----
+processors:
+  - uppercase:
+      fields:
+        - "ab.cd"
+      ignore_missing: false
+      fail_on_error: true
+      alter_full_field: false
+----
+
+[source,json]
+----
+// Input
+{
+  "ab": {"cd":"data"},
+  "CD": {"ef":"data"}
+}
+
+
+// output
+{
+  "ab": {"CD":"data"}, // `ab.cd` -> `ab.CD` (only `cd` is uppercased)
+  "CD": {"ef":"data"}
+}
+----
+
+[start=3]
+3. In case of a non-unique path to the key
+
+[source,yaml]
+----
+processors:
+  - uppercase:
+      fields:
+        - "ab"
+      ignore_missing: false
+      fail_on_error: true
+      alter_full_field: true
+----
+
+[source,json]
+----
+// Input
+{
+  "ab": "first",
+  "aB": "second"
+}
+
+// Output
+{
+  "ab": "first",
+  "aB": "second",
+  "error": {"message": "... Error: key collision"}
+}
+----
+
+==== Configuration:
+
+The `uppercase` processor has the following configuration settings:
+
+`fields`:: The field names to uppercase. The match is case-insensitive, e.g. `a.b.c.d` would match `A.b.C.d` or `A.B.C.D`.
+`values`:: (Optional) Specifies the exact values to be converted to uppercase. Each entry should include the full path to the value. Key matching is case-sensitive. If the target value is not a string, an error is triggered (`fail_on_error: true`) or the value is skipped (`fail_on_error: false`).
+`ignore_missing`:: (Optional) Indicates whether to ignore events that lack the source field. The default is `false`, which will fail processing of an event if a field is missing.
+`fail_on_error`:: (Optional) If set to `true` and an error occurs, the changes are reverted and the original event is returned. If set to `false`, processing continues if an error occurs. Default is `true`.
+`alter_full_field`:: (Optional) If set to `true`, the entire key path is uppercased. If set to `false`, only the final part of the key path is uppercased. Default is `true`.
+
+
+
+See <> for a list of supported conditions.
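To make the `values` semantics above concrete, here is a small sketch (the field names `status` and `retry.count` are invented for illustration): with `fail_on_error: false`, a listed key whose value is not a string is skipped instead of failing the event.

```yaml
processors:
  - uppercase:
      values:
        - "status"       # string value, converted to uppercase
        - "retry.count"  # numeric value, skipped because fail_on_error is false
      ignore_missing: true
      fail_on_error: false
```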
diff --git a/libbeat/processors/actions/lowercase_test.go b/libbeat/processors/actions/lowercase_test.go index 6dba685caa4..4c11bd0f75d 100644 --- a/libbeat/processors/actions/lowercase_test.go +++ b/libbeat/processors/actions/lowercase_test.go @@ -18,13 +18,13 @@ package actions import ( - "fmt" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/internal/testutil" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -59,32 +59,6 @@ func TestLowerCaseProcessorRun(t *testing.T) { Output mapstr.M Error bool }{ - { - Name: "Lowercase Fields", - Fields: []string{"a.b.c", "Field1"}, - IgnoreMissing: false, - FailOnError: true, - FullPath: true, - Input: mapstr.M{ - "Field1": mapstr.M{"Field2": "Value"}, - "Field3": "Value", - "a": mapstr.M{ - "B": mapstr.M{ - "C": "D", - }, - }, - }, - Output: mapstr.M{ - "field1": mapstr.M{"Field2": "Value"}, // field1 is lowercased - "Field3": "Value", - "a": mapstr.M{ - "b": mapstr.M{ - "c": "D", - }, - }, - }, - Error: false, - }, { Name: "Lowercase Fields", Fields: []string{"a.b.c", "Field1"}, @@ -363,21 +337,14 @@ func BenchmarkLowerCaseProcessorRun(b *testing.B) { Events []beat.Event }{ { - Name: "5000 events with 5 fields on each level with 3 level depth without collisions", - Events: GenerateEvents(5000, 5, 3, false), + Name: "5000 events with 5 fields on each level with 3 level depth", + Events: testutil.GenerateEvents(5000, 5, 3), }, { - Name: "5000 events with 5 fields on each level with 3 level depth with collisions", - Events: GenerateEvents(5000, 5, 3, true), - }, - { - Name: "500 events with 50 fields on each level with 5 level depth without collisions", - Events: GenerateEvents(500, 50, 3, false), - }, - { - Name: "500 events with 50 fields on each level with 5 level depth with collisions", - Events: GenerateEvents(500, 50, 3, true), + Name: "500 events with 50 fields on each level with 5 level depth", + Events: testutil.GenerateEvents(500, 50, 3), }, + // Add more test cases as needed for benchmarking } @@ -402,35 +369,3 @@ func BenchmarkLowerCaseProcessorRun(b *testing.B) { }) } } - -func GenerateEvents(numEvents, fieldsPerLevel, depth int, withCollisions bool) []beat.Event { - events := make([]beat.Event, numEvents) - for i := 0; i < numEvents; i++ { - event := &beat.Event{Fields: mapstr.M{}} - generateFields(event, fieldsPerLevel, depth, withCollisions) - events[i] = *event - } - return events -} - -func generateFields(event *beat.Event, fieldsPerLevel, depth int, withCollisions bool) { - if depth == 0 { - return - } - - for j := 1; j <= fieldsPerLevel; j++ { - var key string - for d := 1; d < depth; d++ { - key += fmt.Sprintf("level%dfield%d", d, j) - key += "." - } - if withCollisions { - key += fmt.Sprintf("Level%dField%d", depth, j) // Creating a collision (Level is capitalized) - } else { - key += fmt.Sprintf("level%dfield%d", depth, j) - } - event.Fields.Put(key, "value") - key = "" - } - -} diff --git a/libbeat/processors/actions/uppercase.go b/libbeat/processors/actions/uppercase.go new file mode 100644 index 00000000000..ad00df603d8 --- /dev/null +++ b/libbeat/processors/actions/uppercase.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package actions + +import ( + "strings" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/processors" + "github.com/elastic/beats/v7/libbeat/processors/checks" + conf "github.com/elastic/elastic-agent-libs/config" +) + +func init() { + processors.RegisterPlugin( + "uppercase", + checks.ConfigChecked( + NewUpperCaseProcessor, + checks.RequireFields("fields"), + checks.AllowedFields("fields", "ignore_missing", "fail_on_error", "alter_full_field", "values"), + ), + ) +} + +// NewUpperCaseProcessor converts event keys matching the provided fields to uppercase +func NewUpperCaseProcessor(c *conf.C) (beat.Processor, error) { + return NewAlterFieldProcessor(c, "uppercase", upperCase) +} + +func upperCase(field string) (string, error) { + return strings.ToUpper(field), nil +} diff --git a/libbeat/processors/actions/uppercase_test.go b/libbeat/processors/actions/uppercase_test.go new file mode 100644 index 00000000000..2e643eadaaf --- /dev/null +++ b/libbeat/processors/actions/uppercase_test.go @@ -0,0 +1,193 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package actions + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/libbeat/beat" + conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/mapstr" +) + +func TestNewUpperCaseProcessor(t *testing.T) { + c := conf.MustNewConfigFrom( + mapstr.M{ + "fields": []string{"field1", "type", "field2", "type.value.key", "typeKey"}, // "type" is our mandatory field + "ignore_missing": true, + "fail_on_error": false, + }, + ) + + procInt, err := NewUpperCaseProcessor(c) + assert.NoError(t, err) + + processor, ok := procInt.(*alterFieldProcessor) + assert.True(t, ok) + assert.Equal(t, []string{"field1", "field2", "typeKey"}, processor.Fields) // we discard both "type" and "type.value.key" as mandatory fields + assert.True(t, processor.IgnoreMissing) + assert.False(t, processor.FailOnError) +} + +func TestUpperCaseProcessorRun(t *testing.T) { + tests := []struct { + Name string + Fields []string + Values []string + IgnoreMissing bool + FailOnError bool + FullPath bool + Input mapstr.M + Output mapstr.M + Error bool + }{ + { + Name: "Uppercase Fields", + Fields: []string{"a.b.c", "Field1"}, + Values: []string{"Field3"}, + IgnoreMissing: false, + FailOnError: true, + FullPath: true, + Input: mapstr.M{ + "Field1": mapstr.M{"Field2": "Value"}, + "Field3": "Value", + "a": mapstr.M{ + "B": mapstr.M{ + "C": "D", + }, + }, + }, + Output: mapstr.M{ + "FIELD1": mapstr.M{"Field2": "Value"}, // the key FIELD1 is uppercased + "Field3": "VALUE", // the value of Field3 is uppercased + "A": mapstr.M{ + "B": mapstr.M{ + "C": "D", + }, + }, + }, + Error: false, + }, + { + Name: "Uppercase Fields when full_path is false", // matches only the most nested key, case-insensitively + Fields: []string{"a.B.c"}, + IgnoreMissing: false, + FailOnError: true, + FullPath: false, + Input: mapstr.M{ + "Field3": "Value", + "a": mapstr.M{ + "B": mapstr.M{ + "c": "D", + }, + }, + }, + Output: mapstr.M{ + "Field3": "Value", + "a": mapstr.M{ + "B": mapstr.M{ + "C": "D", // only c is uppercased + }, + }, + }, + + Error: false, + }, + { + Name: "Fail On Missing Key Error", + Fields: []string{"Field4"}, + IgnoreMissing: false, + FailOnError: true, + FullPath: true, + Input: mapstr.M{ + "Field1": mapstr.M{"Field2": "Value"}, + "Field3": "Value", + }, + Output: mapstr.M{ + "Field1": mapstr.M{"Field2": "Value"}, + "Field3": "Value", + "error": mapstr.M{"message": "could not fetch value for key: Field4, Error: key not found"}, + }, + Error: true, + }, + { + Name: "Fail if value is not a string", + Values: []string{"Field1"}, + IgnoreMissing: false, + FailOnError: true, + FullPath: true, + Input: mapstr.M{ + "Field1": mapstr.M{"Field2": "Value"}, + "Field3": "Value", + }, + Output: mapstr.M{ + "Field1": mapstr.M{"Field2": "Value"}, + "Field3": "Value", + "error": mapstr.M{"message": "value of key \"Field1\" is not a string"}, + }, + Error: true, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + p := &alterFieldProcessor{ + Fields: test.Fields, + Values: test.Values, + IgnoreMissing: test.IgnoreMissing, + FailOnError: test.FailOnError, + AlterFullField: test.FullPath, + alterFunc: upperCase, + } + + event, err := p.Run(&beat.Event{Fields: test.Input}) + + if !test.Error { + require.NoError(t, err) + } else { + require.Error(t, err) + } + + assert.Equal(t, test.Output, event.Fields) + }) + } + + t.Run("test key collision", func(t *testing.T) { + Input := + mapstr.M{ + "ab":
"first", + "Ab": "second", + } + + p := &alterFieldProcessor{ + Fields: []string{"ab"}, + IgnoreMissing: false, + FailOnError: true, + AlterFullField: true, + alterFunc: upperCase, + } + + _, err := p.Run(&beat.Event{Fields: Input}) + require.Error(t, err) + assert.ErrorIs(t, err, mapstr.ErrKeyCollision) + + }) +} diff --git a/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc b/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc index a80cd7a8be4..1e05e1d2c24 100644 --- a/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc +++ b/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc @@ -83,6 +83,8 @@ examples for each of the supported providers. _AWS_ +Metadata given below are extracted from https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html[instance identity document], + [source,json] ------------------------------------------------------------------------------- { @@ -98,6 +100,22 @@ _AWS_ } ------------------------------------------------------------------------------- +If the EC2 instance has IMDS enabled and if tags are allowed through IMDS endpoint, the processor will further append tags in metadata. +Please refer official documentation on https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html[IMDS endpoint] for further details. + +[source,json] +------------------------------------------------------------------------------- +{ + "aws": { + "tags": { + "org" : "myOrg", + "owner": "userID" + } + } +} +------------------------------------------------------------------------------- + + _Digital Ocean_ [source,json] diff --git a/libbeat/processors/add_cloud_metadata/provider_aws_ec2.go b/libbeat/processors/add_cloud_metadata/provider_aws_ec2.go index ea945ce4bba..ae7dfbf9865 100644 --- a/libbeat/processors/add_cloud_metadata/provider_aws_ec2.go +++ b/libbeat/processors/add_cloud_metadata/provider_aws_ec2.go @@ -20,12 +20,15 @@ package add_cloud_metadata import ( "context" "fmt" + "io" "net/http" + "strings" "github.com/elastic/elastic-agent-libs/logp" awssdk "github.com/aws/aws-sdk-go-v2/aws" awscfg "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" @@ -35,7 +38,14 @@ import ( conf "github.com/elastic/elastic-agent-libs/config" ) +const ( + eksClusterNameTagKey = "eks:cluster-name" + tagsCategory = "tags/instance" + tagPrefix = "aws.tags" +) + type IMDSClient interface { + ec2rolecreds.GetMetadataAPIClient GetInstanceIdentityDocument(ctx context.Context, params *imds.GetInstanceIdentityDocumentInput, optFns ...func(*imds.Options)) (*imds.GetInstanceIdentityDocumentOutput, error) } @@ -90,30 +100,17 @@ func fetchRawProviderMetadata( result.err = fmt.Errorf("failed loading AWS default configuration: %w", err) return } - awsClient := NewIMDSClient(awsConfig) - instanceIdentity, err := awsClient.GetInstanceIdentityDocument(context.TODO(), &imds.GetInstanceIdentityDocumentInput{}) + imdsClient := NewIMDSClient(awsConfig) + instanceIdentity, err := imdsClient.GetInstanceIdentityDocument(ctx, &imds.GetInstanceIdentityDocumentInput{}) if err != nil { result.err = fmt.Errorf("failed fetching EC2 Identity Document: %w", err) return } - // AWS Region must be set to be able to get EC2 Tags awsRegion := 
instanceIdentity.InstanceIdentityDocument.Region - awsConfig.Region = awsRegion accountID := instanceIdentity.InstanceIdentityDocument.AccountID - - clusterName, err := fetchEC2ClusterNameTag(awsConfig, instanceIdentity.InstanceIdentityDocument.InstanceID) - if err != nil { - logger.Warnf("error fetching cluster name metadata: %s.", err) - } else if clusterName != "" { - // for AWS cluster ID is used cluster ARN: arn:partition:service:region:account-id:resource-type/resource-id, example: - // arn:aws:eks:us-east-2:627286350134:cluster/cluster-name - clusterARN := fmt.Sprintf("arn:aws:eks:%s:%s:cluster/%v", awsRegion, accountID, clusterName) - - _, _ = result.metadata.Put("orchestrator.cluster.id", clusterARN) - _, _ = result.metadata.Put("orchestrator.cluster.name", clusterName) - } + instanceID := instanceIdentity.InstanceIdentityDocument.InstanceID _, _ = result.metadata.Put("cloud.instance.id", instanceIdentity.InstanceIdentityDocument.InstanceID) _, _ = result.metadata.Put("cloud.machine.type", instanceIdentity.InstanceIdentityDocument.InstanceType) @@ -122,10 +119,106 @@ _, _ = result.metadata.Put("cloud.account.id", accountID) _, _ = result.metadata.Put("cloud.image.id", instanceIdentity.InstanceIdentityDocument.ImageID) + // AWS Region must be set to be able to get EC2 Tags + awsConfig.Region = awsRegion + tags := getTags(ctx, imdsClient, NewEC2Client(awsConfig), instanceID, logger) + + if tags[eksClusterNameTagKey] != "" { + // for AWS cluster ID is used cluster ARN: arn:partition:service:region:account-id:resource-type/resource-id, example: + // arn:aws:eks:us-east-2:627286350134:cluster/cluster-name + clusterARN := fmt.Sprintf("arn:aws:eks:%s:%s:cluster/%v", awsRegion, accountID, tags[eksClusterNameTagKey]) + + _, _ = result.metadata.Put("orchestrator.cluster.id", clusterARN) + _, _ = result.metadata.Put("orchestrator.cluster.name", tags[eksClusterNameTagKey]) + } + + if len(tags) == 0 { + return + } + + logger.Infof("Adding retrieved tags with key prefix: %s", tagPrefix) + for k, v := range tags { + _, _ = result.metadata.Put(fmt.Sprintf("%s.%s", tagPrefix, k), v) + } +} + +// getTags is a helper to extract EC2 tags. Internally it utilizes multiple extraction methods. +func getTags(ctx context.Context, imdsClient IMDSClient, ec2Client EC2Client, instanceId string, logger *logp.Logger) map[string]string { + logger.Info("Extracting EC2 tags from IMDS endpoint") + tags, ok := getTagsFromIMDS(ctx, imdsClient, logger) + if ok { + return tags + } + + logger.Info("Tag extraction from IMDS failed, falling back to the DescribeTags API to obtain the EKS cluster name.") + clusterName, err := clusterNameFromDescribeTag(ctx, ec2Client, instanceId) + if err != nil { + logger.Warnf("error obtaining cluster name: %v.", err) + return tags + } + + if clusterName != "" { + tags[eksClusterNameTagKey] = clusterName + } + return tags +} + +// getTagsFromIMDS is a helper to extract EC2 tags using the instance metadata service. +// Note that this call could get throttled and currently does not implement a retry mechanism.
+// See - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html#instancedata-throttling +func getTagsFromIMDS(ctx context.Context, client IMDSClient, logger *logp.Logger) (tags map[string]string, ok bool) { + tags = make(map[string]string) + + b, err := getMetadataHelper(ctx, client, tagsCategory, logger) + if err != nil { + logger.Warnf("error obtaining tags category: %v", err) + return tags, false + } + + for _, tag := range strings.Split(string(b), "\n") { + tagPath := fmt.Sprintf("%s/%s", tagsCategory, tag) + b, err := getMetadataHelper(ctx, client, tagPath, logger) + if err != nil { + logger.Warnf("error extracting tag value of %s: %v", tag, err) + return tags, false + } + + tagValue := string(b) + if tagValue == "" { + logger.Infof("Ignoring tag key %s as value is empty", tag) + continue + } + + tags[tag] = tagValue + } + + return tags, true +} + +// getMetadataHelper performs the IMDS call for the given path and returns the response content after closing the underlying content reader. +func getMetadataHelper(ctx context.Context, client IMDSClient, path string, logger *logp.Logger) (content []byte, err error) { + metadata, err := client.GetMetadata(ctx, &imds.GetMetadataInput{Path: path}) + if err != nil { + return nil, fmt.Errorf("error from IMDS metadata request: %w", err) + } + + defer func(Content io.ReadCloser) { + err := Content.Close() + if err != nil { + logger.Warnf("error closing IMDS metadata response body: %v", err) + } + }(metadata.Content) + + content, err = io.ReadAll(metadata.Content) + if err != nil { + return nil, fmt.Errorf("error extracting metadata from the IMDS response: %w", err) + } + + return content, nil } -func fetchEC2ClusterNameTag(awsConfig awssdk.Config, instanceID string) (string, error) { - svc := NewEC2Client(awsConfig) +// clusterNameFromDescribeTag is a helper to extract EKS cluster name using DescribeTag. 
+func clusterNameFromDescribeTag(ctx context.Context, ec2Client EC2Client, instanceID string) (string, error) { input := &ec2.DescribeTagsInput{ Filters: []types.Filter{ { @@ -135,15 +228,13 @@ func fetchEC2ClusterNameTag(awsConfig awssdk.Config, instanceID string) (string, }, }, { - Name: awssdk.String("key"), - Values: []string{ - "eks:cluster-name", - }, + Name: awssdk.String("key"), + Values: []string{eksClusterNameTagKey}, }, }, } - tagsResult, err := svc.DescribeTags(context.TODO(), input) + tagsResult, err := ec2Client.DescribeTags(ctx, input) if err != nil { return "", fmt.Errorf("error fetching EC2 Tags: %w", err) } diff --git a/libbeat/processors/add_cloud_metadata/provider_aws_ec2_test.go b/libbeat/processors/add_cloud_metadata/provider_aws_ec2_test.go index 76ddea084a7..b36d566b64d 100644 --- a/libbeat/processors/add_cloud_metadata/provider_aws_ec2_test.go +++ b/libbeat/processors/add_cloud_metadata/provider_aws_ec2_test.go @@ -19,8 +19,11 @@ package add_cloud_metadata import ( "context" + "errors" "fmt" + "io" "os" + "strings" "testing" awssdk "github.com/aws/aws-sdk-go-v2/aws" @@ -43,8 +46,17 @@ func init() { os.Setenv("AWS_EC2_METADATA_DISABLED", "true") } +type getInstanceIDFunc func(ctx context.Context, params *imds.GetInstanceIdentityDocumentInput, optFns ...func(*imds.Options)) (*imds.GetInstanceIdentityDocumentOutput, error) +type getMetaFunc func(ctx context.Context, input *imds.GetMetadataInput, f ...func(*imds.Options)) (*imds.GetMetadataOutput, error) +type getTagFunc func(ctx context.Context, params *ec2.DescribeTagsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeTagsOutput, error) + type MockIMDSClient struct { - GetInstanceIdentityDocumentFunc func(ctx context.Context, params *imds.GetInstanceIdentityDocumentInput, optFns ...func(*imds.Options)) (*imds.GetInstanceIdentityDocumentOutput, error) + GetInstanceIdentityDocumentFunc getInstanceIDFunc + GetMetadataFunc getMetaFunc +} + +func (m *MockIMDSClient) GetMetadata(ctx context.Context, input *imds.GetMetadataInput, f ...func(*imds.Options)) (*imds.GetMetadataOutput, error) { + return m.GetMetadataFunc(ctx, input, f...) } func (m *MockIMDSClient) GetInstanceIdentityDocument(ctx context.Context, params *imds.GetInstanceIdentityDocumentInput, optFns ...func(*imds.Options)) (*imds.GetInstanceIdentityDocumentOutput, error) { @@ -52,13 +64,75 @@ func (m *MockIMDSClient) GetInstanceIdentityDocument(ctx context.Context, params } type MockEC2Client struct { - DescribeTagsFunc func(ctx context.Context, params *ec2.DescribeTagsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeTagsOutput, error) + DescribeTagsFunc getTagFunc } func (e *MockEC2Client) DescribeTags(ctx context.Context, params *ec2.DescribeTagsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeTagsOutput, error) { return e.DescribeTagsFunc(ctx, params, optFns...) 
} +var ( + // not the best way to use a response template + // but this should serve until we need to test + // documents containing very different values + accountIDDoc1 = "111111111111111" + regionDoc1 = "us-east-1" + availabilityZoneDoc1 = "us-east-1c" + imageIDDoc1 = "ami-abcd1234" + instanceTypeDoc1 = "t2.medium" + instanceIDDoc2 = "i-22222222" + clusterNameKey = eksClusterNameTagKey + clusterNameValue = "test" + instanceIDDoc1 = "i-11111111" + customTagKey = "organization" + customTagValue = "orgName" +) + +// generic getMetaFunc implementation that returns an error, simulating a disabled IMDS endpoint +var disabledIMDS getMetaFunc = func(ctx context.Context, input *imds.GetMetadataInput, f ...func(*imds.Options)) (*imds.GetMetadataOutput, error) { + return nil, errors.New("IMDS disabled mock error") +} + +// generic getMetaFunc implementation that serves valid tags +var genericImdsGet getMetaFunc = func(ctx context.Context, input *imds.GetMetadataInput, f ...func(*imds.Options)) (*imds.GetMetadataOutput, error) { + tagKeys := fmt.Sprintf("%s\n%s", customTagKey, eksClusterNameTagKey) + + if input.Path == tagsCategory { + // tag category request + return &imds.GetMetadataOutput{ + Content: io.NopCloser(strings.NewReader(tagKeys)), + }, nil + } + + // tag request + if strings.HasSuffix(input.Path, customTagKey) { + return &imds.GetMetadataOutput{ + Content: io.NopCloser(strings.NewReader(customTagValue)), + }, nil + } + + if strings.HasSuffix(input.Path, eksClusterNameTagKey) { + return &imds.GetMetadataOutput{ + Content: io.NopCloser(strings.NewReader(clusterNameValue)), + }, nil + } + return nil, errors.New("invalid request") +} + +// generic getInstanceIDFunc implementation with known response values and no error +var genericInstanceIDResponse getInstanceIDFunc = func(ctx context.Context, params *imds.GetInstanceIdentityDocumentInput, optFns ...func(*imds.Options)) (*imds.GetInstanceIdentityDocumentOutput, error) { + return &imds.GetInstanceIdentityDocumentOutput{ + InstanceIdentityDocument: imds.InstanceIdentityDocument{ + AvailabilityZone: availabilityZoneDoc1, + Region: regionDoc1, + InstanceID: instanceIDDoc1, + InstanceType: instanceTypeDoc1, + AccountID: accountIDDoc1, + ImageID: imageIDDoc1, + }, + }, nil +} + func TestMain(m *testing.M) { logp.TestingSetup() code := m.Run() @@ -66,43 +140,19 @@ } func TestRetrieveAWSMetadataEC2(t *testing.T) { - var ( - // not the best way to use a response template - // but this should serve until we need to test - // documents containing very different values - accountIDDoc1 = "111111111111111" - regionDoc1 = "us-east-1" - availabilityZoneDoc1 = "us-east-1c" - imageIDDoc1 = "ami-abcd1234" - instanceTypeDoc1 = "t2.medium" - instanceIDDoc2 = "i-22222222" - clusterNameKey = "eks:cluster-name" - clusterNameValue = "test" - instanceIDDoc1 = "i-11111111" - ) - var tests = []struct { testName string - mockGetInstanceIdentity func(ctx context.Context, params *imds.GetInstanceIdentityDocumentInput, optFns ...func(*imds.Options)) (*imds.GetInstanceIdentityDocumentOutput, error) - mockEc2Tags func(ctx context.Context, params *ec2.DescribeTagsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeTagsOutput, error) + mockGetInstanceIdentity getInstanceIDFunc + mockMetadata getMetaFunc + mockEc2Tags getTagFunc processorOverwrite bool previousEvent mapstr.M expectedEvent mapstr.M }{ { - testName: "valid instance identity document, no cluster tags", - mockGetInstanceIdentity: func(ctx context.Context, params
*imds.GetInstanceIdentityDocumentInput, optFns ...func(*imds.Options)) (*imds.GetInstanceIdentityDocumentOutput, error) { - return &imds.GetInstanceIdentityDocumentOutput{ - InstanceIdentityDocument: imds.InstanceIdentityDocument{ - AvailabilityZone: availabilityZoneDoc1, - Region: regionDoc1, - InstanceID: instanceIDDoc1, - InstanceType: instanceTypeDoc1, - AccountID: accountIDDoc1, - ImageID: imageIDDoc1, - }, - }, nil - }, + testName: "valid instance identity document, no cluster tags", + mockGetInstanceIdentity: genericInstanceIDResponse, + mockMetadata: disabledIMDS, mockEc2Tags: func(ctx context.Context, params *ec2.DescribeTagsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeTagsOutput, error) { return &ec2.DescribeTagsOutput{ Tags: []types.TagDescription{}, @@ -124,19 +174,9 @@ func TestRetrieveAWSMetadataEC2(t *testing.T) { }, }, { - testName: "all fields from processor", - mockGetInstanceIdentity: func(ctx context.Context, params *imds.GetInstanceIdentityDocumentInput, optFns ...func(*imds.Options)) (*imds.GetInstanceIdentityDocumentOutput, error) { - return &imds.GetInstanceIdentityDocumentOutput{ - InstanceIdentityDocument: imds.InstanceIdentityDocument{ - AvailabilityZone: availabilityZoneDoc1, - Region: regionDoc1, - InstanceID: instanceIDDoc1, - InstanceType: instanceTypeDoc1, - AccountID: accountIDDoc1, - ImageID: imageIDDoc1, - }, - }, nil - }, + testName: "all fields from processor", + mockGetInstanceIdentity: genericInstanceIDResponse, + mockMetadata: disabledIMDS, mockEc2Tags: func(ctx context.Context, params *ec2.DescribeTagsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeTagsOutput, error) { return &ec2.DescribeTagsOutput{ Tags: []types.TagDescription{ @@ -168,22 +208,17 @@ func TestRetrieveAWSMetadataEC2(t *testing.T) { "id": fmt.Sprintf("arn:aws:eks:%s:%s:cluster/%s", regionDoc1, accountIDDoc1, clusterNameValue), }, }, + "aws": mapstr.M{ + "tags": mapstr.M{ + eksClusterNameTagKey: clusterNameValue, + }, + }, }, }, { - testName: "instanceId pre-informed, no overwrite", - mockGetInstanceIdentity: func(ctx context.Context, params *imds.GetInstanceIdentityDocumentInput, optFns ...func(*imds.Options)) (*imds.GetInstanceIdentityDocumentOutput, error) { - return &imds.GetInstanceIdentityDocumentOutput{ - InstanceIdentityDocument: imds.InstanceIdentityDocument{ - AvailabilityZone: availabilityZoneDoc1, - Region: regionDoc1, - InstanceID: instanceIDDoc1, - InstanceType: instanceTypeDoc1, - AccountID: accountIDDoc1, - ImageID: imageIDDoc1, - }, - }, nil - }, + testName: "instanceId pre-informed, no overwrite", + mockGetInstanceIdentity: genericInstanceIDResponse, + mockMetadata: disabledIMDS, mockEc2Tags: func(ctx context.Context, params *ec2.DescribeTagsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeTagsOutput, error) { return &ec2.DescribeTagsOutput{ Tags: []types.TagDescription{ @@ -212,25 +247,20 @@ func TestRetrieveAWSMetadataEC2(t *testing.T) { "id": fmt.Sprintf("arn:aws:eks:%s:%s:cluster/%s", regionDoc1, accountIDDoc1, clusterNameValue), }, }, + "aws": mapstr.M{ + "tags": mapstr.M{ + eksClusterNameTagKey: clusterNameValue, + }, + }, }, }, { // NOTE: In this case, add_cloud_metadata will overwrite cloud fields because // it won't detect cloud.provider as a cloud field. This is not the behavior we // expect and will find a better solution later in issue 11697. 
- testName: "only cloud.provider pre-informed, no overwrite", - mockGetInstanceIdentity: func(ctx context.Context, params *imds.GetInstanceIdentityDocumentInput, optFns ...func(*imds.Options)) (*imds.GetInstanceIdentityDocumentOutput, error) { - return &imds.GetInstanceIdentityDocumentOutput{ - InstanceIdentityDocument: imds.InstanceIdentityDocument{ - AvailabilityZone: availabilityZoneDoc1, - Region: regionDoc1, - InstanceID: instanceIDDoc1, - InstanceType: instanceTypeDoc1, - AccountID: accountIDDoc1, - ImageID: imageIDDoc1, - }, - }, nil - }, + testName: "only cloud.provider pre-informed, no overwrite", + mockGetInstanceIdentity: genericInstanceIDResponse, + mockMetadata: disabledIMDS, mockEc2Tags: func(ctx context.Context, params *ec2.DescribeTagsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeTagsOutput, error) { return &ec2.DescribeTagsOutput{ Tags: []types.TagDescription{ @@ -265,22 +295,17 @@ func TestRetrieveAWSMetadataEC2(t *testing.T) { "id": fmt.Sprintf("arn:aws:eks:%s:%s:cluster/%s", regionDoc1, accountIDDoc1, clusterNameValue), }, }, + "aws": mapstr.M{ + "tags": mapstr.M{ + eksClusterNameTagKey: clusterNameValue, + }, + }, }, }, { - testName: "instanceId pre-informed, overwrite", - mockGetInstanceIdentity: func(ctx context.Context, params *imds.GetInstanceIdentityDocumentInput, optFns ...func(*imds.Options)) (*imds.GetInstanceIdentityDocumentOutput, error) { - return &imds.GetInstanceIdentityDocumentOutput{ - InstanceIdentityDocument: imds.InstanceIdentityDocument{ - AvailabilityZone: availabilityZoneDoc1, - Region: regionDoc1, - InstanceID: instanceIDDoc1, - InstanceType: instanceTypeDoc1, - AccountID: accountIDDoc1, - ImageID: imageIDDoc1, - }, - }, nil - }, + testName: "instanceId pre-informed, overwrite", + mockGetInstanceIdentity: genericInstanceIDResponse, + mockMetadata: disabledIMDS, mockEc2Tags: func(ctx context.Context, params *ec2.DescribeTagsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeTagsOutput, error) { return &ec2.DescribeTagsOutput{ Tags: []types.TagDescription{}, @@ -306,19 +331,9 @@ func TestRetrieveAWSMetadataEC2(t *testing.T) { }, }, { - testName: "only cloud.provider pre-informed, overwrite", - mockGetInstanceIdentity: func(ctx context.Context, params *imds.GetInstanceIdentityDocumentInput, optFns ...func(*imds.Options)) (*imds.GetInstanceIdentityDocumentOutput, error) { - return &imds.GetInstanceIdentityDocumentOutput{ - InstanceIdentityDocument: imds.InstanceIdentityDocument{ - AvailabilityZone: availabilityZoneDoc1, - Region: regionDoc1, - InstanceID: instanceIDDoc1, - InstanceType: instanceTypeDoc1, - AccountID: accountIDDoc1, - ImageID: imageIDDoc1, - }, - }, nil - }, + testName: "only cloud.provider pre-informed, overwrite", + mockGetInstanceIdentity: genericInstanceIDResponse, + mockMetadata: disabledIMDS, mockEc2Tags: func(ctx context.Context, params *ec2.DescribeTagsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeTagsOutput, error) { return &ec2.DescribeTagsOutput{ Tags: []types.TagDescription{}, @@ -342,6 +357,36 @@ func TestRetrieveAWSMetadataEC2(t *testing.T) { }, }, }, + { + testName: "if enabled, extract tags from IMDS endpoint", + mockGetInstanceIdentity: genericInstanceIDResponse, + mockMetadata: genericImdsGet, + mockEc2Tags: nil, // could be nil as IMDS response fulfills tag + expectedEvent: mapstr.M{ + "cloud": mapstr.M{ + "provider": "aws", + "account": mapstr.M{"id": accountIDDoc1}, + "instance": mapstr.M{"id": instanceIDDoc1}, + "machine": mapstr.M{"type": instanceTypeDoc1}, + "image": mapstr.M{"id": imageIDDoc1}, + 
"region": regionDoc1, + "availability_zone": availabilityZoneDoc1, + "service": mapstr.M{"name": "EC2"}, + }, + "orchestrator": mapstr.M{ + "cluster": mapstr.M{ + "name": clusterNameValue, + "id": fmt.Sprintf("arn:aws:eks:%s:%s:cluster/%s", regionDoc1, accountIDDoc1, clusterNameValue), + }, + }, + "aws": mapstr.M{ + "tags": mapstr.M{ + eksClusterNameTagKey: clusterNameValue, + customTagKey: customTagValue, + }, + }, + }, + }, } for _, tc := range tests { @@ -350,6 +395,7 @@ func TestRetrieveAWSMetadataEC2(t *testing.T) { NewIMDSClient = func(cfg awssdk.Config) IMDSClient { return &MockIMDSClient{ GetInstanceIdentityDocumentFunc: tc.mockGetInstanceIdentity, + GetMetadataFunc: tc.mockMetadata, } } defer func() { NewIMDSClient = func(cfg awssdk.Config) IMDSClient { return imds.NewFromConfig(cfg) } }() @@ -381,3 +427,93 @@ func TestRetrieveAWSMetadataEC2(t *testing.T) { }) } } + +func Test_getTags(t *testing.T) { + ctx := context.Background() + instanceId := "ami-abcd1234" + logger := logp.NewLogger("add_cloud_metadata test logger") + + tests := []struct { + name string + imdsClient IMDSClient + ec2Client EC2Client + want map[string]string + }{ + { + name: "tags extracted from IMDS if possible", + imdsClient: &MockIMDSClient{ + GetMetadataFunc: genericImdsGet, + }, + want: map[string]string{ + customTagKey: customTagValue, + eksClusterNameTagKey: clusterNameValue, + }, + }, + { + name: "tag extraction fallback to DescribeTag if IMDS fetch results in an error", + imdsClient: &MockIMDSClient{ + GetMetadataFunc: disabledIMDS, + }, + ec2Client: &MockEC2Client{ + DescribeTagsFunc: func(ctx context.Context, params *ec2.DescribeTagsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeTagsOutput, error) { + return &ec2.DescribeTagsOutput{ + Tags: []types.TagDescription{ + { + Key: &clusterNameKey, + ResourceId: &instanceId, + ResourceType: "instance", + Value: &clusterNameValue, + }, + }, + }, nil + }}, + want: map[string]string{ + eksClusterNameTagKey: clusterNameValue, + }, + }, + { + name: "empty tags if all methods failed", + imdsClient: &MockIMDSClient{ + GetMetadataFunc: disabledIMDS, + }, + ec2Client: &MockEC2Client{ + DescribeTagsFunc: func(ctx context.Context, params *ec2.DescribeTagsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeTagsOutput, error) { + return nil, errors.New("some error from DescribeTag") + }}, + want: map[string]string{}, + }, + { + name: "Empty tags values are ignored", + imdsClient: &MockIMDSClient{ + GetMetadataFunc: func(ctx context.Context, input *imds.GetMetadataInput, f ...func(*imds.Options)) (*imds.GetMetadataOutput, error) { + if input.Path == tagsCategory { + // tag category request + return &imds.GetMetadataOutput{ + Content: io.NopCloser(strings.NewReader(customTagKey)), + }, nil + } + + // tag request + if strings.HasSuffix(input.Path, customTagKey) { + return &imds.GetMetadataOutput{ + Content: io.NopCloser(strings.NewReader("")), + }, nil + } + + return nil, errors.New("invalid request") + }, + }, + ec2Client: &MockEC2Client{ + DescribeTagsFunc: func(ctx context.Context, params *ec2.DescribeTagsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeTagsOutput, error) { + return nil, errors.New("some error from DescribeTag") + }}, + want: map[string]string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tags := getTags(ctx, tt.imdsClient, tt.ec2Client, instanceId, logger) + assert.Equal(t, tags, tt.want) + }) + } +} diff --git a/libbeat/processors/add_cloud_metadata/providers.go 
b/libbeat/processors/add_cloud_metadata/providers.go index a9978251cfd..ea56a5e669b 100644 --- a/libbeat/processors/add_cloud_metadata/providers.go +++ b/libbeat/processors/add_cloud_metadata/providers.go @@ -187,7 +187,7 @@ func (p *addCloudMetadata) fetchMetadata() *result { if result.err == nil && result.metadata != nil { return &result } else if result.err != nil { - p.logger.Errorf("add_cloud_metadata: received error %v", result.err) + p.logger.Errorf("add_cloud_metadata: received error for provider %s: %v", result.provider, result.err) } case <-ctx.Done(): p.logger.Debugf("add_cloud_metadata: timed-out waiting for all responses") diff --git a/libbeat/template/load_test.go b/libbeat/template/load_test.go index 8f6b1837d9a..db82384539c 100644 --- a/libbeat/template/load_test.go +++ b/libbeat/template/load_test.go @@ -170,7 +170,7 @@ func TestFileLoader_Load(t *testing.T) { "refresh_interval": "5s", "mapping": mapstr.M{ "total_fields": mapstr.M{ - "limit": 10000, + "limit": defaultTotalFieldsLimit, }, }, "query": mapstr.M{ diff --git a/libbeat/template/template.go b/libbeat/template/template.go index 5663a55c9cb..f68b56987fd 100644 --- a/libbeat/template/template.go +++ b/libbeat/template/template.go @@ -35,7 +35,7 @@ import ( var ( // Defaults used in the template defaultDateDetection = false - defaultTotalFieldsLimit = 10000 + defaultTotalFieldsLimit = 12500 defaultMaxDocvalueFieldsSearch = 200 defaultFields []string diff --git a/metricbeat/Dockerfile b/metricbeat/Dockerfile index d163510a06e..75b463c3e88 100644 --- a/metricbeat/Dockerfile +++ b/metricbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.8 +FROM golang:1.22.9 COPY --from=docker:26.0.0-alpine3.19 /usr/local/bin/docker /usr/local/bin/ RUN \ diff --git a/metricbeat/docs/modules/aerospike.asciidoc b/metricbeat/docs/modules/aerospike.asciidoc index a2c873665e1..b4c6a59d12e 100644 --- a/metricbeat/docs/modules/aerospike.asciidoc +++ b/metricbeat/docs/modules/aerospike.asciidoc @@ -14,7 +14,7 @@ The Aerospike module uses the http://www.aerospike.com/docs/reference/info[Info [float] === Compatibility -The Aerospike metricsets were tested with Aerospike 3.9 and are expected to work with all versions >= 3.9. +The Aerospike metricsets were tested with Aerospike Enterprise Edition 7.2.0.1 and are expected to work with all versions >= 4.9. [float] @@ -45,6 +45,16 @@ metricbeat.modules: # Aerospike Cluster Name #cluster_name: myclustername + # Username of hosts. Empty by default. + #username: root + + # Password of hosts. Empty by default. + #password: secret + + # Authentication modes: https://aerospike.com/docs/server/guide/security/access-control + # Possible values: internal (default), external, pki + #auth_mode: internal + # Optional SSL/TLS (disabled by default) #ssl.enabled: true diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index b87cdb049fe..0a05e0c6008 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -152,6 +152,16 @@ metricbeat.modules: # Aerospike Cluster Name #cluster_name: myclustername + # Username of hosts. Empty by default. + #username: root + + # Password of hosts. Empty by default. 
+ #password: secret + + # Authentication modes: https://aerospike.com/docs/server/guide/security/access-control + # Possible values: internal (default), external, pki + #auth_mode: internal + # Optional SSL/TLS (disabled by default) #ssl.enabled: true diff --git a/metricbeat/module/aerospike/_meta/Dockerfile b/metricbeat/module/aerospike/_meta/Dockerfile index ee1f021f71d..5aea63a508e 100644 --- a/metricbeat/module/aerospike/_meta/Dockerfile +++ b/metricbeat/module/aerospike/_meta/Dockerfile @@ -1,5 +1,4 @@ ARG AEROSPIKE_VERSION -FROM aerospike:${AEROSPIKE_VERSION} +FROM aerospike/aerospike-server-enterprise:${AEROSPIKE_VERSION} -RUN apt-get update && apt-get install -y netcat-openbsd -HEALTHCHECK --interval=1s --retries=90 CMD nc -z localhost 3000 +HEALTHCHECK --interval=1s --retries=90 CMD asinfo -v "namespace/test" | grep "ns_cluster_size=1" -q diff --git a/metricbeat/module/aerospike/_meta/config.reference.yml b/metricbeat/module/aerospike/_meta/config.reference.yml index cca162aa3db..900f56de7da 100644 --- a/metricbeat/module/aerospike/_meta/config.reference.yml +++ b/metricbeat/module/aerospike/_meta/config.reference.yml @@ -7,6 +7,16 @@ # Aerospike Cluster Name #cluster_name: myclustername + # Username of hosts. Empty by default. + #username: root + + # Password of hosts. Empty by default. + #password: secret + + # Authentication modes: https://aerospike.com/docs/server/guide/security/access-control + # Possible values: internal (default), external, pki + #auth_mode: internal + # Optional SSL/TLS (disabled by default) #ssl.enabled: true diff --git a/metricbeat/module/aerospike/_meta/config.yml b/metricbeat/module/aerospike/_meta/config.yml index 42db4e48332..dd333b05139 100644 --- a/metricbeat/module/aerospike/_meta/config.yml +++ b/metricbeat/module/aerospike/_meta/config.yml @@ -7,6 +7,16 @@ # Aerospike Cluster Name #cluster_name: myclustername + # Username of hosts. Empty by default. + #username: root + + # Password of hosts. Empty by default. + #password: secret + + # Authentication modes: https://aerospike.com/docs/server/guide/security/access-control + # Possible values: internal (default), external, pki + #auth_mode: internal + # Optional SSL/TLS (disabled by default) #ssl.enabled: true diff --git a/metricbeat/module/aerospike/_meta/docs.asciidoc b/metricbeat/module/aerospike/_meta/docs.asciidoc index dd13a19211c..2e38f8e4e7b 100644 --- a/metricbeat/module/aerospike/_meta/docs.asciidoc +++ b/metricbeat/module/aerospike/_meta/docs.asciidoc @@ -3,7 +3,7 @@ The Aerospike module uses the http://www.aerospike.com/docs/reference/info[Info [float] === Compatibility -The Aerospike metricsets were tested with Aerospike 3.9 and are expected to work with all versions >= 3.9. +The Aerospike metricsets were tested with Aerospike Enterprise Edition 7.2.0.1 and are expected to work with all versions >= 4.9. 
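Editor's note: a brief sketch of how the new username/password/auth_mode options are intended to flow into the client policy via ParseClientPolicy in aerospike.go below. This is not part of the diff; the credential values are illustrative.

package main

import (
	"fmt"

	as "github.com/aerospike/aerospike-client-go/v7"

	"github.com/elastic/beats/v7/metricbeat/module/aerospike"
)

func main() {
	cfg := aerospike.DefaultConfig()
	cfg.User = "metrics" // illustrative; username and password must be set together
	cfg.Password = "secret"
	cfg.AuthMode = aerospike.AUTH_MODE_EXTERNAL // "internal", "external", or "pki"

	policy, err := aerospike.ParseClientPolicy(cfg)
	if err != nil {
		// e.g. username without password, or an unknown auth_mode
		panic(err)
	}

	// The policy carries the credentials and maps "external" onto the
	// corresponding client constant.
	fmt.Println(policy.User, policy.AuthMode == as.AuthModeExternal)
}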
[float] diff --git a/metricbeat/module/aerospike/aerospike.go b/metricbeat/module/aerospike/aerospike.go index 745914c055d..bf051ca9f4e 100644 --- a/metricbeat/module/aerospike/aerospike.go +++ b/metricbeat/module/aerospike/aerospike.go @@ -24,14 +24,24 @@ import ( "github.com/elastic/elastic-agent-libs/transport/tlscommon" - as "github.com/aerospike/aerospike-client-go" + as "github.com/aerospike/aerospike-client-go/v7" ) type Config struct { ClusterName string `config:"cluster_name"` TLS *tlscommon.Config `config:"ssl"` + User string `config:"username"` + Password string `config:"password"` + AuthMode string `config:"auth_mode"` } +const ( + AUTH_MODE_UNSET string = "" + AUTH_MODE_INTERNAL string = "internal" + AUTH_MODE_PKI string = "pki" + AUTH_MODE_EXTERNAL string = "external" +) + // DefaultConfig return default config for the aerospike module. func DefaultConfig() Config { return Config{} @@ -47,6 +57,28 @@ func ParseClientPolicy(config Config) (*as.ClientPolicy, error) { clientPolicy.TlsConfig = tlsconfig.ToConfig() } + if config.User != "" && config.Password != "" { + clientPolicy.User = config.User + clientPolicy.Password = config.Password + } else if config.User != "" { + return nil, fmt.Errorf("if username is set, password should be set too") + } else if config.Password != "" { + return nil, fmt.Errorf("if password is set, username should be set too") + } + + switch config.AuthMode { + case AUTH_MODE_UNSET: + // Use default behavior of client + case AUTH_MODE_INTERNAL: + clientPolicy.AuthMode = as.AuthModeInternal + case AUTH_MODE_EXTERNAL: + clientPolicy.AuthMode = as.AuthModeExternal + case AUTH_MODE_PKI: + clientPolicy.AuthMode = as.AuthModePKI + default: + return nil, fmt.Errorf("unknown authentication mode '%s'", config.AuthMode) + } + if config.ClusterName != "" { clientPolicy.ClusterName = config.ClusterName } diff --git a/metricbeat/module/aerospike/aerospike_test.go b/metricbeat/module/aerospike/aerospike_test.go index 9dbcbeec4a5..23be6536564 100644 --- a/metricbeat/module/aerospike/aerospike_test.go +++ b/metricbeat/module/aerospike/aerospike_test.go @@ -19,13 +19,14 @@ package aerospike import ( "errors" + "fmt" "testing" "github.com/stretchr/testify/assert" "github.com/elastic/elastic-agent-libs/transport/tlscommon" - as "github.com/aerospike/aerospike-client-go" + as "github.com/aerospike/aerospike-client-go/v7" ) func TestParseHost(t *testing.T) { @@ -105,6 +106,8 @@ func pointer[T any](d T) *T { func TestParseClientPolicy(t *testing.T) { sampleClusterName := "TestCluster" + sampleUser := "TestUser" + samplePassword := "MySecretPassword" TLSPolicy := as.NewClientPolicy() tlsconfig, _ := tlscommon.LoadTLSConfig(&tlscommon.Config{Enabled: pointer(true)}) @@ -113,6 +116,24 @@ func TestParseClientPolicy(t *testing.T) { ClusterNamePolicy := as.NewClientPolicy() ClusterNamePolicy.ClusterName = sampleClusterName + UserPasswordClientPolicy := as.NewClientPolicy() + UserPasswordClientPolicy.User = sampleUser + UserPasswordClientPolicy.Password = samplePassword + + UserPasswordTLSPolicy := as.NewClientPolicy() + UserPasswordTLSPolicy.User = sampleUser + UserPasswordTLSPolicy.Password = samplePassword + UserPasswordTLSPolicy.TlsConfig = tlsconfig.ToConfig() + + ExternalAuthModePolicy := as.NewClientPolicy() + ExternalAuthModePolicy.AuthMode = as.AuthModeExternal + + PKIAuthModePolicy := as.NewClientPolicy() + PKIAuthModePolicy.AuthMode = as.AuthModePKI + + InternalAuthModePolicy := as.NewClientPolicy() + 
InternalAuthModePolicy.AuthMode = as.AuthModeInternal + tests := []struct { Name string Config Config @@ -143,6 +164,66 @@ func TestParseClientPolicy(t *testing.T) { expectedClientPolicy: ClusterNamePolicy, expectedErr: nil, }, + { + Name: "Username and password are honored", + Config: Config{ + User: sampleUser, + Password: samplePassword, + }, + expectedClientPolicy: UserPasswordClientPolicy, + expectedErr: nil, + }, + { + Name: "Username is set and password is not set", + Config: Config{ + User: sampleUser, + }, + expectedClientPolicy: as.NewClientPolicy(), + expectedErr: fmt.Errorf("if username is set, password should be set too"), + }, + { + Name: "Password is set and user is not set", + Config: Config{ + Password: samplePassword, + }, + expectedClientPolicy: as.NewClientPolicy(), + expectedErr: fmt.Errorf("if password is set, username should be set too"), + }, + { + Name: "TLS and Basic Auth", + Config: Config{ + TLS: &tlscommon.Config{ + Enabled: pointer(true), + }, + User: sampleUser, + Password: samplePassword, + }, + expectedClientPolicy: UserPasswordTLSPolicy, + expectedErr: nil, + }, + { + Name: "Unsupported Authentication Mode", + Config: Config{ + AuthMode: "doesnotexist", + }, + expectedErr: fmt.Errorf("unknown authentication mode 'doesnotexist'"), + }, + { + Name: "External Authentication Mode", + Config: Config{ + AuthMode: AUTH_MODE_EXTERNAL, + }, + expectedClientPolicy: ExternalAuthModePolicy, + expectedErr: nil, + }, + { + Name: "Internal Authentication Mode", + Config: Config{ + AuthMode: AUTH_MODE_INTERNAL, + }, + expectedClientPolicy: InternalAuthModePolicy, + expectedErr: nil, + }, } for _, test := range tests { @@ -158,6 +239,16 @@ func TestParseClientPolicy(t *testing.T) { } assert.Equalf(t, test.expectedClientPolicy.ClusterName, result.ClusterName, "Aerospike policy cluster name is wrong. Got '%s' expected '%s'", result.ClusterName, test.expectedClientPolicy.ClusterName) + + assert.Equalf(t, test.expectedClientPolicy.User, result.User, + "Aerospike policy username is wrong. Got '%s' expected '%s'", result.User, test.expectedClientPolicy.User) + + assert.Equalf(t, test.expectedClientPolicy.Password, result.Password, + "Aerospike policy password is wrong. Got '%s' expected '%s'", result.Password, test.expectedClientPolicy.Password) + + assert.Equalf(t, test.expectedClientPolicy.AuthMode, result.AuthMode, + "Aerospike policy authentication mode is wrong.
Got '%s' expected '%s'", result.AuthMode, test.expectedClientPolicy.AuthMode) + if test.Config.TLS.IsEnabled() { assert.NotNil(t, result.TlsConfig, "Aerospike policy: TLS is not set even though TLS is specified in the configuration") } diff --git a/metricbeat/module/aerospike/docker-compose.yml b/metricbeat/module/aerospike/docker-compose.yml index 0de97dc783d..8976360c867 100644 --- a/metricbeat/module/aerospike/docker-compose.yml +++ b/metricbeat/module/aerospike/docker-compose.yml @@ -2,10 +2,10 @@ version: '2.3' services: aerospike: - image: docker.elastic.co/integrations-ci/beats-aerospike:${AEROSPIKE_VERSION:-3.9.0}-1 + image: docker.elastic.co/integrations-ci/beats-aerospike:${AEROSPIKE_VERSION:-7.2.0.1}-1 build: context: ./_meta args: - AEROSPIKE_VERSION: ${AEROSPIKE_VERSION:-3.9.0} + AEROSPIKE_VERSION: ${AEROSPIKE_VERSION:-7.2.0.1} ports: - 3000 diff --git a/metricbeat/module/aerospike/namespace/namespace.go b/metricbeat/module/aerospike/namespace/namespace.go index 265db9f933b..3385c9f59f2 100644 --- a/metricbeat/module/aerospike/namespace/namespace.go +++ b/metricbeat/module/aerospike/namespace/namespace.go @@ -21,7 +21,7 @@ import ( "fmt" "strings" - as "github.com/aerospike/aerospike-client-go" + as "github.com/aerospike/aerospike-client-go/v7" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/aerospike" @@ -45,6 +45,7 @@ type MetricSet struct { host *as.Host clientPolicy *as.ClientPolicy client *as.Client + infoPolicy *as.InfoPolicy } // New create a new instance of the MetricSet @@ -70,6 +71,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { BaseMetricSet: base, host: host, clientPolicy: clientPolicy, + infoPolicy: as.NewInfoPolicy(), }, nil } @@ -82,14 +84,14 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { } for _, node := range m.client.GetNodes() { - info, err := as.RequestNodeInfo(node, "namespaces") + info, err := node.RequestInfo(m.infoPolicy, "namespaces") if err != nil { m.Logger().Error("Failed to retrieve namespaces from node %s", node.GetName()) continue } for _, namespace := range strings.Split(info["namespaces"], ";") { - info, err := as.RequestNodeInfo(node, "namespace/"+namespace) + info, err := node.RequestInfo(m.infoPolicy, "namespace/"+namespace) if err != nil { m.Logger().Error("Failed to retrieve metrics for namespace %s from node %s", namespace, node.GetName()) continue diff --git a/metricbeat/module/http/_meta/Dockerfile b/metricbeat/module/http/_meta/Dockerfile index 324b026ef9d..84db4e65617 100644 --- a/metricbeat/module/http/_meta/Dockerfile +++ b/metricbeat/module/http/_meta/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.8 +FROM golang:1.22.9 COPY test/main.go main.go diff --git a/metricbeat/module/nats/_meta/Dockerfile b/metricbeat/module/nats/_meta/Dockerfile index 4dd2715b4ae..40c9a00c9e2 100644 --- a/metricbeat/module/nats/_meta/Dockerfile +++ b/metricbeat/module/nats/_meta/Dockerfile @@ -2,7 +2,7 @@ ARG NATS_VERSION=2.0.4 FROM nats:$NATS_VERSION # build stage -FROM golang:1.22.8 AS build-env +FROM golang:1.22.9 AS build-env RUN apt-get install git mercurial gcc RUN git clone https://github.com/nats-io/nats.go.git /nats-go RUN cd /nats-go/examples/nats-bench && git checkout tags/v1.10.0 && go build . 
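Editor's note on the aerospike-client-go v7 migration above: the package-level as.RequestNodeInfo call is gone from the call sites, replaced by per-node requests that take an explicit InfoPolicy. A minimal sketch of the new pattern (host and port are hypothetical):

package main

import (
	"fmt"

	as "github.com/aerospike/aerospike-client-go/v7"
)

func main() {
	client, err := as.NewClient("127.0.0.1", 3000)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Info requests are now issued per node with an explicit policy,
	// mirroring the updated namespace metricset.
	infoPolicy := as.NewInfoPolicy()
	for _, node := range client.GetNodes() {
		info, err := node.RequestInfo(infoPolicy, "namespaces")
		if err != nil {
			continue // the metricset logs and skips failing nodes
		}
		fmt.Println(node.GetName(), info["namespaces"])
	}
}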
diff --git a/metricbeat/module/system/test_system.py b/metricbeat/module/system/test_system.py index dda6a0a6fdd..e4aa935a68c 100644 --- a/metricbeat/module/system/test_system.py +++ b/metricbeat/module/system/test_system.py @@ -252,6 +252,7 @@ def test_diskio(self): if 'error' not in evt: if "system" in evt: diskio = evt["system"]["diskio"] + self.remove_fields(diskio, ["serial_number"]) self.assert_fields_for_platform(SYSTEM_DISKIO, diskio) elif "host" in evt: host_disk = evt["host"]["disk"] diff --git a/metricbeat/modules.d/aerospike.yml.disabled b/metricbeat/modules.d/aerospike.yml.disabled index 35aad6b8e40..56f7c1cdd5f 100644 --- a/metricbeat/modules.d/aerospike.yml.disabled +++ b/metricbeat/modules.d/aerospike.yml.disabled @@ -10,6 +10,16 @@ # Aerospike Cluster Name #cluster_name: myclustername + # Username of hosts. Empty by default. + #username: root + + # Password of hosts. Empty by default. + #password: secret + + # Authentication modes: https://aerospike.com/docs/server/guide/security/access-control + # Possible values: internal (default), external, pki + #auth_mode: internal + # Optional SSL/TLS (disabled by default) #ssl.enabled: true diff --git a/metricbeat/tests/system/metricbeat.py b/metricbeat/tests/system/metricbeat.py index 123c9ad2bc0..449536e4f39 100644 --- a/metricbeat/tests/system/metricbeat.py +++ b/metricbeat/tests/system/metricbeat.py @@ -136,6 +136,11 @@ def check_metricset(self, module, metricset, hosts, fields: list = None, extras: self.assert_fields_are_documented(evt) + def remove_fields(self, event: object, fields: list): + for field in fields: + if field in event: + del event[field] + def supported_versions(path): """ diff --git a/packetbeat/Dockerfile b/packetbeat/Dockerfile index e739ac9efd1..d9feb614123 100644 --- a/packetbeat/Dockerfile +++ b/packetbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.8 +FROM golang:1.22.9 RUN \ apt-get update \ diff --git a/packetbeat/procs/procs.go b/packetbeat/procs/procs.go index 11b9fc2e8d8..f578b49a5d4 100644 --- a/packetbeat/procs/procs.go +++ b/packetbeat/procs/procs.go @@ -64,6 +64,7 @@ type portProcMapping struct { endpoint endpoint // FIXME: This is never used. pid int proc *process + expires time.Time } // process describes an OS process. @@ -185,8 +186,8 @@ func (proc *ProcessesWatcher) isLocalIP(ip net.IP) bool { func (proc *ProcessesWatcher) findProc(address net.IP, port uint16, transport applayer.Transport) *process { proc.mu.Lock() + defer proc.mu.Unlock() procMap, ok := proc.portProcMap[transport] - proc.mu.Unlock() if !ok { return nil } @@ -206,24 +207,47 @@ func (proc *ProcessesWatcher) findProc(address net.IP, port uint16, transport ap return nil } -func lookupMapping(address net.IP, port uint16, procMap map[endpoint]portProcMapping) (p portProcMapping, found bool) { +// proc.mu must be locked +func lookupMapping(address net.IP, port uint16, procMap map[endpoint]portProcMapping) (portProcMapping, bool) { + now := time.Now() + key := endpoint{address.String(), port} + p, found := procMap[key] + // Precedence when one socket is bound to a specific IP:port and another one // to INADDR_ANY and same port is not clear. Seems that the last one to bind // takes precedence, and we don't have a way to tell. // This function takes the naive approach of giving precedence to the more // specific address and then to INADDR_ANY. 
- if p, found = procMap[endpoint{address.String(), port}]; found { - return p, found + if !found { + if address.To4() != nil { + key.address = anyIPv4 + } else { + key.address = anyIPv6 + } + + p, found = procMap[key] } - nullAddr := anyIPv4 - if asIPv4 := address.To4(); asIPv4 == nil { - nullAddr = anyIPv6 + // We can't guarantee `p` doesn't point to an old entry, since + // we never remove entries from `procMap`, we only overwrite + // them, but we only overwrite them once an unrelated packet + // that doesn't have an entry on `procMap` ends up rebuilding + // the whole map. + // + // We take a conservative approach by discarding the entry if + // it's old enough. When we fail the first time here, our caller + // updates all maps and calls us again. + if found && now.After(p.expires) { + logp.Debug("procs", "PID %d (%s) port %d is too old, discarding", p.pid, p.proc.name, port) + delete(procMap, key) + p = portProcMapping{} + found = false } - p, found = procMap[endpoint{nullAddr, port}] + return p, found } +// proc.mu must be locked func (proc *ProcessesWatcher) updateMap(transport applayer.Transport) { if logp.HasSelector("procsdetailed") { start := time.Now() @@ -244,6 +268,7 @@ func (proc *ProcessesWatcher) updateMap(transport applayer.Transport) { } } +// proc.mu must be locked func (proc *ProcessesWatcher) expireProcessCache() { now := time.Now() for pid, info := range proc.processCache { @@ -253,9 +278,8 @@ func (proc *ProcessesWatcher) expireProcessCache() { } } +// proc.mu must be locked func (proc *ProcessesWatcher) updateMappingEntry(transport applayer.Transport, e endpoint, pid int) { - proc.mu.Lock() - defer proc.mu.Unlock() prev, ok := proc.portProcMap[transport][e] if ok && prev.pid == pid { // This port->pid mapping already exists @@ -267,11 +291,21 @@ func (proc *ProcessesWatcher) updateMappingEntry(transport applayer.Transport, e return } - // Simply overwrite old entries for now. - // We never expire entries from this map. Since there are 65k possible - // ports, the size of the dict can be max 1.5 MB, which we consider - // reasonable. - proc.portProcMap[transport][e] = portProcMapping{endpoint: e, pid: pid, proc: p} + // We overwrite previous entries here, and they expire in + // lookupMapping() if they are deemed old enough. + // + // Map size is bound by the number of ports: ~65k, so it's + // fine to have old entries lingering, as long as we don't + // trust them on subsequent connections. + // + // If the source port is re-used within the hardcoded 10 + // seconds window, we might end up hitting an old mapping. + proc.portProcMap[transport][e] = portProcMapping{ + endpoint: e, + pid: pid, + proc: p, + expires: time.Now().Add(10 * time.Second), + } if logp.IsDebug("procsdetailed") { logp.Debug("procsdetailed", "updateMappingEntry(): local=%s:%d/%s pid=%d process='%s'", diff --git a/winlogbeat/docs/winlogbeat-options.asciidoc b/winlogbeat/docs/winlogbeat-options.asciidoc index 2dd25ca409e..27a86bc04fb 100644 --- a/winlogbeat/docs/winlogbeat-options.asciidoc +++ b/winlogbeat/docs/winlogbeat-options.asciidoc @@ -230,14 +230,15 @@ and descriptions. A whitelist and blacklist of event IDs. The value is a comma-separated list. The accepted values are single event IDs to include (e.g. 4624), a range of event -IDs to include (e.g. 4700-4800), and single event IDs to exclude (e.g. -4735). +IDs to include (e.g. 4700-4800), single event IDs to exclude (e.g. -4735), +and a range of event IDs to exclude (e.g. -4701-4710). 
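Editor's note: a small self-contained sketch (not part of the diff) of the translation performed by the new excEventIDRangeRegex branch in query.go below; the rendered clause matches the expectation added to query_test.go.

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	// Same pattern as excEventIDRangeRegex below.
	excRange := regexp.MustCompile(`^-(\d+)\s*-\s*(\d+)$`)

	m := excRange.FindStringSubmatch("-4701-4710")
	r1, _ := strconv.Atoi(m[1]) // 4701
	r2, _ := strconv.Atoi(m[2]) // 4710

	// Joined with any other excludes by " or ", wrapped in parentheses,
	// and emitted inside the <Suppress> element of the generated query.
	fmt.Printf("(EventID >= %d and EventID <= %d)\n", r1, r2)
}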
*{vista_and_newer}* [source,yaml] -------------------------------------------------------------------------------- winlogbeat.event_logs: - name: Security - event_id: 4624, 4625, 4700-4800, -4735 + event_id: 4624, 4625, 4700-4800, -4735, -4701-4710 -------------------------------------------------------------------------------- [WARNING] diff --git a/winlogbeat/sys/wineventlog/query.go b/winlogbeat/sys/wineventlog/query.go index c48208f38fa..014e0d20a84 100644 --- a/winlogbeat/sys/wineventlog/query.go +++ b/winlogbeat/sys/wineventlog/query.go @@ -33,7 +33,7 @@ const ( query = ` {{if .Suppress}} - *[System[({{join .Suppress " or "}})]]{{end}} + *[System[{{.Suppress}}]]{{end}} ` ) @@ -44,6 +44,7 @@ var ( incEventIDRegex = regexp.MustCompile(`^\d+$`) incEventIDRangeRegex = regexp.MustCompile(`^(\d+)\s*-\s*(\d+)$`) excEventIDRegex = regexp.MustCompile(`^-(\d+)$`) + excEventIDRangeRegex = regexp.MustCompile(`^-(\d+)\s*-\s*(\d+)$`) ) // Query that identifies the source of the events and one or more selectors or @@ -101,7 +102,7 @@ func (q Query) Build() (string, error) { type queryParams struct { Path string Select []string - Suppress []string + Suppress string } func (qp *queryParams) ignoreOlderSelect(q Query) error { @@ -140,6 +141,15 @@ func (qp *queryParams) eventIDSelect(q Query) error { } includes = append(includes, fmt.Sprintf("(EventID >= %d and EventID <= %d)", r1, r2)) + case excEventIDRangeRegex.MatchString(c): + m := excEventIDRangeRegex.FindStringSubmatch(c) + r1, _ := strconv.Atoi(m[1]) + r2, _ := strconv.Atoi(m[2]) + if r1 >= r2 { + return fmt.Errorf("event ID range '%s' is invalid", c) + } + excludes = append(excludes, + fmt.Sprintf("(EventID >= %d and EventID <= %d)", r1, r2)) default: return fmt.Errorf("invalid event ID query component ('%s')", c) } @@ -150,10 +160,9 @@ func (qp *queryParams) eventIDSelect(q Query) error { } else if len(includes) > 1 { qp.Select = append(qp.Select, "("+strings.Join(includes, " or ")+")") } - if len(excludes) == 1 { - qp.Suppress = append(qp.Suppress, excludes...) - } else if len(excludes) > 1 { - qp.Suppress = append(qp.Suppress, "("+strings.Join(excludes, " or ")+")") + + if len(excludes) > 0 { + qp.Suppress = "(" + strings.Join(excludes, " or ") + ")" } return nil diff --git a/winlogbeat/sys/wineventlog/query_test.go b/winlogbeat/sys/wineventlog/query_test.go index 9c85b2bca4e..4405de5eda3 100644 --- a/winlogbeat/sys/wineventlog/query_test.go +++ b/winlogbeat/sys/wineventlog/query_test.go @@ -99,14 +99,14 @@ func TestCombinedQuery(t *testing.T) { const expected = ` - *[System[(EventID=75)]] + *[System[(EventID=75 or (EventID >= 97 and EventID <= 99))]] ` q, err := Query{ Log: "Application", IgnoreOlder: time.Hour, - EventID: "1, 1-100, -75", + EventID: "1, 1-100, -75, -97-99", Level: "Warning", }.Build() if assert.NoError(t, err) { diff --git a/x-pack/filebeat/docs/inputs/input-gcs.asciidoc b/x-pack/filebeat/docs/inputs/input-gcs.asciidoc index eae7158c78d..23ac0e021c6 100644 --- a/x-pack/filebeat/docs/inputs/input-gcs.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-gcs.asciidoc @@ -462,5 +462,38 @@ filebeat.inputs: In this configuration even though we have specified `max_workers = 10`, `poll = true` and `poll_interval = 15s` at the root level, both the buckets will override these values with their own respective values which are defined as part of their sub-attributes. +[float] +=== Metrics + +This input exposes metrics under the <<http-endpoint, HTTP monitoring endpoint>>. +These metrics are exposed under the `/inputs` path. They can be used to +observe the activity of the input.
+ +[options="header"] +|======= +| Metric | Description +| `url` | URL of the input resource. +| `errors_total` | Total number of errors encountered by the input. +| `decode_errors_total` | Total number of decode errors encountered by the input. +| `gcs_objects_requested_total` | Total number of GCS objects downloaded. +| `gcs_objects_published_total` | Total number of GCS objects processed that were published. +| `gcs_objects_listed_total` | Total number of GCS objects returned by list operations. +| `gcs_bytes_processed_total` | Total number of GCS bytes processed. +| `gcs_events_created_total` | Total number of events created from processing GCS data. +| `gcs_failed_jobs_total` | Total number of failed jobs. +| `gcs_expired_failed_jobs_total` | Total number of expired failed jobs that could not be recovered. +| `gcs_objects_tracked_gauge` | Number of objects currently tracked in the state registry (gauge). +| `gcs_objects_inflight_gauge` | Number of GCS objects inflight (gauge). +| `gcs_jobs_scheduled_after_validation` | Histogram of the number of jobs scheduled after validation. +| `gcs_object_processing_time` | Histogram of the elapsed GCS object processing times in nanoseconds (start of download to completion of parsing). +| `gcs_object_size_in_bytes` | Histogram of processed GCS object size in bytes. +| `gcs_events_per_object` | Histogram of event count per GCS object. +| `source_lag_time` | Histogram of the time between the source (Updated) timestamp and the time the object was read, in nanoseconds. +|======= + +==== Common input options + +[id="{beatname_lc}-input-{type}-common-options"] +include::../../../../filebeat/docs/inputs/input-common-options.asciidoc[] NOTE: Any feedback is welcome which will help us further optimize this input. Please feel free to open a github issue for any bugs or feature requests. diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 5e636901565..9ae0e524def 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -21,6 +21,9 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: + # Use journald to collect system logs + #var.use_journald: false + # Input configuration (advanced). Any input configuration option # can be added under this section. #input: @@ -33,6 +36,9 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: + # Use journald to collect auth logs + #var.use_journald: false + # Input configuration (advanced). Any input configuration option # can be added under this section. #input: diff --git a/x-pack/filebeat/input/awss3/s3.go b/x-pack/filebeat/input/awss3/s3.go index a4865022850..fabc1b2f1dd 100644 --- a/x-pack/filebeat/input/awss3/s3.go +++ b/x-pack/filebeat/input/awss3/s3.go @@ -16,16 +16,16 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" ) -func createS3API(ctx context.Context, config config, awsConfig awssdk.Config) (*awsS3API, error) { - s3Client := s3.NewFromConfig(awsConfig, config.s3ConfigModifier) - regionName, err := getRegionForBucket(ctx, s3Client, config.getBucketName()) +func (in *s3PollerInput) createS3API(ctx context.Context) (*awsS3API, error) { + s3Client := s3.NewFromConfig(in.awsConfig, in.config.s3ConfigModifier) + regionName, err := getRegionForBucket(ctx, s3Client, in.config.getBucketName()) if err != nil { return nil, fmt.Errorf("failed to get AWS region for bucket: %w", err) } // Can this really happen? 
- if regionName != awsConfig.Region { - awsConfig.Region = regionName - s3Client = s3.NewFromConfig(awsConfig, config.s3ConfigModifier) + if regionName != in.awsConfig.Region { + in.awsConfig.Region = regionName + s3Client = s3.NewFromConfig(in.awsConfig, in.config.s3ConfigModifier) } return newAWSs3API(s3Client), nil diff --git a/x-pack/filebeat/input/awss3/s3_input.go b/x-pack/filebeat/input/awss3/s3_input.go index c3a83c284a2..6775a1be033 100644 --- a/x-pack/filebeat/input/awss3/s3_input.go +++ b/x-pack/filebeat/input/awss3/s3_input.go @@ -73,7 +73,7 @@ func (in *s3PollerInput) Run( defer in.states.Close() ctx := v2.GoContextFromCanceler(inputContext.Cancelation) - in.s3, err = createS3API(ctx, in.config, in.awsConfig) + in.s3, err = in.createS3API(ctx) if err != nil { return fmt.Errorf("failed to create S3 API: %w", err) } diff --git a/x-pack/filebeat/input/azureeventhub/v1_input.go b/x-pack/filebeat/input/azureeventhub/v1_input.go index 4736bc3f15a..c7d97d8603f 100644 --- a/x-pack/filebeat/input/azureeventhub/v1_input.go +++ b/x-pack/filebeat/input/azureeventhub/v1_input.go @@ -23,6 +23,7 @@ import ( v2 "github.com/elastic/beats/v7/filebeat/input/v2" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common/acker" + "github.com/elastic/beats/v7/libbeat/management/status" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -68,9 +69,13 @@ func (in *eventHubInputV1) Run( ) error { var err error + // Update the status to starting + inputContext.UpdateStatus(status.Starting, "") + // Create pipelineClient for publishing events. in.pipelineClient, err = createPipelineClient(pipeline) if err != nil { + inputContext.UpdateStatus(status.Failed, err.Error()) return fmt.Errorf("failed to create pipeline pipelineClient: %w", err) } defer in.pipelineClient.Close() @@ -82,6 +87,7 @@ func (in *eventHubInputV1) Run( // Set up new and legacy sanitizers, if any. sanitizers, err := newSanitizers(in.config.Sanitizers, in.config.LegacySanitizeOptions) if err != nil { + inputContext.UpdateStatus(status.Failed, err.Error()) return fmt.Errorf("failed to create sanitizers: %w", err) } @@ -98,6 +104,8 @@ func (in *eventHubInputV1) Run( // in preparation for the main run loop. 
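// Taken together, the UpdateStatus calls added in this function give the
// input an explicit health lifecycle: Starting when Run begins, Failed (with
// the error message) when pipeline setup, sanitizer construction, setup, or
// the run loop fails, and Stopping on a clean return. Under Elastic Agent
// this drives the per-input status the agent reports; in standalone mode the
// reporter is effectively a no-op.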
err = in.setup(ctx) if err != nil { + in.log.Errorw("error setting up input", "error", err) + inputContext.UpdateStatus(status.Failed, err.Error()) return err } @@ -105,9 +113,11 @@ func (in *eventHubInputV1) Run( err = in.run(ctx) if err != nil { in.log.Errorw("error running input", "error", err) + inputContext.UpdateStatus(status.Failed, err.Error()) return err } + inputContext.UpdateStatus(status.Stopping, "") return nil } diff --git a/x-pack/filebeat/input/default-inputs/inputs_aix.go b/x-pack/filebeat/input/default-inputs/inputs_aix.go index 05e2cbe660d..d3fa76b1089 100644 --- a/x-pack/filebeat/input/default-inputs/inputs_aix.go +++ b/x-pack/filebeat/input/default-inputs/inputs_aix.go @@ -14,6 +14,7 @@ import ( "github.com/elastic/beats/v7/x-pack/filebeat/input/httpjson" "github.com/elastic/beats/v7/x-pack/filebeat/input/lumberjack" "github.com/elastic/beats/v7/x-pack/filebeat/input/o365audit" + "github.com/elastic/beats/v7/x-pack/filebeat/input/salesforce" "github.com/elastic/elastic-agent-libs/logp" ) @@ -25,5 +26,6 @@ func xpackInputs(info beat.Info, log *logp.Logger, store beater.StateStore) []v2 o365audit.Plugin(log, store), awss3.Plugin(store), lumberjack.Plugin(), + salesforce.Plugin(log, store), } } diff --git a/x-pack/filebeat/input/default-inputs/inputs_windows.go b/x-pack/filebeat/input/default-inputs/inputs_windows.go index e88e45ff2e7..4b27c3b35c2 100644 --- a/x-pack/filebeat/input/default-inputs/inputs_windows.go +++ b/x-pack/filebeat/input/default-inputs/inputs_windows.go @@ -25,6 +25,7 @@ import ( "github.com/elastic/beats/v7/x-pack/filebeat/input/lumberjack" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow" "github.com/elastic/beats/v7/x-pack/filebeat/input/o365audit" + "github.com/elastic/beats/v7/x-pack/filebeat/input/salesforce" "github.com/elastic/elastic-agent-libs/logp" ) @@ -44,6 +45,7 @@ func xpackInputs(info beat.Info, log *logp.Logger, store beater.StateStore) []v2 lumberjack.Plugin(), etw.Plugin(), netflow.Plugin(log), + salesforce.Plugin(log, store), benchmark.Plugin(), } } diff --git a/x-pack/filebeat/input/gcs/decoding_test.go b/x-pack/filebeat/input/gcs/decoding_test.go index 0a2ee5e3f0d..a57fe62a5ed 100644 --- a/x-pack/filebeat/input/gcs/decoding_test.go +++ b/x-pack/filebeat/input/gcs/decoding_test.go @@ -79,7 +79,7 @@ func TestDecoding(t *testing.T) { } defer f.Close() p := &pub{t: t} - j := newJob(&storage.BucketHandle{}, &storage.ObjectAttrs{Name: "test_object"}, "gs://test_uri", newState(), &Source{}, p, log, false) + j := newJob(&storage.BucketHandle{}, &storage.ObjectAttrs{Name: "test_object"}, "gs://test_uri", newState(), &Source{}, p, nil, log, false) j.src.ReaderConfig.Decoding = tc.config err = j.decode(context.Background(), f, "test") if err != nil { diff --git a/x-pack/filebeat/input/gcs/input.go b/x-pack/filebeat/input/gcs/input.go index a2ecf2c28af..cc0e9ad74bb 100644 --- a/x-pack/filebeat/input/gcs/input.go +++ b/x-pack/filebeat/input/gcs/input.go @@ -152,9 +152,15 @@ func (input *gcsInput) Run(inputCtx v2.Context, src cursor.Source, log := inputCtx.Logger.With("project_id", currentSource.ProjectId).With("bucket", currentSource.BucketName) log.Infof("Running google cloud storage for project: %s", input.config.ProjectId) + // create a new inputMetrics instance + metrics := newInputMetrics(inputCtx.ID+":"+currentSource.BucketName, nil) + metrics.url.Set("gs://" + currentSource.BucketName) + defer metrics.Close() + var cp *Checkpoint if 
!cursor.IsNew() { if err := cursor.Unpack(&cp); err != nil { + metrics.errorsTotal.Inc() return err } @@ -169,6 +175,7 @@ func (input *gcsInput) Run(inputCtx v2.Context, src cursor.Source, client, err := fetchStorageClient(ctx, input.config, log) if err != nil { + metrics.errorsTotal.Inc() return err } bucket := client.Bucket(currentSource.BucketName).Retryer( @@ -180,7 +187,7 @@ func (input *gcsInput) Run(inputCtx v2.Context, src cursor.Source, // Since we are only reading, the operation is always idempotent storage.WithPolicy(storage.RetryAlways), ) - scheduler := newScheduler(publisher, bucket, currentSource, &input.config, st, log) + scheduler := newScheduler(publisher, bucket, currentSource, &input.config, st, metrics, log) return scheduler.schedule(ctx) } diff --git a/x-pack/filebeat/input/gcs/input_stateless.go b/x-pack/filebeat/input/gcs/input_stateless.go index 3cdeb379473..f56f7f35bc5 100644 --- a/x-pack/filebeat/input/gcs/input_stateless.go +++ b/x-pack/filebeat/input/gcs/input_stateless.go @@ -49,6 +49,7 @@ func (in *statelessInput) Run(inputCtx v2.Context, publisher stateless.Publisher pub := statelessPublisher{wrapped: publisher} var source cursor.Source var g errgroup.Group + for _, b := range in.config.Buckets { bucket := tryOverrideOrDefault(in.config, b) source = &Source{ @@ -68,6 +69,9 @@ func (in *statelessInput) Run(inputCtx v2.Context, publisher stateless.Publisher st := newState() currentSource := source.(*Source) log := inputCtx.Logger.With("project_id", currentSource.ProjectId).With("bucket", currentSource.BucketName) + metrics := newInputMetrics(inputCtx.ID+":"+currentSource.BucketName, nil) + defer metrics.Close() + metrics.url.Set("gs://" + currentSource.BucketName) ctx, cancel := context.WithCancel(context.Background()) go func() { @@ -85,7 +89,7 @@ func (in *statelessInput) Run(inputCtx v2.Context, publisher stateless.Publisher storage.WithPolicy(storage.RetryAlways), ) - scheduler := newScheduler(pub, bkt, currentSource, &in.config, st, log) + scheduler := newScheduler(pub, bkt, currentSource, &in.config, st, metrics, log) // allows multiple containers to be scheduled concurrently while testing // the stateless input is triggered only while testing and till now it did not mimic // the real world concurrent execution of multiple containers. This fix allows it to do so. 
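Both the cursor-based input above and this stateless variant now register one metric set per bucket, keyed by the input ID plus the bucket name, and release it when the bucket's run ends. A minimal sketch of that lifecycle; the `runBucket` wrapper is hypothetical, while `newInputMetrics`, `url`, and the `newScheduler` signature come from this change:

[source,go]
----
// runBucket wires metrics for a single bucket: a distinct registry entry per
// input-ID/bucket pair, an advertised gs:// URL, and unregistration on exit.
func runBucket(inputID, bucketName string) {
	metrics := newInputMetrics(inputID+":"+bucketName, nil)
	defer metrics.Close() // unregisters, so the same ID can be reused later

	metrics.url.Set("gs://" + bucketName)

	// ... build the bucket handle, then hand the metrics to the scheduler:
	// scheduler := newScheduler(pub, bkt, currentSource, cfg, st, metrics, log)
}
----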
diff --git a/x-pack/filebeat/input/gcs/input_test.go b/x-pack/filebeat/input/gcs/input_test.go index 64a548afd8c..8accb774f38 100644 --- a/x-pack/filebeat/input/gcs/input_test.go +++ b/x-pack/filebeat/input/gcs/input_test.go @@ -6,7 +6,9 @@ package gcs import ( "context" + "crypto/rand" "crypto/tls" + "encoding/hex" "errors" "fmt" "net/http" @@ -547,7 +549,7 @@ func Test_StorageClient(t *testing.T) { chanClient := beattest.NewChanClient(len(tt.expected)) t.Cleanup(func() { _ = chanClient.Close() }) - ctx, cancel := newV2Context() + ctx, cancel := newV2Context(t) t.Cleanup(cancel) var g errgroup.Group @@ -607,11 +609,23 @@ func Test_StorageClient(t *testing.T) { } } -func newV2Context() (v2.Context, func()) { +func newV2Context(t *testing.T) (v2.Context, func()) { ctx, cancel := context.WithCancel(context.Background()) + id, err := generateRandomID(8) + if err != nil { + t.Fatalf("failed to generate random id: %v", err) + } return v2.Context{ Logger: logp.NewLogger("gcs_test"), - ID: "test_id", + ID: "gcs_test-" + id, Cancelation: ctx, }, cancel } + +func generateRandomID(length int) (string, error) { + bytes := make([]byte, length) + if _, err := rand.Read(bytes); err != nil { + return "", err + } + return hex.EncodeToString(bytes), nil +} diff --git a/x-pack/filebeat/input/gcs/job.go b/x-pack/filebeat/input/gcs/job.go index 403555311e9..11420002159 100644 --- a/x-pack/filebeat/input/gcs/job.go +++ b/x-pack/filebeat/input/gcs/job.go @@ -45,6 +45,8 @@ type job struct { src *Source // publisher is used to publish a beat event to the output stream publisher cursor.Publisher + // metrics used to track the errors and success of jobs + metrics *inputMetrics // custom logger log *logp.Logger // flag used to denote if this object has previously failed without being processed at all. 
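The constructor change in the next hunk makes the metrics argument optional. A minimal sketch of the guard; the `ensureMetrics` helper name is hypothetical, since the change inlines the same check in both `newJob` and `newScheduler`:

[source,go]
----
// ensureMetrics returns a usable *inputMetrics even when the caller passed
// nil (as unit tests do), so job code can update metrics unconditionally
// instead of nil-checking at every call site.
func ensureMetrics(metrics *inputMetrics) *inputMetrics {
	if metrics == nil {
		// registers a stub metric set under an empty input ID
		return newInputMetrics("", nil)
	}
	return metrics
}
----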
@@ -53,8 +55,12 @@ type job struct { // newJob, returns an instance of a job, which is a unit of work that can be assigned to a go routine func newJob(bucket *storage.BucketHandle, object *storage.ObjectAttrs, objectURI string, - state *state, src *Source, publisher cursor.Publisher, log *logp.Logger, isFailed bool, + state *state, src *Source, publisher cursor.Publisher, metrics *inputMetrics, log *logp.Logger, isFailed bool, ) *job { + if metrics == nil { + // metrics are optional, initialize a stub if not provided + metrics = newInputMetrics("", nil) + } return &job{ bucket: bucket, object: object, @@ -63,6 +69,7 @@ func newJob(bucket *storage.BucketHandle, object *storage.ObjectAttrs, objectURI state: state, src: src, publisher: publisher, + metrics: metrics, log: log, isFailed: isFailed, } @@ -78,6 +85,17 @@ func gcsObjectHash(src *Source, object *storage.ObjectAttrs) string { func (j *job) do(ctx context.Context, id string) { var fields mapstr.M + // metrics & logging + j.log.Debug("begin gcs object processing.") + j.metrics.gcsObjectsRequestedTotal.Inc() + j.metrics.gcsObjectsInflight.Inc() + start := time.Now() + defer func() { + elapsed := time.Since(start) + j.metrics.gcsObjectsInflight.Dec() + j.metrics.gcsObjectProcessingTime.Update(elapsed.Nanoseconds()) + j.log.Debugw("end gcs object processing.", "elapsed_time_ns", elapsed) + }() if allowedContentTypes[j.object.ContentType] { if j.object.ContentType == gzType || j.object.ContentEncoding == encodingGzip { @@ -85,10 +103,15 @@ func (j *job) do(ctx context.Context, id string) { } err := j.processAndPublishData(ctx, id) if err != nil { - j.state.updateFailedJobs(j.object.Name) + j.state.updateFailedJobs(j.object.Name, j.metrics) j.log.Errorw("job encountered an error while publishing data and has been added to a failed jobs list", "gcs.jobId", id, "error", err) + j.metrics.gcsFailedJobsTotal.Inc() + j.metrics.errorsTotal.Inc() return } + j.metrics.gcsObjectsPublishedTotal.Inc() + //nolint:gosec // object size cannot be negative hence this conversion is safe + j.metrics.gcsBytesProcessedTotal.Add(uint64(j.object.Size)) } else { err := fmt.Errorf("job with jobId %s encountered an error: content-type %s not supported", id, j.object.ContentType) @@ -101,9 +124,10 @@ func (j *job) do(ctx context.Context, id string) { } event.SetID(objectID(j.hash, 0)) // locks while data is being saved and published to avoid concurrent map read/writes - cp, done := j.state.saveForTx(j.object.Name, j.object.Updated) + cp, done := j.state.saveForTx(j.object.Name, j.object.Updated, j.metrics) if err := j.publisher.Publish(event, cp); err != nil { j.log.Errorw("job encountered an error while publishing event", "gcs.jobId", id, "error", err) + j.metrics.errorsTotal.Inc() } // unlocks after data is saved and published done() @@ -133,11 +157,21 @@ func (j *job) processAndPublishData(ctx context.Context, id string) error { defer func() { err = reader.Close() if err != nil { + j.metrics.errorsTotal.Inc() j.log.Errorw("failed to close reader for object", "objectName", j.object.Name, "error", err) } }() - return j.decode(ctx, reader, id) + // update the source lag time metric + j.metrics.sourceLagTime.Update(time.Since(j.object.Updated).Nanoseconds()) + + // calculate number of decode errors + if err := j.decode(ctx, reader, id); err != nil { + j.metrics.decodeErrorsTotal.Inc() + return fmt.Errorf("failed to decode object: %s, with error: %w", j.object.Name, err) + } + + return nil } func (j *job) decode(ctx context.Context, r io.Reader, id string) error { @@ 
-241,17 +275,24 @@ func (j *job) readJsonAndPublish(ctx context.Context, r io.Reader, id string) er // if expand_event_list_from_field is set, then split the event list if j.src.ExpandEventListFromField != "" { - if err := j.splitEventList(j.src.ExpandEventListFromField, item, offset, j.hash, id); err != nil { + if numEvents, err := j.splitEventList(j.src.ExpandEventListFromField, item, offset, id); err != nil { return err + } else { + j.metrics.gcsEventsPerObject.Update(int64(numEvents)) } continue + } else { + j.metrics.gcsEventsPerObject.Update(1) } var parsedData []mapstr.M if j.src.ParseJSON { parsedData, err = decodeJSON(bytes.NewReader(item)) if err != nil { - j.log.Errorw("job encountered an error", "gcs.jobId", id, "error", err) + // since we do not want to stop processing the job here as this is purely cosmetic and optional, we log the error and continue + j.metrics.errorsTotal.Inc() + j.metrics.decodeErrorsTotal.Inc() + j.log.Errorw("job encountered an error during 'ParseJSON' op", "gcs.jobId", id, "error", err) } } evt := j.createEvent(item, parsedData, offset) @@ -263,8 +304,9 @@ func (j *job) readJsonAndPublish(ctx context.Context, r io.Reader, id string) er func (j *job) publish(evt beat.Event, last bool, id string) { if last { // if this is the last object, then perform a complete state save - cp, done := j.state.saveForTx(j.object.Name, j.object.Updated) + cp, done := j.state.saveForTx(j.object.Name, j.object.Updated, j.metrics) if err := j.publisher.Publish(evt, cp); err != nil { + j.metrics.errorsTotal.Inc() j.log.Errorw("job encountered an error while publishing event", "gcs.jobId", id, "error", err) } done() @@ -272,20 +314,22 @@ func (j *job) publish(evt beat.Event, last bool, id string) { } // since we don't update the cursor checkpoint, lack of a lock here is not a problem if err := j.publisher.Publish(evt, nil); err != nil { + j.metrics.errorsTotal.Inc() j.log.Errorw("job encountered an error while publishing event", "gcs.jobId", id, "error", err) } } // splitEventList splits the event list into individual events and publishes them -func (j *job) splitEventList(key string, raw json.RawMessage, offset int64, objHash string, id string) error { +func (j *job) splitEventList(key string, raw json.RawMessage, offset int64, id string) (int, error) { var jsonObject map[string]json.RawMessage + var eventsPerObject int if err := json.Unmarshal(raw, &jsonObject); err != nil { - return fmt.Errorf("job with job id %s encountered an unmarshaling error: %w", id, err) + return eventsPerObject, fmt.Errorf("job with job id %s encountered an unmarshaling error: %w", id, err) } raw, found := jsonObject[key] if !found { - return fmt.Errorf("expand_event_list_from_field key <%v> is not in event", key) + return eventsPerObject, fmt.Errorf("expand_event_list_from_field key <%v> is not in event", key) } dec := json.NewDecoder(bytes.NewReader(raw)) @@ -294,11 +338,11 @@ func (j *job) splitEventList(key string, raw json.RawMessage, offset int64, objH tok, err := dec.Token() if err != nil { - return fmt.Errorf("failed to read JSON token for object: %s, with error: %w", j.object.Name, err) + return eventsPerObject, fmt.Errorf("failed to read JSON token for object: %s, with error: %w", j.object.Name, err) } delim, ok := tok.(json.Delim) if !ok || delim != '[' { - return fmt.Errorf("expand_event_list_from_field <%v> is not an array", key) + return eventsPerObject, fmt.Errorf("expand_event_list_from_field <%v> is not an array", key) } for dec.More() { @@ -306,31 +350,34 @@ func (j *job) 
splitEventList(key string, raw json.RawMessage, offset int64, objH var item json.RawMessage if err := dec.Decode(&item); err != nil { - return fmt.Errorf("failed to decode array item at offset %d: %w", offset+arrayOffset, err) + return eventsPerObject, fmt.Errorf("failed to decode array item at offset %d: %w", offset+arrayOffset, err) } data, err := item.MarshalJSON() if err != nil { - return fmt.Errorf("job with job id %s encountered a marshaling error: %w", id, err) + return eventsPerObject, fmt.Errorf("job with job id %s encountered a marshaling error: %w", id, err) } evt := j.createEvent(data, nil, offset+arrayOffset) if !dec.More() { // if this is the last object, then perform a complete state save - cp, done := j.state.saveForTx(j.object.Name, j.object.Updated) + cp, done := j.state.saveForTx(j.object.Name, j.object.Updated, j.metrics) if err := j.publisher.Publish(evt, cp); err != nil { + j.metrics.errorsTotal.Inc() j.log.Errorw("job encountered an error while publishing event", "gcs.jobId", id, "error", err) } done() } else { // since we don't update the cursor checkpoint, lack of a lock here is not a problem if err := j.publisher.Publish(evt, nil); err != nil { + j.metrics.errorsTotal.Inc() j.log.Errorw("job encountered an error while publishing event", "gcs.jobId", id, "error", err) } } + eventsPerObject++ } - return nil + return eventsPerObject, nil } // addGzipDecoderIfNeeded determines whether the given stream of bytes (encapsulated in a buffered reader) @@ -426,7 +473,7 @@ func (j *job) createEvent(message []byte, data []mapstr.M, offset int64) beat.Ev }, } event.SetID(objectID(j.hash, offset)) - + j.metrics.gcsEventsCreatedTotal.Inc() return event } diff --git a/x-pack/filebeat/input/gcs/metrics.go b/x-pack/filebeat/input/gcs/metrics.go new file mode 100644 index 00000000000..58b5e3c0257 --- /dev/null +++ b/x-pack/filebeat/input/gcs/metrics.go @@ -0,0 +1,78 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package gcs + +import ( + "github.com/rcrowley/go-metrics" + + "github.com/elastic/beats/v7/libbeat/monitoring/inputmon" + "github.com/elastic/elastic-agent-libs/monitoring" + "github.com/elastic/elastic-agent-libs/monitoring/adapter" +) + +// inputMetrics handles the input's metric reporting. +type inputMetrics struct { + unregister func() + url *monitoring.String // URL of the input resource. + errorsTotal *monitoring.Uint // Number of errors encountered. + decodeErrorsTotal *monitoring.Uint // Number of decode errors encountered. + + gcsObjectsTracked *monitoring.Uint // Number of objects currently tracked in the state registry (gauge). + gcsObjectsRequestedTotal *monitoring.Uint // Number of GCS objects downloaded. + gcsObjectsPublishedTotal *monitoring.Uint // Number of GCS objects processed that were published. + gcsObjectsListedTotal *monitoring.Uint // Number of GCS objects returned by list operations. + gcsBytesProcessedTotal *monitoring.Uint // Number of GCS bytes processed. + gcsEventsCreatedTotal *monitoring.Uint // Number of events created from processing GCS data. + gcsFailedJobsTotal *monitoring.Uint // Number of failed jobs. + gcsExpiredFailedJobsTotal *monitoring.Uint // Number of expired failed jobs that could not be recovered. + gcsObjectsInflight *monitoring.Uint // Number of GCS objects inflight (gauge). 
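	// The counters and gauges above are *monitoring.Uint values that the
	// monitoring registry serves directly; the distributions below are
	// go-metrics samples, which newInputMetrics bridges into the same
	// registry as histograms via adapter.NewGoMetrics.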
+ gcsObjectProcessingTime metrics.Sample // Histogram of the elapsed GCS object processing times in nanoseconds (start of download to completion of parsing). + gcsObjectSizeInBytes metrics.Sample // Histogram of processed GCS object size in bytes. + gcsEventsPerObject metrics.Sample // Histogram of event count per GCS object. + gcsJobsScheduledAfterValidation metrics.Sample // Histogram of number of jobs scheduled after validation. + sourceLagTime metrics.Sample // Histogram of the time between the source (Updated) timestamp and the time the object was read. +} + +func newInputMetrics(id string, optionalParent *monitoring.Registry) *inputMetrics { + reg, unreg := inputmon.NewInputRegistry(inputName, id, optionalParent) + out := &inputMetrics{ + unregister: unreg, + url: monitoring.NewString(reg, "url"), + errorsTotal: monitoring.NewUint(reg, "errors_total"), + decodeErrorsTotal: monitoring.NewUint(reg, "decode_errors_total"), + + gcsObjectsTracked: monitoring.NewUint(reg, "gcs_objects_tracked_gauge"), + gcsObjectsRequestedTotal: monitoring.NewUint(reg, "gcs_objects_requested_total"), + gcsObjectsPublishedTotal: monitoring.NewUint(reg, "gcs_objects_published_total"), + gcsObjectsListedTotal: monitoring.NewUint(reg, "gcs_objects_listed_total"), + gcsBytesProcessedTotal: monitoring.NewUint(reg, "gcs_bytes_processed_total"), + gcsEventsCreatedTotal: monitoring.NewUint(reg, "gcs_events_created_total"), + gcsFailedJobsTotal: monitoring.NewUint(reg, "gcs_failed_jobs_total"), + gcsExpiredFailedJobsTotal: monitoring.NewUint(reg, "gcs_expired_failed_jobs_total"), + gcsObjectsInflight: monitoring.NewUint(reg, "gcs_objects_inflight_gauge"), + gcsObjectProcessingTime: metrics.NewUniformSample(1024), + gcsObjectSizeInBytes: metrics.NewUniformSample(1024), + gcsEventsPerObject: metrics.NewUniformSample(1024), + gcsJobsScheduledAfterValidation: metrics.NewUniformSample(1024), + sourceLagTime: metrics.NewUniformSample(1024), + } + + adapter.NewGoMetrics(reg, "gcs_object_processing_time", adapter.Accept). + Register("histogram", metrics.NewHistogram(out.gcsObjectProcessingTime)) //nolint:errcheck // A unique namespace is used so name collisions are impossible. + adapter.NewGoMetrics(reg, "gcs_object_size_in_bytes", adapter.Accept). + Register("histogram", metrics.NewHistogram(out.gcsObjectSizeInBytes)) //nolint:errcheck // A unique namespace is used so name collisions are impossible. + adapter.NewGoMetrics(reg, "gcs_events_per_object", adapter.Accept). + Register("histogram", metrics.NewHistogram(out.gcsEventsPerObject)) //nolint:errcheck // A unique namespace is used so name collisions are impossible. + adapter.NewGoMetrics(reg, "gcs_jobs_scheduled_after_validation", adapter.Accept). + Register("histogram", metrics.NewHistogram(out.gcsJobsScheduledAfterValidation)) //nolint:errcheck // A unique namespace is used so name collisions are impossible. + adapter.NewGoMetrics(reg, "source_lag_time", adapter.Accept). + Register("histogram", metrics.NewHistogram(out.sourceLagTime)) //nolint:errcheck // A unique namespace is used so name collisions are impossible. + + return out +} + +func (m *inputMetrics) Close() { + m.unregister() +} diff --git a/x-pack/filebeat/input/gcs/metrics_test.go b/x-pack/filebeat/input/gcs/metrics_test.go new file mode 100644 index 00000000000..3398a1a8daa --- /dev/null +++ b/x-pack/filebeat/input/gcs/metrics_test.go @@ -0,0 +1,67 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package gcs + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/elastic-agent-libs/monitoring" +) + +// TestInputMetricsClose asserts that metrics registered by this input are +// removed after Close() is called. This is important because an input with +// the same ID could be re-registered, and that ID cannot exist in the +// monitoring registry. +func TestInputMetricsClose(t *testing.T) { + reg := monitoring.NewRegistry() + + metrics := newInputMetrics("gcs-cl-bucket.cloudflare_logs-8b312b5f-9f99-492c-b035-3dff354a1f01", reg) + metrics.Close() + + reg.Do(monitoring.Full, func(s string, _ interface{}) { + t.Errorf("registry should be empty, but found %v", s) + }) +} + +// TestNewInputMetricsInstance asserts that all the metrics are initialized +// when a newInputMetrics method is invoked. This avoids nil hit panics when +// a getter is invoked on any uninitialized metric. +func TestNewInputMetricsInstance(t *testing.T) { + reg := monitoring.NewRegistry() + metrics := newInputMetrics("gcs-new-metric-test", reg) + + assert.NotNil(t, metrics.errorsTotal, + metrics.decodeErrorsTotal, + metrics.gcsObjectsTracked, + metrics.gcsObjectsRequestedTotal, + metrics.gcsObjectsPublishedTotal, + metrics.gcsObjectsListedTotal, + metrics.gcsBytesProcessedTotal, + metrics.gcsEventsCreatedTotal, + metrics.gcsFailedJobsTotal, + metrics.gcsExpiredFailedJobsTotal, + metrics.gcsObjectsInflight, + metrics.gcsObjectProcessingTime, + metrics.gcsObjectSizeInBytes, + metrics.gcsEventsPerObject, + metrics.gcsJobsScheduledAfterValidation, + metrics.sourceLagTime, + ) + + assert.Equal(t, uint64(0x0), metrics.errorsTotal.Get()) + assert.Equal(t, uint64(0x0), metrics.decodeErrorsTotal.Get()) + assert.Equal(t, uint64(0x0), metrics.gcsObjectsTracked.Get()) + assert.Equal(t, uint64(0x0), metrics.gcsObjectsRequestedTotal.Get()) + assert.Equal(t, uint64(0x0), metrics.gcsObjectsPublishedTotal.Get()) + assert.Equal(t, uint64(0x0), metrics.gcsObjectsListedTotal.Get()) + assert.Equal(t, uint64(0x0), metrics.gcsBytesProcessedTotal.Get()) + assert.Equal(t, uint64(0x0), metrics.gcsEventsCreatedTotal.Get()) + assert.Equal(t, uint64(0x0), metrics.gcsFailedJobsTotal.Get()) + assert.Equal(t, uint64(0x0), metrics.gcsExpiredFailedJobsTotal.Get()) + assert.Equal(t, uint64(0x0), metrics.gcsObjectsInflight.Get()) + +} diff --git a/x-pack/filebeat/input/gcs/scheduler.go b/x-pack/filebeat/input/gcs/scheduler.go index ef1bebd083d..3f7a1d833c9 100644 --- a/x-pack/filebeat/input/gcs/scheduler.go +++ b/x-pack/filebeat/input/gcs/scheduler.go @@ -36,12 +36,17 @@ type scheduler struct { state *state log *logp.Logger limiter *limiter + metrics *inputMetrics } // newScheduler, returns a new scheduler instance func newScheduler(publisher cursor.Publisher, bucket *storage.BucketHandle, src *Source, cfg *config, - state *state, log *logp.Logger, + state *state, metrics *inputMetrics, log *logp.Logger, ) *scheduler { + if metrics == nil { + // metrics are optional, initialize a stub if not provided + metrics = newInputMetrics("", nil) + } return &scheduler{ publisher: publisher, bucket: bucket, @@ -50,6 +55,7 @@ func newScheduler(publisher cursor.Publisher, bucket *storage.BucketHandle, src state: state, log: log, limiter: &limiter{limit: make(chan struct{}, src.MaxWorkers)}, + metrics: metrics, } } @@ -96,11 +102,13 @@ func (s *scheduler) scheduleOnce(ctx context.Context) error { var 
objects []*storage.ObjectAttrs nextPageToken, err := pager.NextPage(&objects) if err != nil { + s.metrics.errorsTotal.Inc() return err } numObs += len(objects) jobs := s.createJobs(objects, s.log) s.log.Debugf("scheduler: %d objects fetched for current batch", len(objects)) + s.metrics.gcsObjectsListedTotal.Add(uint64(len(objects))) // If previous checkpoint was saved then look up starting point for new jobs if !s.state.checkpoint().LatestEntryTime.IsZero() { @@ -110,6 +118,7 @@ func (s *scheduler) scheduleOnce(ctx context.Context) error { } } s.log.Debugf("scheduler: %d jobs scheduled for current batch", len(jobs)) + s.metrics.gcsJobsScheduledAfterValidation.Update(int64(len(jobs))) // distributes jobs among workers with the help of a limiter for i, job := range jobs { @@ -165,7 +174,7 @@ func (s *scheduler) createJobs(objects []*storage.ObjectAttrs, log *logp.Logger) } objectURI := "gs://" + s.src.BucketName + "/" + obj.Name - job := newJob(s.bucket, obj, objectURI, s.state, s.src, s.publisher, log, false) + job := newJob(s.bucket, obj, objectURI, s.state, s.src, s.publisher, s.metrics, log, false) jobs = append(jobs, job) } @@ -201,7 +210,6 @@ func (s *scheduler) moveToLastSeenJob(jobs []*job) []*job { func (s *scheduler) addFailedJobs(ctx context.Context, jobs []*job) []*job { jobMap := make(map[string]bool) - for _, j := range jobs { jobMap[j.Name()] = true } @@ -215,19 +223,19 @@ func (s *scheduler) addFailedJobs(ctx context.Context, jobs []*job) []*job { if err != nil { if errors.Is(err, storage.ErrObjectNotExist) { // if the object is not found in the bucket, then remove it from the failed job list - s.state.deleteFailedJob(name) + s.state.deleteFailedJob(name, s.metrics) s.log.Debugf("scheduler: failed job %s not found in bucket %s", name, s.src.BucketName) } else { // if there is an error while validating the object, // then update the failed job retry count and work towards natural removal - s.state.updateFailedJobs(name) + s.state.updateFailedJobs(name, s.metrics) s.log.Errorf("scheduler: adding failed job %s to job list caused an error: %v", name, err) } continue } objectURI := "gs://" + s.src.BucketName + "/" + obj.Name - job := newJob(s.bucket, obj, objectURI, s.state, s.src, s.publisher, s.log, true) + job := newJob(s.bucket, obj, objectURI, s.state, s.src, s.publisher, s.metrics, s.log, true) jobs = append(jobs, job) s.log.Debugf("scheduler: adding failed job number %d with name %s to job current list", fj, job.Name()) fj++ diff --git a/x-pack/filebeat/input/gcs/state.go b/x-pack/filebeat/input/gcs/state.go index ea04edcae90..af2ab43cec0 100644 --- a/x-pack/filebeat/input/gcs/state.go +++ b/x-pack/filebeat/input/gcs/state.go @@ -44,7 +44,7 @@ func newState() *state { // and returns an unlock function, done. The caller must call done when // s and cp are no longer needed in a locked state. done may not be called // more than once. 
-func (s *state) saveForTx(name string, lastModifiedOn time.Time) (cp *Checkpoint, done func()) { +func (s *state) saveForTx(name string, lastModifiedOn time.Time, metrics *inputMetrics) (cp *Checkpoint, done func()) { s.mu.Lock() if _, ok := s.cp.FailedJobs[name]; !ok { if len(s.cp.ObjectName) == 0 { @@ -61,6 +61,7 @@ func (s *state) saveForTx(name string, lastModifiedOn time.Time) (cp *Checkpoint } else { // clear entry if this is a failed job delete(s.cp.FailedJobs, name) + metrics.gcsObjectsTracked.Dec() } return s.cp, func() { s.mu.Unlock() } } @@ -70,20 +71,29 @@ func (s *state) saveForTx(name string, lastModifiedOn time.Time) (cp *Checkpoint // move ahead in timestamp & objectName due to successful operations from other workers. // A failed job will be re-tried a maximum of 3 times after which the // entry is removed from the map -func (s *state) updateFailedJobs(jobName string) { +func (s *state) updateFailedJobs(jobName string, metrics *inputMetrics) { s.mu.Lock() + if _, ok := s.cp.FailedJobs[jobName]; !ok { + // increment stored state object count & failed job count + metrics.gcsObjectsTracked.Inc() + metrics.gcsFailedJobsTotal.Inc() + } s.cp.FailedJobs[jobName]++ if s.cp.FailedJobs[jobName] > maxFailedJobRetries { delete(s.cp.FailedJobs, jobName) + metrics.gcsExpiredFailedJobsTotal.Inc() + metrics.gcsObjectsTracked.Dec() } s.mu.Unlock() } // deleteFailedJob, deletes a failed job from the failedJobs map // this is used when a job no longer exists in the bucket or gets expired -func (s *state) deleteFailedJob(jobName string) { +func (s *state) deleteFailedJob(jobName string, metrics *inputMetrics) { s.mu.Lock() delete(s.cp.FailedJobs, jobName) + metrics.gcsExpiredFailedJobsTotal.Inc() + metrics.gcsObjectsTracked.Dec() s.mu.Unlock() } diff --git a/x-pack/functionbeat/Dockerfile b/x-pack/functionbeat/Dockerfile index f5a96493923..f903da58b53 100644 --- a/x-pack/functionbeat/Dockerfile +++ b/x-pack/functionbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.8 +FROM golang:1.22.9 RUN \ apt-get update \ diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index 240acb2cfd6..7a32f16c19c 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -161,6 +161,16 @@ metricbeat.modules: # Aerospike Cluster Name #cluster_name: myclustername + # Username of hosts. Empty by default. + #username: root + + # Password of hosts. Empty by default. + #password: secret + + # Authentication modes: https://aerospike.com/docs/server/guide/security/access-control + # Possible values: internal (default), external, pki + #auth_mode: internal + # Optional SSL/TLS (disabled by default) #ssl.enabled: true diff --git a/x-pack/metricbeat/module/gcp/constants.go b/x-pack/metricbeat/module/gcp/constants.go index bba1171bb58..ac3516290bc 100644 --- a/x-pack/metricbeat/module/gcp/constants.go +++ b/x-pack/metricbeat/module/gcp/constants.go @@ -27,6 +27,7 @@ const ( ServiceDataproc = "dataproc" ServiceCloudSQL = "cloudsql" ServiceRedis = "redis" + ServiceAIPlatform = "aiplatform" ) // Paths within the GCP monitoring.TimeSeries response, if converted to JSON, where you can find each ECS field required for the output event @@ -82,13 +83,14 @@ const ( // NOTE: if you are adding labels make sure to update tests in metrics/metrics_requester_test.go. 
const ( - DefaultResourceLabel = "resource.label.zone" - ComputeResourceLabel = "resource.labels.zone" - GKEResourceLabel = "resource.label.location" - StorageResourceLabel = "resource.label.location" - CloudSQLResourceLabel = "resource.labels.region" - DataprocResourceLabel = "resource.label.region" - RedisResourceLabel = "resource.label.region" + DefaultResourceLabel = "resource.label.zone" + ComputeResourceLabel = "resource.labels.zone" + GKEResourceLabel = "resource.label.location" + StorageResourceLabel = "resource.label.location" + CloudSQLResourceLabel = "resource.labels.region" + DataprocResourceLabel = "resource.label.region" + RedisResourceLabel = "resource.label.region" + AIPlatformResourceLabel = "resource.label.location" ) // AlignersMapToGCP map contains available perSeriesAligner diff --git a/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go b/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go index d33d710f77a..ad0632e6c85 100644 --- a/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go +++ b/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go @@ -196,6 +196,8 @@ func getServiceLabelFor(serviceName string) string { return gcp.CloudSQLResourceLabel case gcp.ServiceRedis: return gcp.RedisResourceLabel + case gcp.ServiceAIPlatform: + return gcp.AIPlatformResourceLabel default: return gcp.DefaultResourceLabel } diff --git a/x-pack/metricbeat/module/gcp/metrics/metrics_requester_test.go b/x-pack/metricbeat/module/gcp/metrics/metrics_requester_test.go index 658568b66ca..9fb044e39e5 100644 --- a/x-pack/metricbeat/module/gcp/metrics/metrics_requester_test.go +++ b/x-pack/metricbeat/module/gcp/metrics/metrics_requester_test.go @@ -222,6 +222,7 @@ func TestIsAGlobalService(t *testing.T) { {"Dataproc service", gcp.ServiceDataproc, false}, {"CloudSQL service", gcp.ServiceCloudSQL, false}, {"Redis service", gcp.ServiceRedis, false}, + {"AIPlatform service", gcp.ServiceAIPlatform, false}, } for _, c := range cases { t.Run(c.title, func(t *testing.T) { @@ -249,6 +250,7 @@ func TestGetServiceLabelFor(t *testing.T) { {"Dataproc service", gcp.ServiceDataproc, "resource.label.region"}, {"CloudSQL service", gcp.ServiceCloudSQL, "resource.labels.region"}, {"Redis service", gcp.ServiceRedis, "resource.label.region"}, + {"AIPlatform service", gcp.ServiceAIPlatform, "resource.label.location"}, } for _, c := range cases { diff --git a/x-pack/metricbeat/module/meraki/device_health/device_health.go b/x-pack/metricbeat/module/meraki/device_health/device_health.go index 25d41bf43f5..bbe301b3b43 100644 --- a/x-pack/metricbeat/module/meraki/device_health/device_health.go +++ b/x-pack/metricbeat/module/meraki/device_health/device_health.go @@ -98,10 +98,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { return fmt.Errorf("getDeviceStatuses failed; %w", err) } - err = getDevicePerformanceScores(m.client, devices) - if err != nil { - return fmt.Errorf("getDevicePerformanceScores failed; %w", err) - } + getDevicePerformanceScores(m.logger, m.client, devices) err = getDeviceChannelUtilization(m.client, devices, collectionPeriod) if err != nil { diff --git a/x-pack/metricbeat/module/meraki/device_health/devices.go b/x-pack/metricbeat/module/meraki/device_health/devices.go index 2f2591d6783..c76b76def78 100644 --- a/x-pack/metricbeat/module/meraki/device_health/devices.go +++ b/x-pack/metricbeat/module/meraki/device_health/devices.go @@ -6,10 +6,12 @@ package device_health import ( "fmt" + "net/http" "strings" "time" "github.com/elastic/beats/v7/metricbeat/mb" + 
"github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" meraki "github.com/meraki/dashboard-api-go/v3/sdk" @@ -67,7 +69,7 @@ func getDeviceStatuses(client *meraki.Client, organizationID string, devices map return nil } -func getDevicePerformanceScores(client *meraki.Client, devices map[Serial]*Device) error { +func getDevicePerformanceScores(logger *logp.Logger, client *meraki.Client, devices map[Serial]*Device) { for _, device := range devices { // attempting to get a performance score for a non-MX device returns a 400 if strings.Index(device.details.Model, "MX") != 0 { @@ -76,7 +78,11 @@ func getDevicePerformanceScores(client *meraki.Client, devices map[Serial]*Devic val, res, err := client.Appliance.GetDeviceAppliancePerformance(device.details.Serial) if err != nil { - return fmt.Errorf("GetDeviceAppliancePerformance failed; [%d] %s. %w", res.StatusCode(), res.Body(), err) + if !(res.StatusCode() != http.StatusBadRequest && strings.Contains(string(res.Body()), "Feature not supported")) { + logger.Errorf("GetDeviceAppliancePerformance failed; [%d] %s. %v", res.StatusCode(), res.Body(), err) + } + + continue } // 204 indicates there is no data for the device, it's likely 'offline' or 'dormant' @@ -84,8 +90,6 @@ func getDevicePerformanceScores(client *meraki.Client, devices map[Serial]*Devic device.performanceScore = val } } - - return nil } func getDeviceChannelUtilization(client *meraki.Client, devices map[Serial]*Device, period time.Duration) error { diff --git a/x-pack/metricbeat/module/stan/_meta/Dockerfile b/x-pack/metricbeat/module/stan/_meta/Dockerfile index 8150094a78b..4e844172a0b 100644 --- a/x-pack/metricbeat/module/stan/_meta/Dockerfile +++ b/x-pack/metricbeat/module/stan/_meta/Dockerfile @@ -2,7 +2,7 @@ ARG STAN_VERSION=0.15.1 FROM nats-streaming:$STAN_VERSION # build stage -FROM golang:1.22.8 AS build-env +FROM golang:1.22.9 AS build-env RUN apt-get install git mercurial gcc RUN git clone https://github.com/nats-io/stan.go.git /stan-go RUN cd /stan-go/examples/stan-bench && git checkout tags/v0.5.2 && go build .