From c835ab872bda09b86390dea0ff3e76e433d15cb6 Mon Sep 17 00:00:00 2001
From: Pier-Hugues Pellerin
Date: Thu, 13 Jan 2022 08:36:01 -0500
Subject: [PATCH 01/69] Replace location of apoydence/eachers to poy/eachers
 (#29780)

---
 NOTICE.txt | 4 ++--
 go.mod     | 1 +
 go.sum     | 4 ++--
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/NOTICE.txt b/NOTICE.txt
index cfb36644063..7a332a0e662 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -22563,12 +22563,12 @@ For t_cl_generator.cc
 
 --------------------------------------------------------------------------------
-Dependency : github.com/apoydence/eachers
+Dependency : github.com/poy/eachers
 Version: v0.0.0-20181020210610-23942921fe77
 Licence type (autodetected): MIT
 --------------------------------------------------------------------------------
 
-Contents of probable licence file $GOMODCACHE/github.com/apoydence/eachers@v0.0.0-20181020210610-23942921fe77/LICENSE.md:
+Contents of probable licence file $GOMODCACHE/github.com/poy/eachers@v0.0.0-20181020210610-23942921fe77/LICENSE.md:
 
 The MIT License (MIT)
 
 
diff --git a/go.mod b/go.mod
index 1e69b9d3a93..2601349a467 100644
--- a/go.mod
+++ b/go.mod
@@ -285,6 +285,7 @@ require (
 replace (
 	github.com/Microsoft/go-winio => github.com/bi-zone/go-winio v0.4.15
 	github.com/Shopify/sarama => github.com/elastic/sarama v1.19.1-0.20210823122811-11c3ef800752
+	github.com/apoydence/eachers => github.com/poy/eachers v0.0.0-20181020210610-23942921fe77 //indirect, see https://github.com/elastic/beats/pull/29780 for details.
 	github.com/cucumber/godog => github.com/cucumber/godog v0.8.1
 	github.com/docker/go-plugins-helpers => github.com/elastic/go-plugins-helpers v0.0.0-20200207104224-bdf17607b79f
 	github.com/dop251/goja => github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20
diff --git a/go.sum b/go.sum
index 04eb9ceb919..ad1e540577f 100644
--- a/go.sum
+++ b/go.sum
@@ -210,8 +210,6 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb
 github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.13.1-0.20200603211036-eac4d0c79a5f h1:33BV5v3u8I6dA2dEoPuXWCsAaHHOJfPtdxZhAMQV4uo=
 github.com/apache/thrift v0.13.1-0.20200603211036-eac4d0c79a5f/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77 h1:afT88tB6u9JCKQZVAAaa9ICz/uGn5Uw9ekn6P22mYKM=
-github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:bXvGk6IkT1Agy7qzJ+DjIw/SJ1AaB3AvAuMDVV+Vkoo=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -1346,6 +1344,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
+github.com/poy/eachers v0.0.0-20181020210610-23942921fe77 h1:SNdqPRvRsVmYR0gKqFvrUKhFizPJ6yDiGQ++VAJIoDg=
+github.com/poy/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:x1vqpbcMW9T/KRcQ4b48diSiSVtYgvwQ5xzDByEg4WE=
 github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
 github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg=
 github.com/prometheus/alertmanager v0.22.2/go.mod h1:rYinOWxFuCnNssc3iOjn2oMTlhLaPcUuqV5yk5JKUAE=

From 5e13dcaaf5eaae9ec27e55d528b738301d9296af Mon Sep 17 00:00:00 2001
From: Chris Mark
Date: Thu, 13 Jan 2022 13:47:52 +0000
Subject: [PATCH 02/69] Update version of jinja2 (#29835)

Signed-off-by: chrismark
---
 libbeat/tests/system/requirements_aix.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libbeat/tests/system/requirements_aix.txt b/libbeat/tests/system/requirements_aix.txt
index 9b9df31aa37..b786356fcb3 100644
--- a/libbeat/tests/system/requirements_aix.txt
+++ b/libbeat/tests/system/requirements_aix.txt
@@ -15,7 +15,7 @@ idna==2.6
 importlib-metadata==1.7.0
 iniconfig==1.0.1
 ipaddress==1.0.19
-Jinja2==2.11.2
+Jinja2==2.11.3
 jsondiff==1.1.2
 jsonschema==3.2.0
 kafka-python==1.4.3
@@ -45,4 +45,4 @@ toml==0.10.1
 urllib3==1.26.5
 wcwidth==0.2.5
 websocket-client==0.47.0
-zipp>=1.2.0,<=3.1.0
\ No newline at end of file
+zipp>=1.2.0,<=3.1.0

From cfefa5eb75cfeae9029237e1b9e9f64c0a088070 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?=
Date: Thu, 13 Jan 2022 15:56:51 +0100
Subject: [PATCH 03/69] Follow up changes in documentation and configuration
 reference about data streams (#29819)

---
 auditbeat/auditbeat.reference.yml             | 11 ++--
 filebeat/filebeat.reference.yml               | 11 ++--
 heartbeat/heartbeat.reference.yml             | 11 ++--
 .../output-elasticsearch.reference.yml.tmpl   |  5 +-
 .../_meta/config/setup.ilm.reference.yml.tmpl |  6 +--
 libbeat/docs/howto/change-index-name.asciidoc | 24 ++-------
 .../docs/howto/load-index-templates.asciidoc  |  6 ++-
 libbeat/docs/shared-ilm.asciidoc              | 51 ++-----------------
 metricbeat/metricbeat.reference.yml           | 11 ++--
 packetbeat/packetbeat.reference.yml           | 11 ++--
 winlogbeat/winlogbeat.reference.yml           | 11 ++--
 x-pack/auditbeat/auditbeat.reference.yml      | 11 ++--
 x-pack/filebeat/filebeat.reference.yml        | 11 ++--
 .../functionbeat/functionbeat.reference.yml   | 11 ++--
 x-pack/heartbeat/heartbeat.reference.yml      | 11 ++--
 x-pack/metricbeat/metricbeat.reference.yml    | 11 ++--
 x-pack/osquerybeat/osquerybeat.reference.yml  | 11 ++--
 x-pack/packetbeat/packetbeat.reference.yml    | 11 ++--
 x-pack/winlogbeat/winlogbeat.reference.yml    | 11 ++--
 19 files changed, 73 insertions(+), 173 deletions(-)

diff --git a/auditbeat/auditbeat.reference.yml b/auditbeat/auditbeat.reference.yml
index cce2bc58144..bfe0eafaeec 100644
--- a/auditbeat/auditbeat.reference.yml
+++ b/auditbeat/auditbeat.reference.yml
@@ -414,10 +414,9 @@ output.elasticsearch:
   # Number of workers per Elasticsearch host.
   #worker: 1
 
-  # Optional index name. The default is "auditbeat" plus date
-  # and generates [auditbeat-]YYYY.MM.DD keys.
+  # Optional data stream or index name. The default is "auditbeat-%{[agent.version]}".
   # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
-  #index: "auditbeat-%{[agent.version]}-%{+yyyy.MM.dd}"
+  #index: "auditbeat-%{[agent.version]}"
 
   # Optional ingest pipeline. By default no pipeline will be used.
#pipeline: "" @@ -1226,10 +1225,8 @@ setup.template.settings: # ====================== Index Lifecycle Management (ILM) ====================== -# Configure index lifecycle management (ILM). These settings create a write -# alias and add additional settings to the index template. When ILM is enabled, -# output.elasticsearch.index is ignored, and the write alias is used to set the -# index name. +# Configure index lifecycle management (ILM) to manage the backing indices +# of your data streams. # Enable ILM support. Valid values are true, false. #setup.ilm.enabled: true diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml index 11273253e6a..90d614545dc 100644 --- a/filebeat/filebeat.reference.yml +++ b/filebeat/filebeat.reference.yml @@ -1358,10 +1358,9 @@ output.elasticsearch: # Number of workers per Elasticsearch host. #worker: 1 - # Optional index name. The default is "filebeat" plus date - # and generates [filebeat-]YYYY.MM.DD keys. + # Optional data stream or index name. The default is "filebeat-%{[agent.version]}". # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. - #index: "filebeat-%{[agent.version]}-%{+yyyy.MM.dd}" + #index: "filebeat-%{[agent.version]}" # Optional ingest pipeline. By default no pipeline will be used. #pipeline: "" @@ -2170,10 +2169,8 @@ setup.template.settings: # ====================== Index Lifecycle Management (ILM) ====================== -# Configure index lifecycle management (ILM). These settings create a write -# alias and add additional settings to the index template. When ILM is enabled, -# output.elasticsearch.index is ignored, and the write alias is used to set the -# index name. +# Configure index lifecycle management (ILM) to manage the backing indices +# of your data streams. # Enable ILM support. Valid values are true, false. #setup.ilm.enabled: true diff --git a/heartbeat/heartbeat.reference.yml b/heartbeat/heartbeat.reference.yml index 415ca4cb69d..9948b3a0b62 100644 --- a/heartbeat/heartbeat.reference.yml +++ b/heartbeat/heartbeat.reference.yml @@ -560,10 +560,9 @@ output.elasticsearch: # Number of workers per Elasticsearch host. #worker: 1 - # Optional index name. The default is "heartbeat" plus date - # and generates [heartbeat-]YYYY.MM.DD keys. + # Optional data stream or index name. The default is "heartbeat-%{[agent.version]}". # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. - #index: "heartbeat-%{[agent.version]}-%{+yyyy.MM.dd}" + #index: "heartbeat-%{[agent.version]}" # Optional ingest pipeline. By default no pipeline will be used. #pipeline: "" @@ -1372,10 +1371,8 @@ setup.template.settings: # ====================== Index Lifecycle Management (ILM) ====================== -# Configure index lifecycle management (ILM). These settings create a write -# alias and add additional settings to the index template. When ILM is enabled, -# output.elasticsearch.index is ignored, and the write alias is used to set the -# index name. +# Configure index lifecycle management (ILM) to manage the backing indices +# of your data streams. # Enable ILM support. Valid values are true, false. 
#setup.ilm.enabled: true diff --git a/libbeat/_meta/config/output-elasticsearch.reference.yml.tmpl b/libbeat/_meta/config/output-elasticsearch.reference.yml.tmpl index be9ede7dba8..32c2c56a9b8 100644 --- a/libbeat/_meta/config/output-elasticsearch.reference.yml.tmpl +++ b/libbeat/_meta/config/output-elasticsearch.reference.yml.tmpl @@ -31,10 +31,9 @@ output.elasticsearch: # Number of workers per Elasticsearch host. #worker: 1 - # Optional index name. The default is "{{.BeatIndexPrefix}}" plus date - # and generates [{{.BeatIndexPrefix}}-]YYYY.MM.DD keys. + # Optional data stream or index name. The default is "{{.BeatIndexPrefix}}-%{[agent.version]}". # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. - #index: "{{.BeatIndexPrefix}}-%{[agent.version]}-%{+yyyy.MM.dd}" + #index: "{{.BeatIndexPrefix}}-%{[agent.version]}" # Optional ingest pipeline. By default no pipeline will be used. #pipeline: "" diff --git a/libbeat/_meta/config/setup.ilm.reference.yml.tmpl b/libbeat/_meta/config/setup.ilm.reference.yml.tmpl index 5e4a2e642fe..406486c51b2 100644 --- a/libbeat/_meta/config/setup.ilm.reference.yml.tmpl +++ b/libbeat/_meta/config/setup.ilm.reference.yml.tmpl @@ -1,9 +1,7 @@ {{header "Index Lifecycle Management (ILM)"}} -# Configure index lifecycle management (ILM). These settings create a write -# alias and add additional settings to the index template. When ILM is enabled, -# output.elasticsearch.index is ignored, and the write alias is used to set the -# index name. +# Configure index lifecycle management (ILM) to manage the backing indices +# of your data streams. # Enable ILM support. Valid values are true, false. #setup.ilm.enabled: true diff --git a/libbeat/docs/howto/change-index-name.asciidoc b/libbeat/docs/howto/change-index-name.asciidoc index 4484ad79c82..8affe9887d0 100644 --- a/libbeat/docs/howto/change-index-name.asciidoc +++ b/libbeat/docs/howto/change-index-name.asciidoc @@ -1,32 +1,18 @@ [id="change-index-name"] == Change the index name -ifndef::no_ilm[] -TIP: If you're sending events to a cluster that supports index lifecycle -management, you need to change the index name in the ILM policy. -See <> to learn how to change it. -endif::no_ilm[] - -{beatname_uc} uses time series indices, by default, when index lifecycle -management is disabled or unsupported. The indices are named -+{beatname_lc}-{version}-yyyy.MM.dd+, where `yyyy.MM.dd` is the date when the -events were indexed. To use a different name, set the -<> option in the {es} output. The value that -you specify should include the root name of the index plus version and date -information. You also need to configure the `setup.template.name` and +{beatname_uc} uses data streams named +{beatname_lc}-{version}+. +To use a different name, set the <> option +in the {es} output. You also need to configure the `setup.template.name` and `setup.template.pattern` options to match the new name. For example: ["source","sh",subs="attributes,callouts"] ----- -output.elasticsearch.index: "customname-%{[{beat_version_key}]}-%{+yyyy.MM.dd}" +output.elasticsearch.index: "customname-%{[{beat_version_key}]}" setup.template.name: "customname" -setup.template.pattern: "customname-*" +setup.template.pattern: "customname-%{[{beat_version_key}]}" ----- -ifndef::no_ilm[] -WARNING: If <> is enabled (which is typically the default), `setup.template.name` and `setup.template.pattern` are ignored. 
-endif::no_ilm[] - ifndef::no_dashboards[] If you're using pre-built Kibana dashboards, also set the `setup.dashboards.index` option. For example: diff --git a/libbeat/docs/howto/load-index-templates.asciidoc b/libbeat/docs/howto/load-index-templates.asciidoc index f62efecdc04..06862a5cc22 100644 --- a/libbeat/docs/howto/load-index-templates.asciidoc +++ b/libbeat/docs/howto/load-index-templates.asciidoc @@ -3,8 +3,8 @@ {es} uses {ref}/index-templates.html[index templates] to define: -* Settings that control the behavior of your indices. The settings include the -lifecycle policy used to manage indices as they grow and age. +* Settings that control the behavior of your data stream and backing indices. +The settings include the lifecycle policy used to manage backing indices as they grow and age. * Mappings that determine how fields are analyzed. Each mapping sets the {ref}/mapping-types.html[{es} datatype] to use for a specific data field. @@ -44,6 +44,8 @@ setup.template.fields: "path/to/fields.yml" If the template already exists, it’s not overwritten unless you configure {beatname_uc} to do so. +You can load templates for both data streams and indices. + [float] [[overwrite-template]] === Overwrite an existing index template diff --git a/libbeat/docs/shared-ilm.asciidoc b/libbeat/docs/shared-ilm.asciidoc index 7d5dda17439..5ccb0a43f95 100644 --- a/libbeat/docs/shared-ilm.asciidoc +++ b/libbeat/docs/shared-ilm.asciidoc @@ -7,16 +7,10 @@ ++++ Use the {ref}/getting-started-index-lifecycle-management.html[index lifecycle -management] (ILM) feature in {es} to manage your {beatname_uc} indices as they age. -For example, instead of creating daily indices where index size can vary based -on the number of Beats and number of events sent, use an index lifecycle policy -to automate a rollover to a new index when the existing index reaches a -specified size or age. - -Starting with version 7.0, {beatname_uc} uses index lifecycle management by -default when it connects to a cluster that supports lifecycle management. -{beatname_uc} loads the default policy automatically and applies it to any -indices created by {beatname_uc}. +management] (ILM) feature in {es} to manage your {beatname_uc} +their backing indices of your data streams as they age. {beatname_uc} loads +the default policy automatically and applies it to any +data streams created by {beatname_uc}. You can view and edit the policy in the *Index lifecycle policies* UI in {kib}. For more information about working with the UI, see @@ -27,12 +21,7 @@ Example configuration: ["source","yaml",subs="attributes"] ---- setup.ilm.enabled: true -setup.ilm.rollover_alias: "{beatname_lc}" -setup.ilm.pattern: "{now/d}-000001" <1> ---- -<1> Date math is supported here. For more information, see -{ref}/indices-rollover-index.html#_using_date_math_with_the_rollover_api[Using -date math with the rollover API]. WARNING: If <> is enabled (which is typically the default), `setup.template.name` and `setup.template.pattern` are ignored. @@ -49,38 +38,6 @@ You can specify the following settings in the `setup.ilm` section of the Enables or disables index lifecycle management on any new indices created by {beatname_uc}. Valid values are `true` and `false`. -[float] -[[setup-ilm-rollover_alias-option]] -==== `setup.ilm.rollover_alias` - -The index lifecycle write alias name. The default is -+{beatname_lc}-%{[{beat_version_key}]}+. Setting this option changes the alias name. 
-
-NOTE: If you modify this setting after loading the index template, you must
-overwrite the template to apply the changes.
-
-[float]
-[[setup-ilm-pattern-option]]
-==== `setup.ilm.pattern`
-
-The rollover index pattern. The default is `%{now/d}-000001`.
-
-Date math is supported in this setting. For example:
-
-[source,yaml]
-----
-setup.ilm.pattern: "{now/M{yyyy.MM}}-000001"
-----
-
-For more information, see
-{ref}/indices-rollover-index.html#_using_date_math_with_the_rollover_api[Using
-date math with the rollover API].
-
-NOTE: Before modifying this setting for an existing ILM setup, you must manually
-remove any aliases related to the previous pattern, then overwrite the policy.
-Existing indices that don't match the new pattern might no longer be subject to
-index lifecycle management.
-
 [float]
 [[setup-ilm-policy_name-option]]
 ==== `setup.ilm.policy_name`
diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml
index b98461466cf..ecaed598049 100644
--- a/metricbeat/metricbeat.reference.yml
+++ b/metricbeat/metricbeat.reference.yml
@@ -1277,10 +1277,9 @@ output.elasticsearch:
   # Number of workers per Elasticsearch host.
   #worker: 1
 
-  # Optional index name. The default is "metricbeat" plus date
-  # and generates [metricbeat-]YYYY.MM.DD keys.
+  # Optional data stream or index name. The default is "metricbeat-%{[agent.version]}".
   # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
-  #index: "metricbeat-%{[agent.version]}-%{+yyyy.MM.dd}"
+  #index: "metricbeat-%{[agent.version]}"
 
   # Optional ingest pipeline. By default no pipeline will be used.
   #pipeline: ""
@@ -2089,10 +2088,8 @@ setup.template.settings:
 
 # ====================== Index Lifecycle Management (ILM) ======================
 
-# Configure index lifecycle management (ILM). These settings create a write
-# alias and add additional settings to the index template. When ILM is enabled,
-# output.elasticsearch.index is ignored, and the write alias is used to set the
-# index name.
+# Configure index lifecycle management (ILM) to manage the backing indices
+# of your data streams.
 
 # Enable ILM support. Valid values are true, false.
 #setup.ilm.enabled: true
diff --git a/packetbeat/packetbeat.reference.yml b/packetbeat/packetbeat.reference.yml
index fd9ed4b20fe..4b39046e4c7 100644
--- a/packetbeat/packetbeat.reference.yml
+++ b/packetbeat/packetbeat.reference.yml
@@ -909,10 +909,9 @@ output.elasticsearch:
   # Number of workers per Elasticsearch host.
   #worker: 1
 
-  # Optional index name. The default is "packetbeat" plus date
-  # and generates [packetbeat-]YYYY.MM.DD keys.
+  # Optional data stream or index name. The default is "packetbeat-%{[agent.version]}".
   # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
-  #index: "packetbeat-%{[agent.version]}-%{+yyyy.MM.dd}"
+  #index: "packetbeat-%{[agent.version]}"
 
   # Optional ingest pipeline. By default no pipeline will be used.
   #pipeline: ""
@@ -1721,10 +1720,8 @@ setup.template.settings:
 
 # ====================== Index Lifecycle Management (ILM) ======================
 
-# Configure index lifecycle management (ILM). These settings create a write
-# alias and add additional settings to the index template. When ILM is enabled,
-# output.elasticsearch.index is ignored, and the write alias is used to set the
-# index name.
+# Configure index lifecycle management (ILM) to manage the backing indices
+# of your data streams.
 
 # Enable ILM support. Valid values are true, false.
 #setup.ilm.enabled: true
diff --git a/winlogbeat/winlogbeat.reference.yml b/winlogbeat/winlogbeat.reference.yml
index a9e7ff34744..4ebba6c88ba 100644
--- a/winlogbeat/winlogbeat.reference.yml
+++ b/winlogbeat/winlogbeat.reference.yml
@@ -350,10 +350,9 @@ output.elasticsearch:
   # Number of workers per Elasticsearch host.
   #worker: 1
 
-  # Optional index name. The default is "winlogbeat" plus date
-  # and generates [winlogbeat-]YYYY.MM.DD keys.
+  # Optional data stream or index name. The default is "winlogbeat-%{[agent.version]}".
   # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
-  #index: "winlogbeat-%{[agent.version]}-%{+yyyy.MM.dd}"
+  #index: "winlogbeat-%{[agent.version]}"
 
   # Optional ingest pipeline. By default no pipeline will be used.
   #pipeline: ""
@@ -1162,10 +1161,8 @@ setup.template.settings:
 
 # ====================== Index Lifecycle Management (ILM) ======================
 
-# Configure index lifecycle management (ILM). These settings create a write
-# alias and add additional settings to the index template. When ILM is enabled,
-# output.elasticsearch.index is ignored, and the write alias is used to set the
-# index name.
+# Configure index lifecycle management (ILM) to manage the backing indices
+# of your data streams.
 
 # Enable ILM support. Valid values are true, false.
 #setup.ilm.enabled: true
diff --git a/x-pack/auditbeat/auditbeat.reference.yml b/x-pack/auditbeat/auditbeat.reference.yml
index 1fd3bfcc92c..e212152c2a0 100644
--- a/x-pack/auditbeat/auditbeat.reference.yml
+++ b/x-pack/auditbeat/auditbeat.reference.yml
@@ -470,10 +470,9 @@ output.elasticsearch:
   # Number of workers per Elasticsearch host.
   #worker: 1
 
-  # Optional index name. The default is "auditbeat" plus date
-  # and generates [auditbeat-]YYYY.MM.DD keys.
+  # Optional data stream or index name. The default is "auditbeat-%{[agent.version]}".
   # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
-  #index: "auditbeat-%{[agent.version]}-%{+yyyy.MM.dd}"
+  #index: "auditbeat-%{[agent.version]}"
 
   # Optional ingest pipeline. By default no pipeline will be used.
   #pipeline: ""
@@ -1282,10 +1281,8 @@ setup.template.settings:
 
 # ====================== Index Lifecycle Management (ILM) ======================
 
-# Configure index lifecycle management (ILM). These settings create a write
-# alias and add additional settings to the index template. When ILM is enabled,
-# output.elasticsearch.index is ignored, and the write alias is used to set the
-# index name.
+# Configure index lifecycle management (ILM) to manage the backing indices
+# of your data streams.
 
 # Enable ILM support. Valid values are true, false.
 #setup.ilm.enabled: true
diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml
index 7aa5350e19f..71872a5c4b2 100644
--- a/x-pack/filebeat/filebeat.reference.yml
+++ b/x-pack/filebeat/filebeat.reference.yml
@@ -3588,10 +3588,9 @@ output.elasticsearch:
   # Number of workers per Elasticsearch host.
   #worker: 1
 
-  # Optional index name. The default is "filebeat" plus date
-  # and generates [filebeat-]YYYY.MM.DD keys.
+  # Optional data stream or index name. The default is "filebeat-%{[agent.version]}".
   # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
-  #index: "filebeat-%{[agent.version]}-%{+yyyy.MM.dd}"
+  #index: "filebeat-%{[agent.version]}"
 
   # Optional ingest pipeline. By default no pipeline will be used.
   #pipeline: ""
@@ -4400,10 +4399,8 @@ setup.template.settings:
 
 # ====================== Index Lifecycle Management (ILM) ======================
 
-# Configure index lifecycle management (ILM). These settings create a write
-# alias and add additional settings to the index template. When ILM is enabled,
-# output.elasticsearch.index is ignored, and the write alias is used to set the
-# index name.
+# Configure index lifecycle management (ILM) to manage the backing indices
+# of your data streams.
 
 # Enable ILM support. Valid values are true, false.
 #setup.ilm.enabled: true
diff --git a/x-pack/functionbeat/functionbeat.reference.yml b/x-pack/functionbeat/functionbeat.reference.yml
index 5f0e7bfd13e..20478c6aed6 100644
--- a/x-pack/functionbeat/functionbeat.reference.yml
+++ b/x-pack/functionbeat/functionbeat.reference.yml
@@ -592,10 +592,9 @@ output.elasticsearch:
   # Number of workers per Elasticsearch host.
   #worker: 1
 
-  # Optional index name. The default is "functionbeat" plus date
-  # and generates [functionbeat-]YYYY.MM.DD keys.
+  # Optional data stream or index name. The default is "functionbeat-%{[agent.version]}".
   # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
-  #index: "functionbeat-%{[agent.version]}-%{+yyyy.MM.dd}"
+  #index: "functionbeat-%{[agent.version]}"
 
   # Optional ingest pipeline. By default no pipeline will be used.
   #pipeline: ""
@@ -1005,10 +1004,8 @@ setup.template.settings:
 
 # ====================== Index Lifecycle Management (ILM) ======================
 
-# Configure index lifecycle management (ILM). These settings create a write
-# alias and add additional settings to the index template. When ILM is enabled,
-# output.elasticsearch.index is ignored, and the write alias is used to set the
-# index name.
+# Configure index lifecycle management (ILM) to manage the backing indices
+# of your data streams.
 
 # Enable ILM support. Valid values are true, false.
 #setup.ilm.enabled: true
diff --git a/x-pack/heartbeat/heartbeat.reference.yml b/x-pack/heartbeat/heartbeat.reference.yml
index 415ca4cb69d..9948b3a0b62 100644
--- a/x-pack/heartbeat/heartbeat.reference.yml
+++ b/x-pack/heartbeat/heartbeat.reference.yml
@@ -560,10 +560,9 @@ output.elasticsearch:
   # Number of workers per Elasticsearch host.
   #worker: 1
 
-  # Optional index name. The default is "heartbeat" plus date
-  # and generates [heartbeat-]YYYY.MM.DD keys.
+  # Optional data stream or index name. The default is "heartbeat-%{[agent.version]}".
   # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
-  #index: "heartbeat-%{[agent.version]}-%{+yyyy.MM.dd}"
+  #index: "heartbeat-%{[agent.version]}"
 
   # Optional ingest pipeline. By default no pipeline will be used.
   #pipeline: ""
@@ -1372,10 +1371,8 @@ setup.template.settings:
 
 # ====================== Index Lifecycle Management (ILM) ======================
 
-# Configure index lifecycle management (ILM). These settings create a write
-# alias and add additional settings to the index template. When ILM is enabled,
-# output.elasticsearch.index is ignored, and the write alias is used to set the
-# index name.
+# Configure index lifecycle management (ILM) to manage the backing indices
+# of your data streams.
 
 # Enable ILM support. Valid values are true, false.
 #setup.ilm.enabled: true
diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml
index 18b6d9704a8..f974c4e77f7 100644
--- a/x-pack/metricbeat/metricbeat.reference.yml
+++ b/x-pack/metricbeat/metricbeat.reference.yml
@@ -1798,10 +1798,9 @@ output.elasticsearch:
   # Number of workers per Elasticsearch host.
   #worker: 1
 
-  # Optional index name. The default is "metricbeat" plus date
-  # and generates [metricbeat-]YYYY.MM.DD keys.
+  # Optional data stream or index name. The default is "metricbeat-%{[agent.version]}".
   # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
-  #index: "metricbeat-%{[agent.version]}-%{+yyyy.MM.dd}"
+  #index: "metricbeat-%{[agent.version]}"
 
   # Optional ingest pipeline. By default no pipeline will be used.
   #pipeline: ""
@@ -2610,10 +2609,8 @@ setup.template.settings:
 
 # ====================== Index Lifecycle Management (ILM) ======================
 
-# Configure index lifecycle management (ILM). These settings create a write
-# alias and add additional settings to the index template. When ILM is enabled,
-# output.elasticsearch.index is ignored, and the write alias is used to set the
-# index name.
+# Configure index lifecycle management (ILM) to manage the backing indices
+# of your data streams.
 
 # Enable ILM support. Valid values are true, false.
 #setup.ilm.enabled: true
diff --git a/x-pack/osquerybeat/osquerybeat.reference.yml b/x-pack/osquerybeat/osquerybeat.reference.yml
index b81f31c7d16..54c5f87ea86 100644
--- a/x-pack/osquerybeat/osquerybeat.reference.yml
+++ b/x-pack/osquerybeat/osquerybeat.reference.yml
@@ -311,10 +311,9 @@ output.elasticsearch:
   # Number of workers per Elasticsearch host.
   #worker: 1
 
-  # Optional index name. The default is "osquerybeat" plus date
-  # and generates [osquerybeat-]YYYY.MM.DD keys.
+  # Optional data stream or index name. The default is "osquerybeat-%{[agent.version]}".
   # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
-  #index: "osquerybeat-%{[agent.version]}-%{+yyyy.MM.dd}"
+  #index: "osquerybeat-%{[agent.version]}"
 
   # Optional ingest pipeline. By default no pipeline will be used.
   #pipeline: ""
@@ -724,10 +723,8 @@ setup.template.settings:
 
 # ====================== Index Lifecycle Management (ILM) ======================
 
-# Configure index lifecycle management (ILM). These settings create a write
-# alias and add additional settings to the index template. When ILM is enabled,
-# output.elasticsearch.index is ignored, and the write alias is used to set the
-# index name.
+# Configure index lifecycle management (ILM) to manage the backing indices
+# of your data streams.
 
 # Enable ILM support. Valid values are true, false.
 #setup.ilm.enabled: true
diff --git a/x-pack/packetbeat/packetbeat.reference.yml b/x-pack/packetbeat/packetbeat.reference.yml
index fd9ed4b20fe..4b39046e4c7 100644
--- a/x-pack/packetbeat/packetbeat.reference.yml
+++ b/x-pack/packetbeat/packetbeat.reference.yml
@@ -909,10 +909,9 @@ output.elasticsearch:
   # Number of workers per Elasticsearch host.
   #worker: 1
 
-  # Optional index name. The default is "packetbeat" plus date
-  # and generates [packetbeat-]YYYY.MM.DD keys.
+  # Optional data stream or index name. The default is "packetbeat-%{[agent.version]}".
   # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
- #index: "packetbeat-%{[agent.version]}-%{+yyyy.MM.dd}" + #index: "packetbeat-%{[agent.version]}" # Optional ingest pipeline. By default no pipeline will be used. #pipeline: "" @@ -1721,10 +1720,8 @@ setup.template.settings: # ====================== Index Lifecycle Management (ILM) ====================== -# Configure index lifecycle management (ILM). These settings create a write -# alias and add additional settings to the index template. When ILM is enabled, -# output.elasticsearch.index is ignored, and the write alias is used to set the -# index name. +# Configure index lifecycle management (ILM) to manage the backing indices +# of your data streams. # Enable ILM support. Valid values are true, false. #setup.ilm.enabled: true diff --git a/x-pack/winlogbeat/winlogbeat.reference.yml b/x-pack/winlogbeat/winlogbeat.reference.yml index 3b327c38cc2..f083fabb0e5 100644 --- a/x-pack/winlogbeat/winlogbeat.reference.yml +++ b/x-pack/winlogbeat/winlogbeat.reference.yml @@ -352,10 +352,9 @@ output.elasticsearch: # Number of workers per Elasticsearch host. #worker: 1 - # Optional index name. The default is "winlogbeat" plus date - # and generates [winlogbeat-]YYYY.MM.DD keys. + # Optional data stream or index name. The default is "winlogbeat-%{[agent.version]}". # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. - #index: "winlogbeat-%{[agent.version]}-%{+yyyy.MM.dd}" + #index: "winlogbeat-%{[agent.version]}" # Optional ingest pipeline. By default no pipeline will be used. #pipeline: "" @@ -1164,10 +1163,8 @@ setup.template.settings: # ====================== Index Lifecycle Management (ILM) ====================== -# Configure index lifecycle management (ILM). These settings create a write -# alias and add additional settings to the index template. When ILM is enabled, -# output.elasticsearch.index is ignored, and the write alias is used to set the -# index name. +# Configure index lifecycle management (ILM) to manage the backing indices +# of your data streams. # Enable ILM support. Valid values are true, false. #setup.ilm.enabled: true From a7f8517716541d466f66448f4538fd9002c1016b Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Thu, 13 Jan 2022 10:56:57 -0500 Subject: [PATCH 04/69] [Elastic Agent] Add doc on how Fleet Server is bootstrapped. (#29563) * Add doc on how Fleet Server is bootstrapped by Elastic Agent. * Add port 8221. * Update with suggessions. * Apply suggestions from code review Co-authored-by: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Co-authored-by: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> --- .../docs/fleet-server-bootstrap.asciidoc | 90 +++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 x-pack/elastic-agent/docs/fleet-server-bootstrap.asciidoc diff --git a/x-pack/elastic-agent/docs/fleet-server-bootstrap.asciidoc b/x-pack/elastic-agent/docs/fleet-server-bootstrap.asciidoc new file mode 100644 index 00000000000..1dc818f38d2 --- /dev/null +++ b/x-pack/elastic-agent/docs/fleet-server-bootstrap.asciidoc @@ -0,0 +1,90 @@ +[[fleet-server-bootstrap]] +== Fleet Server Bootstrap + +Elastic Agent with Fleet Server has a bootstrap process that it uses to get +Fleet Server up and running under Elastic Agent. + +Elastic Agent will bootstrap a Fleet Server when the `--fleet-server-es` +command-line option is provided to an `install` or `enroll` command. 
In this mode +Elastic Agent will only communicate with its local Fleet Server and expose +the Fleet Server over the `:8221` port. + +The `:8221` port is reserved for communication between the Elastic Agent and the +Fleet Server on the host. It is bound to the localhost of the machine/container +and cannot be accessed remotely. This ensures that the local Elastic Agent has +priority in check-ins with the Fleet Server. The `:8220` port is bound on +`0.0.0.0` to allow remote connections from external Elastic Agents that wish to +enroll and communicate. + +[float] +[[fleet-server-operating-modes]] +=== Operating Modes + +Elastic Agent can bootstrap the Fleet Server into three different modes. The mode +determines how Fleet Server exposes itself over the `:8220` port, but does not change +any other behaviour. + +==== Self-signed Certificate + +With the standard `--fleet-server-es` and `--fleet-server-service-token` options the +Elastic Agent will generate a CA and certificate for communication with +the Fleet Server that it starts. These certificates are generated +by Elastic Agent and passed to the Fleet Server, with Elastic Agent using the host's +hostname in the communication URL for valid TLS verification. + +==== HTTP Only + +Using the `--insecure` and `--fleet-server-insecure-http` will bootstrap the Fleet Server +without any certificates, it will be bound to `localhost:8220` and Elastic Agent will +communicate in clear-text. + +==== Custom Certificates (aka. Production) + +When deploying Elastic Agent in a production environment using enterprise generated +certificates will ensure that Elastic Agent running locally and remote Elastic Agent +will be able to connect over a verified TLS based connection. Certificates are specified +with `--fleet-server-cert`, `--fleet-server-cert-ca`, and `--certificate-authorities`. + +[float] +[[fleet-server-bootstrap-process]] +=== How Does It Bootstrap + +Bootstrapping is ran during the `enroll` command. The `install` command +or the `container` command (used by Docker container) will call the `enroll` +command to perform the bootstrapping process. + +==== Install Command + +When the `install` command is executed it places the Elastic Agent in the correct file +paths based on the operating system then starts the Elastic Agent service. The +`enroll` command is then executed by the `install` command. + +==== Container Command + +When the `container` command is executed it first copies the `data/downloads` directory +into a state path (`STATE_PATH`) then it executes the `enroll` command. + +==== Enroll Command + +This is where all the actual work of bootstrapping is performed. + +. A new `fleet.yml` is written with `fleet.server.*` options set along with +`fleet.server.bootstrap: true`. +. `enroll` command then either triggers a restart or spawns an Elastic Agent daemon. +.. First it checks if there is a running Elastic Agent daemon using the control socket. +In the case that there is a running Elastic Agent daemon it will trigger a restart through +the control socket. +.. In the case that there is no running Elastic Agent daemon a subprocess with the `run` +command will be started. The `enroll` command will then wait for the process to be up and +running by monitoring it with the control socket. +. The `status` command will then be polled through the control socket waiting for the +`fleet-server` application to be reported as `degraded`. `degraded` is reported because +the `fleet-server` is started without an `agent.id`. +. 
Once `fleet-server` is degraded the `enroll` command then uses localhost to communicate +with the running `fleet-server` to complete enrollment. This is the same enrollment used +by the Elastic Agent to a remote Fleet Server. +. A new `fleet.yml` is written with the enrollment information including its `agent.id` and +its API key to use for communication. The new `fleet.yml` still includes the `fleet.server.*`, +but this time the `fleet.server.bootstrap: false` is set. +. `enroll` command then either restarts the running Elatic Agent daemon if one was running +from Step 2, or it stops the spawned `run` subprocess and returns. From d3fb87b504f50308daf52845bf5b3562c26f1456 Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Thu, 13 Jan 2022 14:03:49 -0700 Subject: [PATCH 05/69] Remove deprecated old name for aws cloudwatch input (#29844) --- CHANGELOG.next.asciidoc | 1 + x-pack/filebeat/input/awscloudwatch/input.go | 12 +----------- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 0b42d803ce4..e78e1bc7bce 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -44,6 +44,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - With the default configuration the cef and panw modules will no longer send the `host` - Add `while_pattern` type to multiline reader. {pull}19662[19662] - auditd dataset: Use process.args to store program arguments instead of auditd.log.aNNN fields. {pull}29601[29601] +- Remove deprecated old awscloudwatch input name. {pull}29844[29844] *Heartbeat* - Only add monitor.status to browser events when summary. {pull}29460[29460] diff --git a/x-pack/filebeat/input/awscloudwatch/input.go b/x-pack/filebeat/input/awscloudwatch/input.go index 22f9efe15c6..967c8102f03 100644 --- a/x-pack/filebeat/input/awscloudwatch/input.go +++ b/x-pack/filebeat/input/awscloudwatch/input.go @@ -26,8 +26,7 @@ import ( ) const ( - inputName = "aws-cloudwatch" - oldInputName = "awscloudwatch" + inputName = "aws-cloudwatch" ) func init() { @@ -35,11 +34,6 @@ func init() { if err != nil { panic(errors.Wrapf(err, "failed to register %v input", inputName)) } - - err = input.Register(oldInputName, NewInput) - if err != nil { - panic(errors.Wrapf(err, "failed to register %v input", oldInputName)) - } } // awsCloudWatchInput is a input for AWS CloudWatch logs @@ -88,10 +82,6 @@ func NewInput(cfg *common.Config, connector channel.Connector, context input.Con } logger.Debug("aws-cloudwatch input config = ", config) - if config.Type == oldInputName { - logger.Warnf("%s input name is deprecated, please use %s instead", oldInputName, inputName) - } - if config.LogGroupARN != "" { logGroupName, regionName, err := parseARN(config.LogGroupARN) if err != nil { From cbdba8aa1645a896023e387b370b5b88960f3176 Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Thu, 13 Jan 2022 15:02:13 -0800 Subject: [PATCH 06/69] Add links to azure docs (#29829) * Update azure.asciidoc Co-authored-by: Insuk (Chris) Cho --- filebeat/docs/modules/azure.asciidoc | 8 ++++---- x-pack/filebeat/module/azure/_meta/docs.asciidoc | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/filebeat/docs/modules/azure.asciidoc b/filebeat/docs/modules/azure.asciidoc index 8997a206c03..92097e4cefa 100644 --- a/filebeat/docs/modules/azure.asciidoc +++ b/filebeat/docs/modules/azure.asciidoc @@ -22,16 +22,16 @@ There are several requirements before using the module since the logs will actua The module contains the 
following filesets: `activitylogs` :: -Will retrieve azure activity logs. Control-plane events on Azure Resource Manager resources. Activity logs provide insight into the operations that were performed on resources in your subscription. +Will retrieve azure activity logs. Control-plane events on Azure Resource Manager resources. Activity logs provide insight into the operations that were performed on resources in your subscription. To learn more, refer to the https://docs.microsoft.com/en-us/azure/azure-monitor/essentials/activity-log[Azure Activity log] documentation. `platformlogs` :: -Will retrieve azure platform logs. Platform logs provide detailed diagnostic and auditing information for Azure resources and the Azure platform they depend on. +Will retrieve azure platform logs. Platform logs provide detailed diagnostic and auditing information for Azure resources and the Azure platform they depend on. To learn more, refer to the https://docs.microsoft.com/en-us/azure/azure-monitor/essentials/platform-logs-overview[Azure platform logs] documentation. `signinlogs` :: -Will retrieve azure Active Directory sign-in logs. The sign-ins report provides information about the usage of managed applications and user sign-in activities. +Will retrieve azure Active Directory sign-in logs. The sign-ins report provides information about the usage of managed applications and user sign-in activities. To learn more, refer to the https://docs.microsoft.com/en-us/azure/active-directory/reports-monitoring/concept-sign-ins[Azure sign-in logs] documentation. `auditlogs` :: -Will retrieve azure Active Directory audit logs. The audit logs provide traceability through logs for all changes done by various features within Azure AD. Examples of audit logs include changes made to any resources within Azure AD like adding or removing users, apps, groups, roles and policies. +Will retrieve azure Active Directory audit logs. The audit logs provide traceability through logs for all changes done by various features within Azure AD. Examples of audit logs include changes made to any resources within Azure AD like adding or removing users, apps, groups, roles and policies. To learn more, refer to the https://docs.microsoft.com/en-us/azure/active-directory/reports-monitoring/concept-audit-logs[Azure audit logs] documentation. [float] === Module configuration diff --git a/x-pack/filebeat/module/azure/_meta/docs.asciidoc b/x-pack/filebeat/module/azure/_meta/docs.asciidoc index d8c52d2c4f1..9ed929f510d 100644 --- a/x-pack/filebeat/module/azure/_meta/docs.asciidoc +++ b/x-pack/filebeat/module/azure/_meta/docs.asciidoc @@ -17,16 +17,16 @@ There are several requirements before using the module since the logs will actua The module contains the following filesets: `activitylogs` :: -Will retrieve azure activity logs. Control-plane events on Azure Resource Manager resources. Activity logs provide insight into the operations that were performed on resources in your subscription. +Will retrieve azure activity logs. Control-plane events on Azure Resource Manager resources. Activity logs provide insight into the operations that were performed on resources in your subscription. To learn more, refer to the https://docs.microsoft.com/en-us/azure/azure-monitor/essentials/activity-log[Azure Activity log] documentation. `platformlogs` :: -Will retrieve azure platform logs. Platform logs provide detailed diagnostic and auditing information for Azure resources and the Azure platform they depend on. +Will retrieve azure platform logs. 
Platform logs provide detailed diagnostic and auditing information for Azure resources and the Azure platform they depend on. To learn more, refer to the https://docs.microsoft.com/en-us/azure/azure-monitor/essentials/platform-logs-overview[Azure platform logs] documentation. `signinlogs` :: -Will retrieve azure Active Directory sign-in logs. The sign-ins report provides information about the usage of managed applications and user sign-in activities. +Will retrieve azure Active Directory sign-in logs. The sign-ins report provides information about the usage of managed applications and user sign-in activities. To learn more, refer to the https://docs.microsoft.com/en-us/azure/active-directory/reports-monitoring/concept-sign-ins[Azure sign-in logs] documentation. `auditlogs` :: -Will retrieve azure Active Directory audit logs. The audit logs provide traceability through logs for all changes done by various features within Azure AD. Examples of audit logs include changes made to any resources within Azure AD like adding or removing users, apps, groups, roles and policies. +Will retrieve azure Active Directory audit logs. The audit logs provide traceability through logs for all changes done by various features within Azure AD. Examples of audit logs include changes made to any resources within Azure AD like adding or removing users, apps, groups, roles and policies. To learn more, refer to the https://docs.microsoft.com/en-us/azure/active-directory/reports-monitoring/concept-audit-logs[Azure audit logs] documentation. [float] === Module configuration From 07c2db4c7b28084a188b2dd16903f8fc04f733f0 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 14 Jan 2022 11:12:17 -0600 Subject: [PATCH 07/69] Add note on docker container as a requirement for browser monitors (#29794) (#29854) * Add note on docker container as a requirement for browser monitors We mention that you need the container in the synthetics guide, but not on this page, this is a bit cleaner. * Update monitor-browser.asciidoc * More * Update heartbeat/docs/monitors/monitor-browser.asciidoc Co-authored-by: EamonnTP Co-authored-by: EamonnTP (cherry picked from commit 09c3abf1df942428337499f6b7ebb8136d02ada6) Co-authored-by: Andrew Cholakian --- heartbeat/docs/monitors/monitor-browser.asciidoc | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/heartbeat/docs/monitors/monitor-browser.asciidoc b/heartbeat/docs/monitors/monitor-browser.asciidoc index 49f31668d47..7c35bb7b6d7 100644 --- a/heartbeat/docs/monitors/monitor-browser.asciidoc +++ b/heartbeat/docs/monitors/monitor-browser.asciidoc @@ -6,7 +6,12 @@ See the {observability-guide}/synthetics-quickstart.html[quick start guide]. beta[] The options described here configure {beatname_uc} to run the synthetic monitoring test suites via Synthetic Agent on the Chromium browser. -Additional shared options are defined in <>. +Additional shared options are defined in <>. + +Browser based monitors can only be run in our {beatname_uc} docker image, +or via the `elastic-agent-complete` docker image. +For more information, see {observability-guide}/synthetics-quickstart.html[Synthetic monitoring using Docker]. + Example configuration: [source,yaml] From 4974c9d289016ef719b962d9576dc5044acafc8c Mon Sep 17 00:00:00 2001 From: Denis Rechkunov Date: Mon, 17 Jan 2022 09:34:37 +0100 Subject: [PATCH 08/69] Move `umask` from code to service files (#29708) Before this the `umask` value was hard-coded in `libbeat`. 
It caused some confusion among the users since file permission configuration was practically ignored by a beat on the level of the binary. It's been decided that we move `umask` to the service files, so the distribution is secured by default but it still allows the users to set the value if they choose to. --- CHANGELOG.next.asciidoc | 7 +--- .../darwin/launchd-daemon.plist.tmpl | 4 +++ .../templates/linux/beatname.sh.tmpl | 2 +- .../templates/linux/elastic-agent.sh.tmpl | 2 +- .../templates/linux/elastic-agent.unit.tmpl | 1 + .../templates/linux/systemd.unit.tmpl | 1 + .../docs/filebeat-general-options.asciidoc | 3 -- libbeat/cmd/instance/beat.go | 11 ------ libbeat/cmd/instance/settings.go | 2 -- libbeat/cmd/instance/umask_other.go | 33 ----------------- libbeat/cmd/instance/umask_windows.go | 27 -------------- libbeat/docs/loggingconfig.asciidoc | 3 -- libbeat/docs/shared-brew-run.asciidoc | 2 ++ libbeat/docs/shared-systemd.asciidoc | 2 ++ libbeat/tests/system/test_umask.py | 36 ------------------- 15 files changed, 13 insertions(+), 123 deletions(-) delete mode 100644 libbeat/cmd/instance/umask_other.go delete mode 100644 libbeat/cmd/instance/umask_windows.go delete mode 100644 libbeat/tests/system/test_umask.py diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index e78e1bc7bce..856c46b10c5 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -150,6 +150,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add `default_region` config to AWS common module. {pull}29415[29415] - Add support for latest k8s versions v1.23 and v1.22 {pull}29575[29575] - Only connect to Elasticsearch instances with the same version or newer. {pull}29683[29683] +- Move umask from code to service files. {pull}29708[29708] *Auditbeat* @@ -214,9 +215,3 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d ==== Known Issue *Journalbeat* - - - - - - diff --git a/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl b/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl index b98e9081172..c4581e678ed 100644 --- a/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl +++ b/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl @@ -22,5 +22,9 @@ RunAtLoad + Umask + + 0027 + diff --git a/dev-tools/packaging/templates/linux/beatname.sh.tmpl b/dev-tools/packaging/templates/linux/beatname.sh.tmpl index 1343ce2b4c4..7721ed6ee00 100644 --- a/dev-tools/packaging/templates/linux/beatname.sh.tmpl +++ b/dev-tools/packaging/templates/linux/beatname.sh.tmpl @@ -2,7 +2,7 @@ # Script to run {{.BeatName | title}} in foreground with the same path settings that # the init script / systemd unit file would do. - +umask 0027 exec /usr/share/{{.BeatName}}/bin/{{.BeatName}} \ --path.home /usr/share/{{.BeatName}} \ --path.config /etc/{{.BeatName}} \ diff --git a/dev-tools/packaging/templates/linux/elastic-agent.sh.tmpl b/dev-tools/packaging/templates/linux/elastic-agent.sh.tmpl index 835b5955324..fb36046b4e2 100644 --- a/dev-tools/packaging/templates/linux/elastic-agent.sh.tmpl +++ b/dev-tools/packaging/templates/linux/elastic-agent.sh.tmpl @@ -2,7 +2,7 @@ # Script to run {{.BeatName | title}} in foreground with the same path settings that # the init script / systemd unit file would do. 
-
+umask 0027
 exec /usr/share/{{.BeatName}}/bin/{{.BeatName}} \
   --path.home /var/lib/{{.BeatName}} \
   --path.config /etc/{{.BeatName}} \
diff --git a/dev-tools/packaging/templates/linux/elastic-agent.unit.tmpl b/dev-tools/packaging/templates/linux/elastic-agent.unit.tmpl
index c7aa5ac2a17..1ced8b6471b 100644
--- a/dev-tools/packaging/templates/linux/elastic-agent.unit.tmpl
+++ b/dev-tools/packaging/templates/linux/elastic-agent.unit.tmpl
@@ -9,6 +9,7 @@ After=network-online.target
 User={{ .BeatUser }}
 Group={{ .BeatUser }}
 {{- end }}
+UMask=0027
 Environment="GODEBUG='madvdontneed=1'"
 Environment="BEAT_CONFIG_OPTS=-c /etc/{{.BeatName}}/{{.BeatName}}.yml"
 ExecStart=/usr/bin/{{.BeatName}} run --environment systemd $BEAT_CONFIG_OPTS
diff --git a/dev-tools/packaging/templates/linux/systemd.unit.tmpl b/dev-tools/packaging/templates/linux/systemd.unit.tmpl
index 367f9e3532f..af1fe3af342 100644
--- a/dev-tools/packaging/templates/linux/systemd.unit.tmpl
+++ b/dev-tools/packaging/templates/linux/systemd.unit.tmpl
@@ -9,6 +9,7 @@ After=network-online.target
 User={{ .BeatUser }}
 Group={{ .BeatUser }}
 {{- end }}
+UMask=0027
 Environment="GODEBUG='madvdontneed=1'"
 Environment="BEAT_LOG_OPTS="
 Environment="BEAT_CONFIG_OPTS=-c /etc/{{.BeatName}}/{{.BeatName}}.yml"
diff --git a/filebeat/docs/filebeat-general-options.asciidoc b/filebeat/docs/filebeat-general-options.asciidoc
index d1bcdf2e545..5ed2bd6db3b 100644
--- a/filebeat/docs/filebeat-general-options.asciidoc
+++ b/filebeat/docs/filebeat-general-options.asciidoc
@@ -40,9 +40,6 @@ That means in case there are some states where the TTL expired, these are only r
 The permissions mask to apply on registry data file. The default value is 0600. The
 permissions option must be a valid Unix-style file permissions mask expressed in octal notation. In Go, numbers in octal notation must start with 0.
 
-The most permissive mask allowed is 0640. If a higher permissions mask is
-specified via this setting, it will be subject to an umask of 0027.
-
 This option is not supported on Windows.
 
 Examples:
diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go
index ab52b4f545c..7fbb6a0beff 100644
--- a/libbeat/cmd/instance/beat.go
+++ b/libbeat/cmd/instance/beat.go
@@ -157,10 +157,6 @@ func initRand() {
 // instance.
 // XXX Move this as a *Beat method?
 func Run(settings Settings, bt beat.Creator) error {
-	err := setUmaskWithSettings(settings)
-	if err != nil && err != errNotImplemented {
-		return errw.Wrap(err, "could not set umask")
-	}
 
 	return handleError(func() error {
 		defer func() {
@@ -1174,10 +1170,3 @@ func initPaths(cfg *common.Config) error {
 	}
 	return nil
 }
-
-func setUmaskWithSettings(settings Settings) error {
-	if settings.Umask != nil {
-		return setUmask(*settings.Umask)
-	}
-	return setUmask(0027) // 0640 for files | 0750 for dirs
-}
diff --git a/libbeat/cmd/instance/settings.go b/libbeat/cmd/instance/settings.go
index 2d5c6cd2506..b46f248917c 100644
--- a/libbeat/cmd/instance/settings.go
+++ b/libbeat/cmd/instance/settings.go
@@ -46,8 +46,6 @@ type Settings struct {
 
 	Processing processing.SupportFactory
 
-	Umask *int
-
 	// InputQueueSize is the size for the internal publisher queue in the
 	// publisher pipeline. This is only useful when the Beat plans to use
 	// beat.DropIfFull PublishMode. Leave as zero for default.
diff --git a/libbeat/cmd/instance/umask_other.go b/libbeat/cmd/instance/umask_other.go
deleted file mode 100644
index ba9245a7f4a..00000000000
--- a/libbeat/cmd/instance/umask_other.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Licensed to Elasticsearch B.V. under one or more contributor
-// license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright
-// ownership. Elasticsearch B.V. licenses this file to you under
-// the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-//go:build !windows
-// +build !windows
-
-package instance
-
-import (
-	"errors"
-	"syscall"
-)
-
-var errNotImplemented = errors.New("not implemented on platform")
-
-func setUmask(newmask int) error {
-	syscall.Umask(newmask)
-	return nil // the umask syscall always succeeds: http://man7.org/linux/man-pages/man2/umask.2.html#RETURN_VALUE
-}
diff --git a/libbeat/cmd/instance/umask_windows.go b/libbeat/cmd/instance/umask_windows.go
deleted file mode 100644
index e52886301fb..00000000000
--- a/libbeat/cmd/instance/umask_windows.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Licensed to Elasticsearch B.V. under one or more contributor
-// license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright
-// ownership. Elasticsearch B.V. licenses this file to you under
-// the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package instance
-
-import "errors"
-
-var errNotImplemented = errors.New("not implemented on windows")
-
-func setUmask(newmask int) error {
-	// No way to set umask on Windows
-	return errNotImplemented
-}
diff --git a/libbeat/docs/loggingconfig.asciidoc b/libbeat/docs/loggingconfig.asciidoc
index b5443d2d978..c25f74e06a7 100644
--- a/libbeat/docs/loggingconfig.asciidoc
+++ b/libbeat/docs/loggingconfig.asciidoc
@@ -231,9 +231,6 @@ The permissions mask to apply when rotating log files. The default value is
 expressed in octal notation. In Go, numbers in octal notation must start
 with '0'.
 
-The most permissive mask allowed is 0640. If a higher permissions mask is
-specified via this setting, it will be subject to an umask of 0027.
-
 This option is not supported on Windows.
 
 Examples:
diff --git a/libbeat/docs/shared-brew-run.asciidoc b/libbeat/docs/shared-brew-run.asciidoc
index 23b5a7c4cca..92b686d7417 100644
--- a/libbeat/docs/shared-brew-run.asciidoc
+++ b/libbeat/docs/shared-brew-run.asciidoc
@@ -8,6 +8,8 @@ run:
 brew services start elastic/tap/{beatname_lc}-full
 -----
 
+The launchd service is configured with `Umask=0027` which means the most permissive mask allowed for files created by {beatname_uc} is `0640`. All configured file permissions higher than `0640` will be ignored. Please edit the service file manually in case you need to change that.
+
 ifndef::requires-sudo[]
 To run {beatname_uc} in the foreground instead of running it as a background
 service, run:
diff --git a/libbeat/docs/shared-systemd.asciidoc b/libbeat/docs/shared-systemd.asciidoc
index d565ef7ef39..802eef9858e 100644
--- a/libbeat/docs/shared-systemd.asciidoc
+++ b/libbeat/docs/shared-systemd.asciidoc
@@ -5,6 +5,8 @@ The DEB and RPM packages include a service unit for Linux systems with
 systemd. On these systems, you can manage {beatname_uc} by using the usual
 systemd commands.
 
+The service unit is configured with `UMask=0027` which means the most permissive mask allowed for files created by {beatname_uc} is `0640`. All configured file permissions higher than `0640` will be ignored. Please edit the unit file manually in case you need to change that.
+
 ifdef::apm-server[]
 We recommend that the {beatname_pkg} process is run as a non-root user. Therefore,
 that is the default setup for {beatname_uc}'s DEB package and RPM installation.
diff --git a/libbeat/tests/system/test_umask.py b/libbeat/tests/system/test_umask.py
deleted file mode 100644
index d6a7b7f391c..00000000000
--- a/libbeat/tests/system/test_umask.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from base import BaseTest
-
-import os
-import stat
-import unittest
-import sys
-
-INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False)
-
-
-class TestUmask(BaseTest):
-    """
-    Test default umask
-    """
-
-    DEFAULT_UMASK = 0o027
-
-    def setUp(self):
-        super(BaseTest, self).setUp()
-
-        self.output_file_permissions = 0o666
-
-        self.render_config_template(output_file_permissions=self.output_file_permissions)
-        proc = self.start_beat()
-        self.wait_until(lambda: self.output_lines() > 0, max_timeout=2)
-        proc.check_kill_and_wait()
-
-    @unittest.skipIf(sys.platform.startswith("win"), "umask is not available on Windows")
-    def test_output_file_perms(self):
-        """
-        Test that output file permissions respect default umask
-        """
-        output_file_path = os.path.join(self.working_dir, "output", "mockbeat-" + self.today + ".ndjson")
-        perms = stat.S_IMODE(os.lstat(output_file_path).st_mode)
-
-        self.assertEqual(perms, self.output_file_permissions & ~TestUmask.DEFAULT_UMASK)

From 181b83a91f6ec12b48276194fef8d37bc7d1b442 Mon Sep 17 00:00:00 2001
From: Michael Katsoulis
Date: Mon, 17 Jan 2022 14:30:44 +0200
Subject: [PATCH 09/69] Containerd metricbeat module (#29247)

* Create cpu, memory, blkio metricset of metricbeat containerd module
---
 CHANGELOG.next.asciidoc                       |   1 +
 metricbeat/docs/fields.asciidoc               | 454 ++++++++++++++++++
 metricbeat/docs/modules/containerd.asciidoc   |  91 ++++
 .../docs/modules/containerd/blkio.asciidoc    |  25 +
 .../docs/modules/containerd/cpu.asciidoc      |  25 +
 .../docs/modules/containerd/memory.asciidoc   |  25 +
 metricbeat/docs/modules_list.asciidoc         |   5 +
 x-pack/metricbeat/include/list.go             |   4 +
 x-pack/metricbeat/metricbeat.reference.yml    |  13 +
 .../module/containerd/_meta/config.yml        |  11 +
 .../module/containerd/_meta/docs.asciidoc     |  38 ++
 .../module/containerd/_meta/fields.yml        |  15 +
 .../containerd/_meta/test/containerd.v1.5.2   |  99 ++++
 .../module/containerd/blkio/_meta/data.json   |  35 ++
 .../containerd/blkio/_meta/docs.asciidoc      |   1 +
 .../module/containerd/blkio/_meta/fields.yml  |  49 ++
 .../_meta/test/containerd.v1.5.2.expected     |  37 ++
 .../module/containerd/blkio/blkio.go          | 129 +++++
 .../module/containerd/blkio/blkio_test.go     |  42 ++
 x-pack/metricbeat/module/containerd/config.go |  19 +
 .../module/containerd/containerd.go           | 116 +++++
 .../module/containerd/cpu/_meta/data.json     |  38 ++
 .../module/containerd/cpu/_meta/docs.asciidoc |   1 +
 .../module/containerd/cpu/_meta/fields.yml    |  54 +++
 .../cpu/_meta/test/containerd.v1.5.2.expected | 398 +++++++++++++++
 .../containerd/cpu/_meta/testdata/config.yml  |   3 +
 .../containerd/cpu/_meta/testdata/docs.plain  |  99 ++++
 .../_meta/testdata/docs.plain-expected.json   | 411 ++++++++++++++++
 .../metricbeat/module/containerd/cpu/cpu.go   | 235 +++++++++
 .../module/containerd/cpu/cpu_test.go         |  31 ++
 x-pack/metricbeat/module/containerd/doc.go    |   6 +
 x-pack/metricbeat/module/containerd/fields.go |  23 +
 x-pack/metricbeat/module/containerd/helper.go |  25 +
 .../module/containerd/memory/_meta/data.json  |  51 ++
 .../containerd/memory/_meta/docs.asciidoc     |   1 +
 .../module/containerd/memory/_meta/fields.yml | 109 +++++
 .../_meta/test/containerd.v1.5.2.expected     |  56 +++
 .../module/containerd/memory/memory.go        | 167 +++++++
 .../module/containerd/memory/memory_test.go   |  26 +
 .../modules.d/containerd.yml.disabled         |  14 +
 40 files changed, 2982 insertions(+)
 create mode 100644 metricbeat/docs/modules/containerd.asciidoc
 create mode 100644 metricbeat/docs/modules/containerd/blkio.asciidoc
 create mode 100644 metricbeat/docs/modules/containerd/cpu.asciidoc
 create mode 100644 metricbeat/docs/modules/containerd/memory.asciidoc
 create mode 100644 x-pack/metricbeat/module/containerd/_meta/config.yml
 create mode 100644 x-pack/metricbeat/module/containerd/_meta/docs.asciidoc
 create mode 100644 x-pack/metricbeat/module/containerd/_meta/fields.yml
 create mode 100644 x-pack/metricbeat/module/containerd/_meta/test/containerd.v1.5.2
 create mode 100644 x-pack/metricbeat/module/containerd/blkio/_meta/data.json
 create mode 100644 x-pack/metricbeat/module/containerd/blkio/_meta/docs.asciidoc
 create mode 100644 x-pack/metricbeat/module/containerd/blkio/_meta/fields.yml
 create mode 100644 x-pack/metricbeat/module/containerd/blkio/_meta/test/containerd.v1.5.2.expected
 create mode 100644 x-pack/metricbeat/module/containerd/blkio/blkio.go
 create mode 100644 x-pack/metricbeat/module/containerd/blkio/blkio_test.go
 create mode 100644 x-pack/metricbeat/module/containerd/config.go
 create mode 100644 x-pack/metricbeat/module/containerd/containerd.go
 create mode 100644 x-pack/metricbeat/module/containerd/cpu/_meta/data.json
 create mode 100644 x-pack/metricbeat/module/containerd/cpu/_meta/docs.asciidoc
 create mode 100644 x-pack/metricbeat/module/containerd/cpu/_meta/fields.yml
 create mode 100644 x-pack/metricbeat/module/containerd/cpu/_meta/test/containerd.v1.5.2.expected
 create mode 100644 x-pack/metricbeat/module/containerd/cpu/_meta/testdata/config.yml
 create mode 100644 x-pack/metricbeat/module/containerd/cpu/_meta/testdata/docs.plain
 create mode 100644 x-pack/metricbeat/module/containerd/cpu/_meta/testdata/docs.plain-expected.json
 create mode 100644 x-pack/metricbeat/module/containerd/cpu/cpu.go
 create mode 100644 x-pack/metricbeat/module/containerd/cpu/cpu_test.go
 create mode 100644 x-pack/metricbeat/module/containerd/doc.go
 create mode 100644
x-pack/metricbeat/module/containerd/fields.go create mode 100644 x-pack/metricbeat/module/containerd/helper.go create mode 100644 x-pack/metricbeat/module/containerd/memory/_meta/data.json create mode 100644 x-pack/metricbeat/module/containerd/memory/_meta/docs.asciidoc create mode 100644 x-pack/metricbeat/module/containerd/memory/_meta/fields.yml create mode 100644 x-pack/metricbeat/module/containerd/memory/_meta/test/containerd.v1.5.2.expected create mode 100644 x-pack/metricbeat/module/containerd/memory/memory.go create mode 100644 x-pack/metricbeat/module/containerd/memory/memory_test.go create mode 100644 x-pack/metricbeat/modules.d/containerd.yml.disabled diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 856c46b10c5..3e4267ddd65 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -174,6 +174,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Preliminary AIX support {pull}27954[27954] - Add option to skip older k8s events {pull}29396[29396] - Add `add_resource_metadata` configuration to Kubernetes module. {pull}29133[29133] +- Add `containerd` module with `cpu`, `memory`, `blkio` metricsets. {pull}29247[29247] - Add `container.id` and `container.runtime` ECS fields in container metricset. {pull}29560[29560] - Add `memory.workingset.limit.pct` field in Kubernetes container/pod metricset. {pull}29547[29547] - Add k8s metadata in state_cronjob metricset. {pull}29572[29572] diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index 9f265eaafd9..d1b6ece6ffc 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -27,6 +27,7 @@ grouped in the following categories: * <> * <> * <> +* <> * <> * <> * <> @@ -10267,6 +10268,459 @@ type: long -- +[[exported-fields-containerd]] +== Containerd fields + +Containerd stats collected from containerd + + + +[float] +=== containerd + +Information and statistics about containerd's running containers. + + + +*`containerd.namespace`*:: ++ +-- +Containerd namespace + + +type: keyword + +-- + +[float] +=== blkio + +Block I/O metrics. + + + +[float] +=== read + +Accumulated reads during the life of the container + + + +*`containerd.blkio.read.ops`*:: ++ +-- +Number of reads during the life of the container + + +type: long + +-- + +*`containerd.blkio.read.bytes`*:: ++ +-- +Bytes read during the life of the container + + +type: long + +format: bytes + +-- + +[float] +=== write + +Accumulated writes during the life of the container + + + +*`containerd.blkio.write.ops`*:: ++ +-- +Number of writes during the life of the container + + +type: long + +-- + +*`containerd.blkio.write.bytes`*:: ++ +-- +Bytes written during the life of the container + + +type: long + +format: bytes + +-- + +[float] +=== summary + +Accumulated reads and writes during the life of the container + + + +*`containerd.blkio.summary.ops`*:: ++ +-- +Number of I/O operations during the life of the container + + +type: long + +-- + +*`containerd.blkio.summary.bytes`*:: ++ +-- +Bytes read and written during the life of the container + + +type: long + +format: bytes + +-- + +[float] +=== cpu + +Containerd Runtime CPU metrics. + + + +*`containerd.cpu.system.total`*:: ++ +-- +Total user and system CPU time spent in seconds. 
+ + +type: double + +-- + + + +*`containerd.cpu.usage.kernel.ns`*:: ++ +-- +CPU Kernel usage nanoseconds + + +type: double + +-- + + +*`containerd.cpu.usage.user.ns`*:: ++ +-- +CPU User usage nanoseconds + + +type: double + +-- + + +*`containerd.cpu.usage.total.ns`*:: ++ +-- +CPU total usage nanoseconds + + +type: double + +-- + +*`containerd.cpu.usage.total.pct`*:: ++ +-- +Percentage of total CPU time normalized by the number of CPU cores + + +type: scaled_float + +format: percent + +-- + +*`containerd.cpu.usage.kernel.pct`*:: ++ +-- +Percentage of time in kernel space normalized by the number of CPU cores. + + +type: scaled_float + +format: percent + +-- + +*`containerd.cpu.usage.user.pct`*:: ++ +-- +Percentage of time in user space normalized by the number of CPU cores. + + +type: scaled_float + +format: percent + +-- + +*`containerd.cpu.usage.cpu.*.ns`*:: ++ +-- +CPU usage nanoseconds in this cpu. + + +type: object + +-- + +[float] +=== memory + +memory + + + +*`containerd.memory.workingset.pct`*:: ++ +-- +Memory working set percentage. + + +type: scaled_float + +format: percent + +-- + +*`containerd.memory.rss`*:: ++ +-- +Total memory resident set size. + + +type: long + +format: bytes + +-- + +*`containerd.memory.activeFiles`*:: ++ +-- +Total active file bytes. + + +type: long + +format: bytes + +-- + +*`containerd.memory.cache`*:: ++ +-- +Total cache bytes. + + +type: long + +format: bytes + +-- + +*`containerd.memory.inactiveFiles`*:: ++ +-- +Total inactive file bytes. + + +type: long + +format: bytes + +-- + +[float] +=== usage + +Usage memory stats. + + + +*`containerd.memory.usage.max`*:: ++ +-- +Max memory usage. + + +type: long + +format: bytes + +-- + +*`containerd.memory.usage.pct`*:: ++ +-- +Total allocated memory percentage. + + +type: scaled_float + +format: percent + +-- + +*`containerd.memory.usage.total`*:: ++ +-- +Total memory usage. + + +type: long + +format: bytes + +-- + +*`containerd.memory.usage.fail.count`*:: ++ +-- +Fail counter. + + +type: scaled_float + +-- + +*`containerd.memory.usage.limit`*:: ++ +-- +Memory usage limit. + + +type: long + +format: bytes + +-- + +[float] +=== kernel + +Kernel memory stats. + + + +*`containerd.memory.kernel.max`*:: ++ +-- +Kernel max memory usage. + + +type: long + +format: bytes + +-- + +*`containerd.memory.kernel.total`*:: ++ +-- +Kernel total memory usage. + + +type: long + +format: bytes + +-- + +*`containerd.memory.kernel.fail.count`*:: ++ +-- +Kernel fail counter. + + +type: scaled_float + +-- + +*`containerd.memory.kernel.limit`*:: ++ +-- +Kernel memory limit. + + +type: long + +format: bytes + +-- + +[float] +=== swap + +Swap memory stats. + + + +*`containerd.memory.swap.max`*:: ++ +-- +Swap max memory usage. + + +type: long + +format: bytes + +-- + +*`containerd.memory.swap.total`*:: ++ +-- +Swap total memory usage. + + +type: long + +format: bytes + +-- + +*`containerd.memory.swap.fail.count`*:: ++ +-- +Swap fail counter. + + +type: scaled_float + +-- + +*`containerd.memory.swap.limit`*:: ++ +-- +Swap memory limit. + + +type: long + +format: bytes + +-- + [[exported-fields-coredns]] == Coredns fields diff --git a/metricbeat/docs/modules/containerd.asciidoc b/metricbeat/docs/modules/containerd.asciidoc new file mode 100644 index 00000000000..41c600b202b --- /dev/null +++ b/metricbeat/docs/modules/containerd.asciidoc @@ -0,0 +1,91 @@ +//// +This file is generated! 
See scripts/mage/docs_collector.go
+////
+
+:modulename: containerd
+
+[[metricbeat-module-containerd]]
+[role="xpack"]
+== Containerd module
+
+beta[]
+
+include::{libbeat-dir}/shared/integration-link.asciidoc[]
+
+:modulename!:
+
+The Containerd module collects CPU, memory, and blkio statistics about
+running containers controlled by the containerd runtime.
+
+The current metricsets are `cpu`, `blkio`, and `memory`; all are enabled by default.
+
+[float]
+=== Prerequisites
+The `containerd` daemon has to be configured to provide metrics before the containerd module is enabled.
+
+In the configuration file located at `/etc/containerd/config.toml`, the metrics endpoint needs to
+be set, and the containerd daemon needs to be restarted.
+
+```
+[metrics]
+    address = "127.0.0.1:1338"
+```
+
+[float]
+=== Compatibility
+
+The Containerd module is tested with the following versions of Containerd:
+v1.5.2
+
+[float]
+=== Module-specific configuration notes
+
+For the cpu metricset, if the `calcpct.cpu` setting is set to true, CPU usage percentages are calculated,
+specifically the fields `containerd.cpu.usage.total.pct`, `containerd.cpu.usage.kernel.pct`, and `containerd.cpu.usage.user.pct`.
+The default value is true.
+
+For the memory metricset, if the `calcpct.memory` setting is set to true, memory usage percentages are calculated,
+specifically the fields `containerd.memory.usage.pct` and `containerd.memory.workingset.pct`.
+The default value is true.
+
+
+
+[float]
+=== Example configuration
+
+The Containerd module supports the standard configuration options that are described
+in <>. Here is an example configuration:
+
+[source,yaml]
+----
+metricbeat.modules:
+- module: containerd
+  metricsets: ["cpu", "memory", "blkio"]
+  period: 10s
+  # containerd metrics endpoint is configured in /etc/containerd/config.toml
+  # Metrics endpoint does not listen by default
+  # https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.toml.5.md
+  hosts: ["localhost:1338"]
+  # if set to true, cpu and memory usage percentages will be calculated. Default is true
+  calcpct.cpu: true
+  calcpct.memory: true
+
+----
+
+[float]
+=== Metricsets
+
+The following metricsets are available:
+
+* <>
+
+* <>
+
+* <>
+
+include::containerd/blkio.asciidoc[]
+
+include::containerd/cpu.asciidoc[]
+
+include::containerd/memory.asciidoc[]
+
diff --git a/metricbeat/docs/modules/containerd/blkio.asciidoc b/metricbeat/docs/modules/containerd/blkio.asciidoc
new file mode 100644
index 00000000000..6618e45bcd2
--- /dev/null
+++ b/metricbeat/docs/modules/containerd/blkio.asciidoc
@@ -0,0 +1,25 @@
+////
+This file is generated! See scripts/mage/docs_collector.go
+////
+
+[[metricbeat-metricset-containerd-blkio]]
+[role="xpack"]
+=== Containerd blkio metricset
+
+beta[]
+
+include::../../../../x-pack/metricbeat/module/containerd/blkio/_meta/docs.asciidoc[]
+
+This is a default metricset. If the host module is unconfigured, this metricset is enabled by default.
+
+==== Fields
+
+For a description of each field in the metricset, see the
+<> section.
+
+Here is an example document generated by this metricset:
+
+[source,json]
+----
+include::../../../../x-pack/metricbeat/module/containerd/blkio/_meta/data.json[]
+----
diff --git a/metricbeat/docs/modules/containerd/cpu.asciidoc b/metricbeat/docs/modules/containerd/cpu.asciidoc
new file mode 100644
index 00000000000..0d14f45da3e
--- /dev/null
+++ b/metricbeat/docs/modules/containerd/cpu.asciidoc
@@ -0,0 +1,25 @@
+////
+This file is generated!
See scripts/mage/docs_collector.go +//// + +[[metricbeat-metricset-containerd-cpu]] +[role="xpack"] +=== Containerd cpu metricset + +beta[] + +include::../../../../x-pack/metricbeat/module/containerd/cpu/_meta/docs.asciidoc[] + +This is a default metricset. If the host module is unconfigured, this metricset is enabled by default. + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../../x-pack/metricbeat/module/containerd/cpu/_meta/data.json[] +---- diff --git a/metricbeat/docs/modules/containerd/memory.asciidoc b/metricbeat/docs/modules/containerd/memory.asciidoc new file mode 100644 index 00000000000..a09ff8fd762 --- /dev/null +++ b/metricbeat/docs/modules/containerd/memory.asciidoc @@ -0,0 +1,25 @@ +//// +This file is generated! See scripts/mage/docs_collector.go +//// + +[[metricbeat-metricset-containerd-memory]] +[role="xpack"] +=== Containerd memory metricset + +beta[] + +include::../../../../x-pack/metricbeat/module/containerd/memory/_meta/docs.asciidoc[] + +This is a default metricset. If the host module is unconfigured, this metricset is enabled by default. + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../../x-pack/metricbeat/module/containerd/memory/_meta/data.json[] +---- diff --git a/metricbeat/docs/modules_list.asciidoc b/metricbeat/docs/modules_list.asciidoc index ad5d929514f..30177e13624 100644 --- a/metricbeat/docs/modules_list.asciidoc +++ b/metricbeat/docs/modules_list.asciidoc @@ -72,6 +72,10 @@ This file is generated! See scripts/mage/docs_collector.go .1+| .1+| |<> beta[] |<> beta[] |image:./images/icon-yes.png[Prebuilt dashboards are available] | .1+| .1+| |<> beta[] +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.3+| .3+| |<> beta[] +|<> beta[] +|<> beta[] |<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | .1+| .1+| |<> |<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | @@ -322,6 +326,7 @@ include::modules/ceph.asciidoc[] include::modules/cloudfoundry.asciidoc[] include::modules/cockroachdb.asciidoc[] include::modules/consul.asciidoc[] +include::modules/containerd.asciidoc[] include::modules/coredns.asciidoc[] include::modules/couchbase.asciidoc[] include::modules/couchdb.asciidoc[] diff --git a/x-pack/metricbeat/include/list.go b/x-pack/metricbeat/include/list.go index f06eed2652a..3fdd6556b58 100644 --- a/x-pack/metricbeat/include/list.go +++ b/x-pack/metricbeat/include/list.go @@ -25,6 +25,10 @@ import ( _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/cloudfoundry/counter" _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/cloudfoundry/value" _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/cockroachdb" + _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/containerd" + _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/containerd/blkio" + _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/containerd/cpu" + _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/containerd/memory" _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/coredns" _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/coredns/stats" _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/enterprisesearch" diff --git 
a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml
index f974c4e77f7..39d4937afab 100644
--- a/x-pack/metricbeat/metricbeat.reference.yml
+++ b/x-pack/metricbeat/metricbeat.reference.yml
@@ -427,6 +427,19 @@ metricbeat.modules:
   hosts: ["localhost:8500"]
 
 
+#------------------------------ Containerd Module ------------------------------
+- module: containerd
+  metricsets: ["cpu", "memory", "blkio"]
+  period: 10s
+  # containerd metrics endpoint is configured in /etc/containerd/config.toml
+  # Metrics endpoint does not listen by default
+  # https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.toml.5.md
+  hosts: ["localhost:1338"]
+  # if set to true, cpu and memory usage percentages will be calculated. Default is true
+  calcpct.cpu: true
+  calcpct.memory: true
+
+
 #------------------------------- Coredns Module -------------------------------
 - module: coredns
   metricsets: ["stats"]
diff --git a/x-pack/metricbeat/module/containerd/_meta/config.yml b/x-pack/metricbeat/module/containerd/_meta/config.yml
new file mode 100644
index 00000000000..6197801bfcb
--- /dev/null
+++ b/x-pack/metricbeat/module/containerd/_meta/config.yml
@@ -0,0 +1,11 @@
+- module: containerd
+  metricsets: ["cpu", "memory", "blkio"]
+  period: 10s
+  # containerd metrics endpoint is configured in /etc/containerd/config.toml
+  # Metrics endpoint does not listen by default
+  # https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.toml.5.md
+  hosts: ["localhost:1338"]
+  # if set to true, cpu and memory usage percentages will be calculated. Default is true
+  calcpct.cpu: true
+  calcpct.memory: true
+
diff --git a/x-pack/metricbeat/module/containerd/_meta/docs.asciidoc b/x-pack/metricbeat/module/containerd/_meta/docs.asciidoc
new file mode 100644
index 00000000000..3ee37e6722c
--- /dev/null
+++ b/x-pack/metricbeat/module/containerd/_meta/docs.asciidoc
@@ -0,0 +1,38 @@
+include::{libbeat-dir}/shared/integration-link.asciidoc[]
+
+:modulename!:
+
+The Containerd module collects CPU, memory, and blkio statistics about
+running containers controlled by the containerd runtime.
+
+The current metricsets are `cpu`, `blkio`, and `memory`; all are enabled by default.
+
+[float]
+=== Prerequisites
+The `containerd` daemon has to be configured to provide metrics before the containerd module is enabled.
+
+In the configuration file located at `/etc/containerd/config.toml`, the metrics endpoint needs to
+be set, and the containerd daemon needs to be restarted.
+
+```
+[metrics]
+    address = "127.0.0.1:1338"
+```
+
+[float]
+=== Compatibility
+
+The Containerd module is tested with the following versions of Containerd:
+v1.5.2
+
+[float]
+=== Module-specific configuration notes
+
+For the cpu metricset, if the `calcpct.cpu` setting is set to true, CPU usage percentages are calculated,
+specifically the fields `containerd.cpu.usage.total.pct`, `containerd.cpu.usage.kernel.pct`, and `containerd.cpu.usage.user.pct`.
+The default value is true.
+
+For the memory metricset, if the `calcpct.memory` setting is set to true, memory usage percentages are calculated,
+specifically the fields `containerd.memory.usage.pct` and `containerd.memory.workingset.pct`.
+The default value is true.
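The percentage math itself lives in `cpu.go` and `memory.go`, whose bodies are not shown at this point in the excerpt. The following is a rough, hypothetical sketch — an assumption about the approach, not the module's actual code — of how a percentage normalized by the number of CPU cores can be derived from two samples of the cumulative `container_cpu_total_nanoseconds` counter:

```
package main

import (
	"fmt"
	"time"
)

// cpuUsagePct is a hypothetical helper, not taken from this patch: it turns
// two samples of a cumulative nanosecond counter into a usage ratio
// normalized by the number of CPU cores, matching the field description
// "Percentage of total CPU time normalized by the number of CPU cores".
func cpuUsagePct(prevNS, curNS uint64, elapsed time.Duration, cores int) float64 {
	if elapsed <= 0 || cores <= 0 || curNS < prevNS {
		return 0 // no window to measure, or the counter was reset
	}
	used := float64(curNS - prevNS)                            // CPU time consumed in the window
	window := float64(elapsed.Nanoseconds()) * float64(cores) // CPU time available in the window
	return used / window
}

func main() {
	// 6s of CPU time consumed over a 10s period on 12 cores -> 0.05 (i.e. 5%).
	fmt.Println(cpuUsagePct(0, 6e9, 10*time.Second, 12))
}
```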
+ diff --git a/x-pack/metricbeat/module/containerd/_meta/fields.yml b/x-pack/metricbeat/module/containerd/_meta/fields.yml new file mode 100644 index 00000000000..9d5a0ccc29d --- /dev/null +++ b/x-pack/metricbeat/module/containerd/_meta/fields.yml @@ -0,0 +1,15 @@ +- key: containerd + title: "Containerd" + release: beta + description: > + Containerd stats collected from containerd + fields: + - name: containerd + type: group + description: > + Information and statistics about containerd's running containers. + fields: + - name: namespace + type: keyword + description: > + Containerd namespace diff --git a/x-pack/metricbeat/module/containerd/_meta/test/containerd.v1.5.2 b/x-pack/metricbeat/module/containerd/_meta/test/containerd.v1.5.2 new file mode 100644 index 00000000000..f82e4d3c1d9 --- /dev/null +++ b/x-pack/metricbeat/module/containerd/_meta/test/containerd.v1.5.2 @@ -0,0 +1,99 @@ +# TYPE container_blkio_io_service_bytes_recursive_bytes gauge +container_blkio_io_service_bytes_recursive_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Async"} 0 +container_blkio_io_service_bytes_recursive_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Discard"} 0 +container_blkio_io_service_bytes_recursive_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Read"} 6.9246976e+07 +container_blkio_io_service_bytes_recursive_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Sync"} 6.9271552e+07 +container_blkio_io_service_bytes_recursive_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Total"} 6.9271552e+07 +container_blkio_io_service_bytes_recursive_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Write"} 24576 +# TYPE container_blkio_io_serviced_recursive_total gauge +container_blkio_io_serviced_recursive_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Async"} 0 +container_blkio_io_serviced_recursive_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Discard"} 0 +container_blkio_io_serviced_recursive_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Read"} 830 +container_blkio_io_serviced_recursive_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Sync"} 832 +container_blkio_io_serviced_recursive_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Total"} 832 +container_blkio_io_serviced_recursive_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Write"} 2 +# TYPE container_cpu_kernel_nanoseconds gauge 
+container_cpu_kernel_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 5.3218e+11 +# TYPE container_cpu_throttle_periods_total gauge +container_cpu_throttle_periods_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_cpu_throttled_periods_total gauge +container_cpu_throttled_periods_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_cpu_throttled_time_nanoseconds gauge +container_cpu_throttled_time_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_cpu_total_nanoseconds gauge +container_cpu_total_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.236339003984e+12 +# TYPE container_cpu_user_nanoseconds gauge +container_cpu_user_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 5.2547e+11 +# TYPE container_memory_active_anon_bytes gauge +container_memory_active_anon_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_active_file_bytes gauge +container_memory_active_file_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.216512e+06 +# TYPE container_memory_cache_bytes gauge +container_memory_cache_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.40980224e+08 +# TYPE container_memory_dirty_bytes gauge +container_memory_dirty_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_inactive_anon_bytes gauge +container_memory_inactive_anon_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 4.4048384e+07 +# TYPE container_memory_inactive_file_bytes gauge +container_memory_inactive_file_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.3928448e+08 +# TYPE container_memory_kernel_failcnt_total gauge +container_memory_kernel_failcnt_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_kernel_limit_bytes gauge +container_memory_kernel_limit_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 9.223372036854772e+18 +# TYPE container_memory_kernel_max_bytes gauge +container_memory_kernel_max_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 6.496256e+06 +# TYPE container_memory_kernel_usage_bytes gauge +container_memory_kernel_usage_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 6.459392e+06 +# TYPE container_memory_kerneltcp_failcnt_total gauge +container_memory_kerneltcp_failcnt_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_kerneltcp_limit_bytes gauge +container_memory_kerneltcp_limit_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 9.223372036854772e+18 +# TYPE container_memory_kerneltcp_max_bytes gauge 
+container_memory_kerneltcp_max_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_kerneltcp_usage_bytes gauge +container_memory_kerneltcp_usage_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_rss_bytes gauge +container_memory_rss_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 4.3794432e+07 +# TYPE container_memory_rss_huge_bytes gauge +container_memory_rss_huge_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_swap_failcnt_total gauge +container_memory_swap_failcnt_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_swap_limit_bytes gauge +container_memory_swap_limit_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 9.223372036854772e+18 +# TYPE container_memory_swap_max_bytes gauge +container_memory_swap_max_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 2.09727488e+08 +# TYPE container_memory_swap_usage_bytes gauge +container_memory_swap_usage_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.9195904e+08 +# TYPE container_memory_total_active_anon_bytes gauge +container_memory_total_active_anon_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_total_active_file_bytes gauge +container_memory_total_active_file_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.216512e+06 +# TYPE container_memory_total_cache_bytes gauge +container_memory_total_cache_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.40980224e+08 +# TYPE container_memory_total_inactive_anon_bytes gauge +container_memory_total_inactive_anon_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 4.4048384e+07 +# TYPE container_memory_total_inactive_file_bytes gauge +container_memory_total_inactive_file_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.3928448e+08 +# TYPE container_memory_total_rss_bytes gauge +container_memory_total_rss_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 4.3794432e+07 +# TYPE container_memory_usage_failcnt_total gauge +container_memory_usage_failcnt_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_usage_limit_bytes gauge +container_memory_usage_limit_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 2.097152e+08 +# TYPE container_memory_usage_max_bytes gauge +container_memory_usage_max_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 2.09608704e+08 +# TYPE container_memory_usage_usage_bytes gauge +container_memory_usage_usage_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.91848448e+08 +# TYPE container_per_cpu_nanoseconds gauge 
+container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="0",namespace="k8s.io"} 9.913781757e+10 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="1",namespace="k8s.io"} 1.16475261138e+11 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="10",namespace="k8s.io"} 1.0670990577e+11 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="11",namespace="k8s.io"} 1.0487838037e+11 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="2",namespace="k8s.io"} 1.05305653633e+11 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="3",namespace="k8s.io"} 1.01195506344e+11 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="4",namespace="k8s.io"} 1.05731762224e+11 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="5",namespace="k8s.io"} 9.8155683224e+10 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="6",namespace="k8s.io"} 9.5075348914e+10 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="7",namespace="k8s.io"} 9.713478277e+10 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="8",namespace="k8s.io"} 1.04266711568e+11 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="9",namespace="k8s.io"} 1.02272190459e+11 diff --git a/x-pack/metricbeat/module/containerd/blkio/_meta/data.json b/x-pack/metricbeat/module/containerd/blkio/_meta/data.json new file mode 100644 index 00000000000..d1f4774a706 --- /dev/null +++ b/x-pack/metricbeat/module/containerd/blkio/_meta/data.json @@ -0,0 +1,35 @@ +{ + "@timestamp":"2016-05-23T08:05:34.853Z", + "beat":{ + "hostname":"beathost", + "name":"beathost" + }, + "metricset":{ + "host":"localhost", + "module":"containerd", + "name":"blkio", + "rtt":44269 + }, + "containerd": { + "blkio": { + "read": { + "ops": 168, + "bytes": 4952064 + }, + "summary": { + "ops": 168, + "bytes": 4952064 + }, + "write": { + "ops": 20, + "bytes": 123134 + }, + "device": "/dev/vda" + }, + "namespace": "k8s.io" + }, + "container": { + "id": "b4d9e874a2de96e4512a32a49df09641fa792a99bebcc6d353723850a50db831" + }, + "type":"metricsets" +} diff --git a/x-pack/metricbeat/module/containerd/blkio/_meta/docs.asciidoc b/x-pack/metricbeat/module/containerd/blkio/_meta/docs.asciidoc new file mode 100644 index 00000000000..4faa70acb6d --- /dev/null +++ b/x-pack/metricbeat/module/containerd/blkio/_meta/docs.asciidoc @@ -0,0 +1 @@ +This is the blkio metricset of the module containerd. diff --git a/x-pack/metricbeat/module/containerd/blkio/_meta/fields.yml b/x-pack/metricbeat/module/containerd/blkio/_meta/fields.yml new file mode 100644 index 00000000000..adcd4228888 --- /dev/null +++ b/x-pack/metricbeat/module/containerd/blkio/_meta/fields.yml @@ -0,0 +1,49 @@ +- name: blkio + type: group + release: beta + description: > + Block I/O metrics. 
+ fields: + - name: read + type: group + description: > + Accumulated reads during the life of the container + fields: + - name: ops + type: long + description: > + Number of reads during the life of the container + - name: bytes + type: long + format: bytes + description: > + Bytes read during the life of the container + - name: write + type: group + description: > + Accumulated writes during the life of the container + fields: + - name: ops + type: long + description: > + Number of writes during the life of the container + - name: bytes + type: long + format: bytes + description: > + Bytes written during the life of the container + - name: summary + type: group + description: > + Accumulated reads and writes during the life of the container + fields: + - name: ops + type: long + description: > + Number of I/O operations during the life of the container + - name: bytes + type: long + format: bytes + description: > + Bytes read and written during the life of the container + diff --git a/x-pack/metricbeat/module/containerd/blkio/_meta/test/containerd.v1.5.2.expected b/x-pack/metricbeat/module/containerd/blkio/_meta/test/containerd.v1.5.2.expected new file mode 100644 index 00000000000..35b67461d90 --- /dev/null +++ b/x-pack/metricbeat/module/containerd/blkio/_meta/test/containerd.v1.5.2.expected @@ -0,0 +1,37 @@ +[ + { + "RootFields": { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + } + }, + "ModuleFields": { + "namespace": "k8s.io" + }, + "MetricSetFields": { + "device": "/dev/vda", + "read": { + "bytes": 69246976, + "ops": 830 + }, + "summary": { + "bytes": 69271552, + "ops": 832 + }, + "write": { + "bytes": 24576, + "ops": 2 + } + }, + "Index": "", + "ID": "", + "Namespace": "containerd.blkio", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + } +] \ No newline at end of file diff --git a/x-pack/metricbeat/module/containerd/blkio/blkio.go b/x-pack/metricbeat/module/containerd/blkio/blkio.go new file mode 100644 index 00000000000..7ffb30e4aec --- /dev/null +++ b/x-pack/metricbeat/module/containerd/blkio/blkio.go @@ -0,0 +1,129 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package blkio
+
+import (
+	"fmt"
+
+	"github.com/elastic/beats/v7/libbeat/common"
+
+	"github.com/pkg/errors"
+
+	"github.com/elastic/beats/v7/x-pack/metricbeat/module/containerd"
+
+	"github.com/elastic/beats/v7/libbeat/common/cfgwarn"
+
+	"github.com/elastic/beats/v7/metricbeat/helper/prometheus"
+	"github.com/elastic/beats/v7/metricbeat/mb"
+	"github.com/elastic/beats/v7/metricbeat/mb/parse"
+)
+
+const (
+	defaultScheme = "http"
+	defaultPath   = "/v1/metrics"
+)
+
+var (
+	// hostParser parses the configured host, applying the default scheme and metrics path
+	hostParser = parse.URLHostParserBuilder{
+		DefaultScheme: defaultScheme,
+		DefaultPath:   defaultPath,
+	}.Build()
+
+	// Mapping of blkio metrics to Metricbeat fields
+	mapping = &prometheus.MetricsMapping{
+		Metrics: map[string]prometheus.MetricMap{
+			"container_blkio_io_serviced_recursive_total": prometheus.Metric("", prometheus.OpFilterMap(
+				"op", map[string]string{
+					"Read":  "read.ops",
+					"Write": "write.ops",
+					"Total": "summary.ops",
+				},
+			)),
+			"container_blkio_io_service_bytes_recursive_bytes": prometheus.Metric("", prometheus.OpFilterMap(
+				"op", map[string]string{
+					"Read":  "read.bytes",
+					"Write": "write.bytes",
+					"Total": "summary.bytes",
+				},
+			)),
+		},
+		Labels: map[string]prometheus.LabelMap{
+			"container_id": prometheus.KeyLabel("id"),
+			"device":       prometheus.KeyLabel("device"),
+			"namespace":    prometheus.KeyLabel("namespace"),
+		},
+	}
+)
+
+// metricset for containerd blkio is a Prometheus-based metricset
+type metricset struct {
+	mb.BaseMetricSet
+	prometheusClient prometheus.Prometheus
+	mod              containerd.Module
+}
+
+// init registers the MetricSet with the central registry.
+// The New method will be called after the setup of the module and before starting to fetch data
+func init() {
+	mb.Registry.MustAddMetricSet("containerd", "blkio", New,
+		mb.WithHostParser(hostParser),
+		mb.DefaultMetricSet(),
+	)
+}
+
+// New creates a new instance of the MetricSet. New is responsible for unpacking
+// any MetricSet specific configuration options if there are any.
+func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
+	cfgwarn.Beta("The containerd blkio metricset is beta.")
+
+	pc, err := prometheus.NewPrometheusClient(base)
+	if err != nil {
+		return nil, err
+	}
+
+	mod, ok := base.Module().(containerd.Module)
+	if !ok {
+		return nil, fmt.Errorf("must be child of containerd module")
+	}
+	return &metricset{
+		BaseMetricSet:    base,
+		prometheusClient: pc,
+		mod:              mod,
+	}, nil
+}
+
+// Fetch gathers information from the containerd daemon and reports events with this information.
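+// The families come from the module-level cache (see containerd.go later in
+// this patch), so the cpu, memory, and blkio metricsets share a single scrape
+// of the containerd metrics endpoint per collection period.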
+func (m *metricset) Fetch(reporter mb.ReporterV2) error {
+	families, _, err := m.mod.GetContainerdMetricsFamilies(m.prometheusClient)
+	if err != nil {
+		return errors.Wrap(err, "error getting families")
+	}
+	events, err := m.prometheusClient.ProcessMetrics(families, mapping)
+	if err != nil {
+		return errors.Wrap(err, "error getting events")
+	}
+	for _, event := range events {
+		// setting ECS container.id and module field containerd.namespace
+		containerFields := common.MapStr{}
+		moduleFields := common.MapStr{}
+		rootFields := common.MapStr{}
+
+		cID := containerd.GetAndDeleteCid(event)
+		namespace := containerd.GetAndDeleteNamespace(event)
+
+		containerFields.Put("id", cID)
+		rootFields.Put("container", containerFields)
+		moduleFields.Put("namespace", namespace)
+
+		reporter.Event(mb.Event{
+			RootFields:      rootFields,
+			ModuleFields:    moduleFields,
+			MetricSetFields: event,
+			Namespace:       "containerd.blkio",
+		})
+	}
+	return nil
+}
diff --git a/x-pack/metricbeat/module/containerd/blkio/blkio_test.go b/x-pack/metricbeat/module/containerd/blkio/blkio_test.go
new file mode 100644
index 00000000000..25eed0d2887
--- /dev/null
+++ b/x-pack/metricbeat/module/containerd/blkio/blkio_test.go
@@ -0,0 +1,42 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+//go:build !integration
+// +build !integration
+
+package blkio
+
+import (
+	"testing"
+
+	"github.com/elastic/beats/v7/metricbeat/helper/prometheus/ptest"
+	_ "github.com/elastic/beats/v7/x-pack/metricbeat/module/containerd"
+)
+
+func TestEventMapping(t *testing.T) {
+	ptest.TestMetricSet(t, "containerd", "blkio",
+		ptest.TestCases{
+			{
+				MetricsFile:  "../_meta/test/containerd.v1.5.2",
+				ExpectedFile: "./_meta/test/containerd.v1.5.2.expected",
+			},
+		},
+	)
+}
diff --git a/x-pack/metricbeat/module/containerd/config.go b/x-pack/metricbeat/module/containerd/config.go
new file mode 100644
index 00000000000..3c8910acba7
--- /dev/null
+++ b/x-pack/metricbeat/module/containerd/config.go
@@ -0,0 +1,19 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package containerd
+
+// Config contains the config needed for containerd
+type Config struct {
+	CalculateCpuPct bool `config:"calcpct.cpu"`
+	CalculateMemPct bool `config:"calcpct.memory"`
+}
+
+// DefaultConfig returns default module config
+func DefaultConfig() Config {
+	return Config{
+		CalculateCpuPct: true,
+		CalculateMemPct: true,
+	}
+}
diff --git a/x-pack/metricbeat/module/containerd/containerd.go b/x-pack/metricbeat/module/containerd/containerd.go
new file mode 100644
index 00000000000..2b0c1fd7e09
--- /dev/null
+++ b/x-pack/metricbeat/module/containerd/containerd.go
@@ -0,0 +1,116 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package containerd
+
+import (
+	"sync"
+	"time"
+
+	"github.com/mitchellh/hashstructure"
+	"github.com/pkg/errors"
+	dto "github.com/prometheus/client_model/go"
+
+	p "github.com/elastic/beats/v7/metricbeat/helper/prometheus"
+	"github.com/elastic/beats/v7/metricbeat/mb"
+)
+
+func init() {
+	// Register the ModuleFactory function for the "containerd" module.
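+	// ModuleBuilder runs once at registration, so the factory it returns
+	// closes over a single shared containerdMetricsCache; every instance of
+	// the containerd module reuses it (see GetContainerdMetricsFamilies below).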
+	if err := mb.Registry.AddModule("containerd", ModuleBuilder()); err != nil {
+		panic(err)
+	}
+}
+
+type Module interface {
+	mb.Module
+	GetContainerdMetricsFamilies(prometheus p.Prometheus) ([]*dto.MetricFamily, time.Time, error)
+}
+
+type familiesCache struct {
+	sharedFamilies     []*dto.MetricFamily
+	lastFetchErr       error
+	lastFetchTimestamp time.Time
+}
+
+type containerdMetricsCache struct {
+	cacheMap map[uint64]*familiesCache
+	lock     sync.Mutex
+}
+
+func (c *containerdMetricsCache) getCacheMapEntry(hash uint64) *familiesCache {
+	if _, ok := c.cacheMap[hash]; !ok {
+		c.cacheMap[hash] = &familiesCache{}
+	}
+	return c.cacheMap[hash]
+}
+
+type module struct {
+	mb.BaseModule
+
+	containerdMetricsCache *containerdMetricsCache
+	cacheHash              uint64
+}
+
+func ModuleBuilder() func(base mb.BaseModule) (mb.Module, error) {
+	containerdMetricsCache := &containerdMetricsCache{
+		cacheMap: make(map[uint64]*familiesCache),
+	}
+
+	return func(base mb.BaseModule) (mb.Module, error) {
+		hash, err := generateCacheHash(base.Config().Hosts)
+		if err != nil {
+			return nil, errors.Wrap(err, "error generating cache hash for containerdMetricsCache")
+		}
+		m := module{
+			BaseModule:             base,
+			containerdMetricsCache: containerdMetricsCache,
+			cacheHash:              hash,
+		}
+		return &m, nil
+	}
+}
+
+func (m *module) GetContainerdMetricsFamilies(prometheus p.Prometheus) ([]*dto.MetricFamily, time.Time, error) {
+	m.containerdMetricsCache.lock.Lock()
+	defer m.containerdMetricsCache.lock.Unlock()
+
+	now := time.Now()
+	// NOTE: These entries are never removed. This can be a leak if metricbeat
+	// is used to monitor dynamically created clusters.
+	// (https://github.com/elastic/beats/pull/25640#discussion_r633395213)
+	familiesCache := m.containerdMetricsCache.getCacheMapEntry(m.cacheHash)
+
+	if familiesCache.lastFetchTimestamp.IsZero() || now.Sub(familiesCache.lastFetchTimestamp) > m.Config().Period {
+		familiesCache.sharedFamilies, familiesCache.lastFetchErr = prometheus.GetFamilies()
+		familiesCache.lastFetchTimestamp = now
+	}
+
+	return familiesCache.sharedFamilies, familiesCache.lastFetchTimestamp, familiesCache.lastFetchErr
+}
+
+func generateCacheHash(host []string) (uint64, error) {
+	id, err := hashstructure.Hash(host, nil)
+	if err != nil {
+		return 0, err
+	}
+	return id, nil
+}
diff --git a/x-pack/metricbeat/module/containerd/cpu/_meta/data.json b/x-pack/metricbeat/module/containerd/cpu/_meta/data.json
new file mode 100644
index 00000000000..1dc40fe4cb7
--- /dev/null
+++ b/x-pack/metricbeat/module/containerd/cpu/_meta/data.json
@@ -0,0 +1,38 @@
+{
+    "@timestamp": "2019-03-01T08:05:34.853Z",
+    "container": {
+        "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d"
+    },
+    "containerd": {
+        "namespace": "k8s.io",
+        "cpu": {
+            "usage": {
+                "user": {
+                    "ns": 22526620000000,
+                    "pct": 0.2496932948025663
+                },
+                "total": {
+                    "pct": 0.24969398913655017,
+                    "ns": 22554111128186
+                },
+                "kernel": {
+                    "ns": 16740000000,
+                    "pct": 0
+                }
+            }
+        }
+    },
+    "event": {
+        "dataset": "containerd.cpu",
+        "duration": 115000,
+        "module": "containerd"
+    },
+    "metricset": {
+        "name": "cpu",
+        "period": 10000
+    },
+    "service": {
+        "address": "127.0.0.1:55555",
+        "type": "containerd"
+    }
+}
diff --git a/x-pack/metricbeat/module/containerd/cpu/_meta/docs.asciidoc b/x-pack/metricbeat/module/containerd/cpu/_meta/docs.asciidoc
new file mode 100644
index 00000000000..6cf8db33d3d
--- /dev/null
+++ b/x-pack/metricbeat/module/containerd/cpu/_meta/docs.asciidoc
@@ -0,0 +1 @@
+This is the cpu metricset of the module
containerd. diff --git a/x-pack/metricbeat/module/containerd/cpu/_meta/fields.yml b/x-pack/metricbeat/module/containerd/cpu/_meta/fields.yml new file mode 100644 index 00000000000..e2a99f3d432 --- /dev/null +++ b/x-pack/metricbeat/module/containerd/cpu/_meta/fields.yml @@ -0,0 +1,54 @@ +- name: cpu + type: group + description: > + Containerd Runtime CPU metrics. + release: beta + fields: + - name: system.total + type: double + description: > + Total user and system CPU time spent in seconds. + - name: usage + type: group + fields: + - name: kernel + type: group + fields: + - name: ns + type: double + description: > + CPU Kernel usage nanoseconds + - name: user + type: group + fields: + - name: ns + type: double + description: > + CPU User usage nanoseconds + - name: total + type: group + fields: + - name: ns + type: double + description: > + CPU total usage nanoseconds + - name: total.pct + type: scaled_float + format: percent + description: > + Percentage of total CPU time normalized by the number of CPU cores + - name: kernel.pct + type: scaled_float + format: percent + description: > + Percentage of time in kernel space normalized by the number of CPU cores. + - name: user.pct + type: scaled_float + format: percent + description: > + Percentage of time in user space normalized by the number of CPU cores. + - name: cpu.*.ns + type: object + object_type: double + description: > + CPU usage nanoseconds in this cpu. diff --git a/x-pack/metricbeat/module/containerd/cpu/_meta/test/containerd.v1.5.2.expected b/x-pack/metricbeat/module/containerd/cpu/_meta/test/containerd.v1.5.2.expected new file mode 100644 index 00000000000..e048515f8dd --- /dev/null +++ b/x-pack/metricbeat/module/containerd/cpu/_meta/test/containerd.v1.5.2.expected @@ -0,0 +1,398 @@ +[ + { + "RootFields": { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + } + }, + "ModuleFields": { + "namespace": "k8s.io" + }, + "MetricSetFields": { + "usage": { + "cpu": { + "7": { + "ns": 97134782770 + } + }, + "percpu": {} + } + }, + "Index": "", + "ID": "", + "Namespace": "containerd.cpu", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + } + }, + "ModuleFields": { + "namespace": "k8s.io" + }, + "MetricSetFields": { + "usage": { + "cpu": { + "4": { + "ns": 105731762224 + } + }, + "percpu": {} + } + }, + "Index": "", + "ID": "", + "Namespace": "containerd.cpu", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + } + }, + "ModuleFields": { + "namespace": "k8s.io" + }, + "MetricSetFields": { + "usage": { + "cpu": { + "9": { + "ns": 102272190459 + } + }, + "percpu": {} + } + }, + "Index": "", + "ID": "", + "Namespace": "containerd.cpu", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + } + }, + "ModuleFields": { + "namespace": "k8s.io" + }, + "MetricSetFields": { + "usage": { + "cpu": { + "8": { + "ns": 104266711568 + } + }, + "percpu": {} + } + }, + "Index": "", + 
"ID": "", + "Namespace": "containerd.cpu", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + } + }, + "ModuleFields": { + "namespace": "k8s.io" + }, + "MetricSetFields": { + "usage": { + "cpu": { + "10": { + "ns": 106709905770 + } + }, + "percpu": {} + } + }, + "Index": "", + "ID": "", + "Namespace": "containerd.cpu", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + } + }, + "ModuleFields": { + "namespace": "k8s.io" + }, + "MetricSetFields": { + "usage": { + "cpu": { + "5": { + "ns": 98155683224 + } + }, + "percpu": {} + } + }, + "Index": "", + "ID": "", + "Namespace": "containerd.cpu", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + } + }, + "ModuleFields": { + "namespace": "k8s.io" + }, + "MetricSetFields": { + "usage": { + "cpu": { + "2": { + "ns": 105305653633 + } + }, + "percpu": {} + } + }, + "Index": "", + "ID": "", + "Namespace": "containerd.cpu", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + } + }, + "ModuleFields": { + "namespace": "k8s.io" + }, + "MetricSetFields": { + "usage": { + "cpu": { + "3": { + "ns": 101195506344 + } + }, + "percpu": {} + } + }, + "Index": "", + "ID": "", + "Namespace": "containerd.cpu", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + } + }, + "ModuleFields": { + "namespace": "k8s.io" + }, + "MetricSetFields": { + "usage": { + "cpu": { + "0": { + "ns": 99137817570 + } + }, + "percpu": {} + } + }, + "Index": "", + "ID": "", + "Namespace": "containerd.cpu", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + } + }, + "ModuleFields": { + "namespace": "k8s.io" + }, + "MetricSetFields": { + "usage": { + "cpu": { + "1": { + "ns": 116475261138 + } + }, + "percpu": {} + } + }, + "Index": "", + "ID": "", + "Namespace": "containerd.cpu", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + } + }, + "ModuleFields": { + "namespace": "k8s.io" + }, + "MetricSetFields": { + "usage": { + "cpu": { + "6": { + "ns": 95075348914 + } + }, + "percpu": {} + } + }, + "Index": "", + "ID": "", + "Namespace": "containerd.cpu", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + 
"Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + } + }, + "ModuleFields": { + "namespace": "k8s.io" + }, + "MetricSetFields": { + "usage": { + "cpu": { + "11": { + "ns": 104878380370 + } + }, + "percpu": {} + } + }, + "Index": "", + "ID": "", + "Namespace": "containerd.cpu", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + } + }, + "ModuleFields": { + "namespace": "k8s.io" + }, + "MetricSetFields": { + "usage": { + "kernel": { + "ns": 532180000000, + "pct": 0 + }, + "total": { + "ns": 1236339003984, + "pct": 0 + }, + "user": { + "ns": 525470000000, + "pct": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "containerd.cpu", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + } +] \ No newline at end of file diff --git a/x-pack/metricbeat/module/containerd/cpu/_meta/testdata/config.yml b/x-pack/metricbeat/module/containerd/cpu/_meta/testdata/config.yml new file mode 100644 index 00000000000..e19c22ddc90 --- /dev/null +++ b/x-pack/metricbeat/module/containerd/cpu/_meta/testdata/config.yml @@ -0,0 +1,3 @@ +type: http +url: "/v1/metrics" +suffix: plain diff --git a/x-pack/metricbeat/module/containerd/cpu/_meta/testdata/docs.plain b/x-pack/metricbeat/module/containerd/cpu/_meta/testdata/docs.plain new file mode 100644 index 00000000000..f82e4d3c1d9 --- /dev/null +++ b/x-pack/metricbeat/module/containerd/cpu/_meta/testdata/docs.plain @@ -0,0 +1,99 @@ +# TYPE container_blkio_io_service_bytes_recursive_bytes gauge +container_blkio_io_service_bytes_recursive_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Async"} 0 +container_blkio_io_service_bytes_recursive_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Discard"} 0 +container_blkio_io_service_bytes_recursive_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Read"} 6.9246976e+07 +container_blkio_io_service_bytes_recursive_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Sync"} 6.9271552e+07 +container_blkio_io_service_bytes_recursive_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Total"} 6.9271552e+07 +container_blkio_io_service_bytes_recursive_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Write"} 24576 +# TYPE container_blkio_io_serviced_recursive_total gauge +container_blkio_io_serviced_recursive_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Async"} 0 
+container_blkio_io_serviced_recursive_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Discard"} 0 +container_blkio_io_serviced_recursive_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Read"} 830 +container_blkio_io_serviced_recursive_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Sync"} 832 +container_blkio_io_serviced_recursive_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Total"} 832 +container_blkio_io_serviced_recursive_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",device="/dev/vda",major="254",minor="0",namespace="k8s.io",op="Write"} 2 +# TYPE container_cpu_kernel_nanoseconds gauge +container_cpu_kernel_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 5.3218e+11 +# TYPE container_cpu_throttle_periods_total gauge +container_cpu_throttle_periods_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_cpu_throttled_periods_total gauge +container_cpu_throttled_periods_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_cpu_throttled_time_nanoseconds gauge +container_cpu_throttled_time_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_cpu_total_nanoseconds gauge +container_cpu_total_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.236339003984e+12 +# TYPE container_cpu_user_nanoseconds gauge +container_cpu_user_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 5.2547e+11 +# TYPE container_memory_active_anon_bytes gauge +container_memory_active_anon_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_active_file_bytes gauge +container_memory_active_file_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.216512e+06 +# TYPE container_memory_cache_bytes gauge +container_memory_cache_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.40980224e+08 +# TYPE container_memory_dirty_bytes gauge +container_memory_dirty_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_inactive_anon_bytes gauge +container_memory_inactive_anon_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 4.4048384e+07 +# TYPE container_memory_inactive_file_bytes gauge +container_memory_inactive_file_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.3928448e+08 +# TYPE container_memory_kernel_failcnt_total gauge +container_memory_kernel_failcnt_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_kernel_limit_bytes gauge 
+container_memory_kernel_limit_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 9.223372036854772e+18 +# TYPE container_memory_kernel_max_bytes gauge +container_memory_kernel_max_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 6.496256e+06 +# TYPE container_memory_kernel_usage_bytes gauge +container_memory_kernel_usage_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 6.459392e+06 +# TYPE container_memory_kerneltcp_failcnt_total gauge +container_memory_kerneltcp_failcnt_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_kerneltcp_limit_bytes gauge +container_memory_kerneltcp_limit_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 9.223372036854772e+18 +# TYPE container_memory_kerneltcp_max_bytes gauge +container_memory_kerneltcp_max_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_kerneltcp_usage_bytes gauge +container_memory_kerneltcp_usage_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_rss_bytes gauge +container_memory_rss_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 4.3794432e+07 +# TYPE container_memory_rss_huge_bytes gauge +container_memory_rss_huge_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_swap_failcnt_total gauge +container_memory_swap_failcnt_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_swap_limit_bytes gauge +container_memory_swap_limit_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 9.223372036854772e+18 +# TYPE container_memory_swap_max_bytes gauge +container_memory_swap_max_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 2.09727488e+08 +# TYPE container_memory_swap_usage_bytes gauge +container_memory_swap_usage_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.9195904e+08 +# TYPE container_memory_total_active_anon_bytes gauge +container_memory_total_active_anon_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_total_active_file_bytes gauge +container_memory_total_active_file_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.216512e+06 +# TYPE container_memory_total_cache_bytes gauge +container_memory_total_cache_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.40980224e+08 +# TYPE container_memory_total_inactive_anon_bytes gauge +container_memory_total_inactive_anon_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 4.4048384e+07 +# TYPE container_memory_total_inactive_file_bytes gauge +container_memory_total_inactive_file_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.3928448e+08 +# TYPE 
container_memory_total_rss_bytes gauge +container_memory_total_rss_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 4.3794432e+07 +# TYPE container_memory_usage_failcnt_total gauge +container_memory_usage_failcnt_total{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 0 +# TYPE container_memory_usage_limit_bytes gauge +container_memory_usage_limit_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 2.097152e+08 +# TYPE container_memory_usage_max_bytes gauge +container_memory_usage_max_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 2.09608704e+08 +# TYPE container_memory_usage_usage_bytes gauge +container_memory_usage_usage_bytes{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",namespace="k8s.io"} 1.91848448e+08 +# TYPE container_per_cpu_nanoseconds gauge +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="0",namespace="k8s.io"} 9.913781757e+10 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="1",namespace="k8s.io"} 1.16475261138e+11 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="10",namespace="k8s.io"} 1.0670990577e+11 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="11",namespace="k8s.io"} 1.0487838037e+11 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="2",namespace="k8s.io"} 1.05305653633e+11 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="3",namespace="k8s.io"} 1.01195506344e+11 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="4",namespace="k8s.io"} 1.05731762224e+11 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="5",namespace="k8s.io"} 9.8155683224e+10 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="6",namespace="k8s.io"} 9.5075348914e+10 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="7",namespace="k8s.io"} 9.713478277e+10 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="8",namespace="k8s.io"} 1.04266711568e+11 +container_per_cpu_nanoseconds{container_id="7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d",cpu="9",namespace="k8s.io"} 1.02272190459e+11 diff --git a/x-pack/metricbeat/module/containerd/cpu/_meta/testdata/docs.plain-expected.json b/x-pack/metricbeat/module/containerd/cpu/_meta/testdata/docs.plain-expected.json new file mode 100644 index 00000000000..61afe14be02 --- /dev/null +++ b/x-pack/metricbeat/module/containerd/cpu/_meta/testdata/docs.plain-expected.json @@ -0,0 +1,411 @@ +[ + { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + }, + "containerd": { + "cpu": { + "usage": { + "cpu": { + "4": { + "ns": 105731762224 + } + }, + "percpu": {} + } + }, + "namespace": "k8s.io" + }, + "event": { + "dataset": "containerd.cpu", + "duration": 115000, 
+ "module": "containerd" + }, + "metricset": { + "name": "cpu", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "containerd" + } + }, + { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + }, + "containerd": { + "cpu": { + "usage": { + "cpu": { + "3": { + "ns": 101195506344 + } + }, + "percpu": {} + } + }, + "namespace": "k8s.io" + }, + "event": { + "dataset": "containerd.cpu", + "duration": 115000, + "module": "containerd" + }, + "metricset": { + "name": "cpu", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "containerd" + } + }, + { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + }, + "containerd": { + "cpu": { + "usage": { + "cpu": { + "0": { + "ns": 99137817570 + } + }, + "percpu": {} + } + }, + "namespace": "k8s.io" + }, + "event": { + "dataset": "containerd.cpu", + "duration": 115000, + "module": "containerd" + }, + "metricset": { + "name": "cpu", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "containerd" + } + }, + { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + }, + "containerd": { + "cpu": { + "usage": { + "cpu": { + "9": { + "ns": 102272190459 + } + }, + "percpu": {} + } + }, + "namespace": "k8s.io" + }, + "event": { + "dataset": "containerd.cpu", + "duration": 115000, + "module": "containerd" + }, + "metricset": { + "name": "cpu", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "containerd" + } + }, + { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + }, + "containerd": { + "cpu": { + "usage": { + "cpu": { + "2": { + "ns": 105305653633 + } + }, + "percpu": {} + } + }, + "namespace": "k8s.io" + }, + "event": { + "dataset": "containerd.cpu", + "duration": 115000, + "module": "containerd" + }, + "metricset": { + "name": "cpu", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "containerd" + } + }, + { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + }, + "containerd": { + "cpu": { + "usage": { + "cpu": { + "8": { + "ns": 104266711568 + } + }, + "percpu": {} + } + }, + "namespace": "k8s.io" + }, + "event": { + "dataset": "containerd.cpu", + "duration": 115000, + "module": "containerd" + }, + "metricset": { + "name": "cpu", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "containerd" + } + }, + { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + }, + "containerd": { + "cpu": { + "usage": { + "cpu": { + "7": { + "ns": 97134782770 + } + }, + "percpu": {} + } + }, + "namespace": "k8s.io" + }, + "event": { + "dataset": "containerd.cpu", + "duration": 115000, + "module": "containerd" + }, + "metricset": { + "name": "cpu", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "containerd" + } + }, + { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + }, + "containerd": { + "cpu": { + "usage": { + "cpu": { + "5": { + "ns": 98155683224 + } + }, + "percpu": {} + } + }, + "namespace": "k8s.io" + }, + "event": { + "dataset": "containerd.cpu", + "duration": 115000, + "module": "containerd" + }, + "metricset": { + "name": "cpu", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "containerd" + } + }, + { + "container": { + "id": 
"7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + }, + "containerd": { + "cpu": { + "usage": { + "cpu": { + "1": { + "ns": 116475261138 + } + }, + "percpu": {} + } + }, + "namespace": "k8s.io" + }, + "event": { + "dataset": "containerd.cpu", + "duration": 115000, + "module": "containerd" + }, + "metricset": { + "name": "cpu", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "containerd" + } + }, + { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + }, + "containerd": { + "cpu": { + "usage": { + "cpu": { + "10": { + "ns": 106709905770 + } + }, + "percpu": {} + } + }, + "namespace": "k8s.io" + }, + "event": { + "dataset": "containerd.cpu", + "duration": 115000, + "module": "containerd" + }, + "metricset": { + "name": "cpu", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "containerd" + } + }, + { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + }, + "containerd": { + "cpu": { + "usage": { + "kernel": { + "ns": 532180000000, + "pct": 0 + }, + "total": { + "ns": 1236339003984, + "pct": 0 + }, + "user": { + "ns": 525470000000, + "pct": 0 + } + } + }, + "namespace": "k8s.io" + }, + "event": { + "dataset": "containerd.cpu", + "duration": 115000, + "module": "containerd" + }, + "metricset": { + "name": "cpu", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "containerd" + } + }, + { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + }, + "containerd": { + "cpu": { + "usage": { + "cpu": { + "11": { + "ns": 104878380370 + } + }, + "percpu": {} + } + }, + "namespace": "k8s.io" + }, + "event": { + "dataset": "containerd.cpu", + "duration": 115000, + "module": "containerd" + }, + "metricset": { + "name": "cpu", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "containerd" + } + }, + { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + }, + "containerd": { + "cpu": { + "usage": { + "cpu": { + "6": { + "ns": 95075348914 + } + }, + "percpu": {} + } + }, + "namespace": "k8s.io" + }, + "event": { + "dataset": "containerd.cpu", + "duration": 115000, + "module": "containerd" + }, + "metricset": { + "name": "cpu", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "containerd" + } + } +] \ No newline at end of file diff --git a/x-pack/metricbeat/module/containerd/cpu/cpu.go b/x-pack/metricbeat/module/containerd/cpu/cpu.go new file mode 100644 index 00000000000..135092f8184 --- /dev/null +++ b/x-pack/metricbeat/module/containerd/cpu/cpu.go @@ -0,0 +1,235 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package cpu
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/elastic/beats/v7/x-pack/metricbeat/module/containerd"
+
+	"github.com/elastic/beats/v7/libbeat/common/cfgwarn"
+
+	"github.com/pkg/errors"
+
+	"github.com/elastic/beats/v7/libbeat/common"
+	"github.com/elastic/beats/v7/metricbeat/helper/prometheus"
+	"github.com/elastic/beats/v7/metricbeat/mb"
+	"github.com/elastic/beats/v7/metricbeat/mb/parse"
+)
+
+const (
+	defaultScheme = "http"
+	defaultPath   = "/v1/metrics"
+)
+
+var (
+	// hostParser validates Prometheus URLs
+	hostParser = parse.URLHostParserBuilder{
+		DefaultScheme: defaultScheme,
+		DefaultPath:   defaultPath,
+	}.Build()
+
+	// Mapping of containerd CPU metrics
+	mapping = &prometheus.MetricsMapping{
+		Metrics: map[string]prometheus.MetricMap{
+			"container_cpu_total_nanoseconds":  prometheus.Metric("usage.total.ns"),
+			"container_cpu_user_nanoseconds":   prometheus.Metric("usage.user.ns"),
+			"container_cpu_kernel_nanoseconds": prometheus.Metric("usage.kernel.ns"),
+			"container_per_cpu_nanoseconds":    prometheus.Metric("usage.percpu.ns"),
+			"process_cpu_seconds_total":        prometheus.Metric("system.total"),
+		},
+		Labels: map[string]prometheus.LabelMap{
+			"container_id": prometheus.KeyLabel("id"),
+			"namespace":    prometheus.KeyLabel("namespace"),
+			"cpu":          prometheus.KeyLabel("cpu"),
+		},
+	}
+)
+
+// metricset for containerd CPU is a Prometheus-based metricset
+type metricset struct {
+	mb.BaseMetricSet
+	prometheusClient           prometheus.Prometheus
+	mod                        containerd.Module
+	calcPct                    bool
+	preTimestamp               time.Time
+	preContainerCpuTotalUsage  map[string]float64
+	preContainerCpuKernelUsage map[string]float64
+	preContainerCpuUserUsage   map[string]float64
+}
+
+// init registers the MetricSet with the central registry.
+// The New method will be called after the setup of the module and before starting to fetch data.
+func init() {
+	mb.Registry.MustAddMetricSet("containerd", "cpu", New,
+		mb.WithHostParser(hostParser),
+		mb.DefaultMetricSet(),
+	)
+}
+
+// New creates a new instance of the MetricSet. New is responsible for unpacking
+// any MetricSet specific configuration options if there are any.
+func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
+	cfgwarn.Beta("The containerd cpu metricset is beta.")
+
+	pc, err := prometheus.NewPrometheusClient(base)
+	if err != nil {
+		return nil, err
+	}
+	config := containerd.DefaultConfig()
+	if err := base.Module().UnpackConfig(&config); err != nil {
+		return nil, err
+	}
+	mod, ok := base.Module().(containerd.Module)
+	if !ok {
+		return nil, fmt.Errorf("must be child of containerd module")
+	}
+	return &metricset{
+		BaseMetricSet:              base,
+		prometheusClient:           pc,
+		mod:                        mod,
+		calcPct:                    config.CalculateCpuPct,
+		preTimestamp:               time.Time{},
+		preContainerCpuTotalUsage:  map[string]float64{},
+		preContainerCpuKernelUsage: map[string]float64{},
+		preContainerCpuUserUsage:   map[string]float64{},
+	}, nil
+}
+
+// Fetch gathers information from containerd and reports events with this information.
+func (m *metricset) Fetch(reporter mb.ReporterV2) error { + families, timestamp, err := m.mod.GetContainerdMetricsFamilies(m.prometheusClient) + if err != nil { + return errors.Wrap(err, "error getting families") + } + + events, err := m.prometheusClient.ProcessMetrics(families, mapping) + if err != nil { + return errors.Wrap(err, "error getting events") + } + + perContainerCpus := make(map[string]int) + if m.calcPct { + for _, event := range events { + if _, err = event.GetValue("cpu"); err == nil { + // calculate cpus used by each container + setContCpus(event, perContainerCpus) + } + } + } + + for _, event := range events { + // setting ECS container.id and module field containerd.namespace + containerFields := common.MapStr{} + moduleFields := common.MapStr{} + rootFields := common.MapStr{} + + cID := containerd.GetAndDeleteCid(event) + namespace := containerd.GetAndDeleteNamespace(event) + + containerFields.Put("id", cID) + rootFields.Put("container", containerFields) + moduleFields.Put("namespace", namespace) + + if m.calcPct { + contCpus, ok := perContainerCpus[cID] + if !ok { + contCpus = 1 + } + // calculate timestamp delta + timestampDelta := int64(0) + if !m.preTimestamp.IsZero() { + timestampDelta = timestamp.UnixNano() - m.preTimestamp.UnixNano() + } + // Calculate cpu total usage percentage + cpuUsageTotal, err := event.GetValue("usage.total.ns") + if err == nil { + cpuUsageTotalPct := calcUsagePct(timestampDelta, cpuUsageTotal.(float64), + float64(contCpus), cID, m.preContainerCpuTotalUsage) + m.Logger().Debugf("cpuUsageTotalPct for %+v is %+v", cID, cpuUsageTotalPct) + event.Put("usage.total.pct", cpuUsageTotalPct) + // Update values + m.preContainerCpuTotalUsage[cID] = cpuUsageTotal.(float64) + } + + // Calculate cpu kernel usage percentage + // If event does not contain usage.kernel.ns skip the calculation (event has only system.total) + cpuUsageKernel, err := event.GetValue("usage.kernel.ns") + if err == nil { + cpuUsageKernelPct := calcUsagePct(timestampDelta, cpuUsageKernel.(float64), + float64(contCpus), cID, m.preContainerCpuKernelUsage) + m.Logger().Debugf("cpuUsageKernelPct for %+v is %+v", cID, cpuUsageKernelPct) + event.Put("usage.kernel.pct", cpuUsageKernelPct) + // Update values + m.preContainerCpuKernelUsage[cID] = cpuUsageKernel.(float64) + } + + // Calculate cpu user usage percentage + cpuUsageUser, err := event.GetValue("usage.user.ns") + if err == nil { + cpuUsageUserPct := calcUsagePct(timestampDelta, cpuUsageUser.(float64), + float64(contCpus), cID, m.preContainerCpuUserUsage) + m.Logger().Debugf("cpuUsageUserPct for %+v is %+v", cID, cpuUsageUserPct) + event.Put("usage.user.pct", cpuUsageUserPct) + // Update values + m.preContainerCpuUserUsage[cID] = cpuUsageUser.(float64) + } + } + if cpuId, err := event.GetValue("cpu"); err == nil { + perCpuNs, err := event.GetValue("usage.percpu.ns") + if err == nil { + key := fmt.Sprintf("usage.cpu.%s.ns", cpuId) + event.Put(key, perCpuNs) + event.Delete("cpu") + event.Delete("usage.percpu.ns") + } + } + + reporter.Event(mb.Event{ + RootFields: rootFields, + ModuleFields: moduleFields, + MetricSetFields: event, + Namespace: "containerd.cpu", + }) + } + // set Timestamp of previous event + m.preTimestamp = timestamp + return nil +} + +func setContCpus(event common.MapStr, perContainerCpus map[string]int) { + val, err := event.GetValue("id") + if err != nil { + return + } + cid := val.(string) + _, err = event.GetValue("usage.percpu.ns") + if err != nil { + return + } + if _, ok := perContainerCpus[cid]; ok { + 
perContainerCpus[cid] += 1 + } else { + perContainerCpus[cid] = 1 + } +} + +func calcUsagePct(timestampDelta int64, newValue, contCpus float64, + cid string, oldValuesMap map[string]float64) float64 { + var usageDelta, usagePct float64 + if oldValue, ok := oldValuesMap[cid]; ok { + usageDelta = newValue - oldValue + } else { + usageDelta = newValue + } + if usageDelta == 0.0 || float64(timestampDelta) == 0.0 { + usagePct = 0.0 + } else { + // normalize percentage with cpus used per container + usagePct = (usageDelta / float64(timestampDelta)) / contCpus + } + return usagePct +} diff --git a/x-pack/metricbeat/module/containerd/cpu/cpu_test.go b/x-pack/metricbeat/module/containerd/cpu/cpu_test.go new file mode 100644 index 00000000000..e7b67c45406 --- /dev/null +++ b/x-pack/metricbeat/module/containerd/cpu/cpu_test.go @@ -0,0 +1,31 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build !integration +// +build !integration + +package cpu + +import ( + "testing" + + "github.com/elastic/beats/v7/metricbeat/helper/prometheus/ptest" + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/containerd" +) + +func TestEventMapping(t *testing.T) { + ptest.TestMetricSet(t, "containerd", "cpu", + ptest.TestCases{ + { + MetricsFile: "../_meta/test/containerd.v1.5.2", + ExpectedFile: "./_meta/test/containerd.v1.5.2.expected", + }, + }, + ) +} + +func TestData(t *testing.T) { + mbtest.TestDataFiles(t, "containerd", "cpu") +} diff --git a/x-pack/metricbeat/module/containerd/doc.go b/x-pack/metricbeat/module/containerd/doc.go new file mode 100644 index 00000000000..27f4fcf9471 --- /dev/null +++ b/x-pack/metricbeat/module/containerd/doc.go @@ -0,0 +1,6 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Package containerd is a Metricbeat module that contains MetricSets. +package containerd diff --git a/x-pack/metricbeat/module/containerd/fields.go b/x-pack/metricbeat/module/containerd/fields.go new file mode 100644 index 00000000000..134bc54e6d0 --- /dev/null +++ b/x-pack/metricbeat/module/containerd/fields.go @@ -0,0 +1,23 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by beats/dev-tools/cmd/asset/asset.go - DO NOT EDIT. + +package containerd + +import ( + "github.com/elastic/beats/v7/libbeat/asset" +) + +func init() { + if err := asset.SetFields("metricbeat", "containerd", asset.ModuleFieldsPri, AssetContainerd); err != nil { + panic(err) + } +} + +// AssetContainerd returns asset data. +// This is the base64 encoded zlib format compressed contents of module/containerd. 
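`calcUsagePct` above carries the rate arithmetic shared by all three CPU counters: the usage delta between two scrapes divided by the elapsed nanoseconds, normalized by the container's CPU count. A container with no stored previous value contributes its whole counter as the delta, and a zero time delta short-circuits to zero, which is why a first fetch (as in the expected test data) reports `pct: 0`. A small test sketch, not part of this patch, pins those cases down:

```go
package cpu

import "testing"

// TestCalcUsagePctSketch is an illustrative sketch, not part of this patch:
// it exercises a known container, an unseen container, and a zero time delta.
func TestCalcUsagePctSketch(t *testing.T) {
	prevVals := map[string]float64{"c1": 1e9}

	// Known container: delta of 2e9ns over a 1e10ns window on 2 CPUs -> 0.10.
	if got := calcUsagePct(1e10, 3e9, 2, "c1", prevVals); got != 0.1 {
		t.Errorf("expected 0.1, got %v", got)
	}
	// Unseen container: the full counter value is used as the delta.
	if got := calcUsagePct(1e10, 5e9, 1, "c2", prevVals); got != 0.5 {
		t.Errorf("expected 0.5, got %v", got)
	}
	// Zero timestamp delta must not divide by zero.
	if got := calcUsagePct(0, 3e9, 2, "c1", prevVals); got != 0 {
		t.Errorf("expected 0, got %v", got)
	}
}
```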
+func AssetContainerd() string { + return "eJzUmM1u2zAMx+95CqKXAQOa3XMYsBYoUAzdim09F4pMp1pkydDH0vTpB8p26tjyR4IGcXQYBjsmf6T+JKVewxq3C+BaOSYUmmQG4ISTuICr293DqxmAQYnM4gKW6NgMIEHLjcid0GoBX2cAAO8fgHXMWeBaSuQOE0iNzva9pAJlYhfhw2tQLMMGBi23zXEBK6N9Xj6JuKV1r1JtMkaPganCv7BOcAtsqb2rmf5kwXilhFq9P7Tz0lKdqk5G/9qccdy9qeDWuN3oHXAPYiNDbYuVr6VcC93yU08CreZ+jPB+IzVfw/2Xn5ChM4Lvoo5FXicyyJK9F11QAwC0vnHuMy8ZqYLsWki8oc1wLwhSpAg6Df/fbU7DRAy0Dqtz23pX8UqtVpGXA8i0fvhsiYbYjoKu7e7W4cGAhbi7Px4RwA19GuAPY6+4N0Y4PIUIguGLU8Fx1FORAdE7VMcpwfosY2Z7uoZADfwyVUG9VedowiC6WHWEJlFtwkEyac0ynvvBSTZuXv7yyokM4fbxKTa+usZh31izW+swmzvtmIyqOdF+KZtNbyCNf8gaeIumOIgEH4E64NsclQOhwCLXKtmL4Z3MW7Ya32yHVL9Go7AZYb/JPrN10yomNOhPYLVG6JEWpe57iKBICyimdJm9zpAp/5cc8BPpZ3y4MQlfVLyurJqDAp7n3HUGbTmTmDynUrPYj6pmmaPhqGK/GEH/WHxM0NQSQwy7SlfkQYo3TGC5Df1S7eYE/YhrE2nT+zU7tQgpLqFKOAjXl3FxznsrdaJhhib+IUHy3M8/z6O1VASpl38xmoPixfNAtY0IkShb9UVRuhdhA2BreGeY6b1z3ofdRFuW+8b0Rpu1UCuLLqKTQY3062MgcQ+BsyIAi66yw1YYH93GNne589DWd2Abdcoo0ggGrUjoWEF8Vrx1kDHuxD+8E7Ll7HSEhU9IhcTCSByNM/4SP++cAip468MR6hy5qrwOZuuw0+GA76fQE0olhT/bNdvY0PEyY69nuKo8sNeKOuSju/mee7iUlSCl5uGSW1J3NRIYfa47XXL32stAelMm5JxrH03PqCyPALpjQkJwgqYbRYpMdFOcUIu1RBUQ8cKNXsSOrtzyUnR5pVuBj67gc5VBCeqmVg0lVzrpotiXZ09V2A1rKv/omvi9YfkFVkSBPfl6CJiTq4ZANe1aqMuyUQmz/wEAAP//Ep8/EQ==" +} diff --git a/x-pack/metricbeat/module/containerd/helper.go b/x-pack/metricbeat/module/containerd/helper.go new file mode 100644 index 00000000000..b4fac5f72fd --- /dev/null +++ b/x-pack/metricbeat/module/containerd/helper.go @@ -0,0 +1,25 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package containerd + +import "github.com/elastic/beats/v7/libbeat/common" + +// GetAndDeleteCid deletes and returns container id from an event +func GetAndDeleteCid(event common.MapStr) (cID string) { + if containerID, ok := event["id"]; ok { + cID = (containerID).(string) + event.Delete("id") + } + return +} + +// GetAndDeleteNamespace deletes and returns namespace from an event +func GetAndDeleteNamespace(event common.MapStr) (namespace string) { + if ns, ok := event["namespace"]; ok { + namespace = (ns).(string) + event.Delete("namespace") + } + return +} diff --git a/x-pack/metricbeat/module/containerd/memory/_meta/data.json b/x-pack/metricbeat/module/containerd/memory/_meta/data.json new file mode 100644 index 00000000000..67eceb1ad10 --- /dev/null +++ b/x-pack/metricbeat/module/containerd/memory/_meta/data.json @@ -0,0 +1,51 @@ +{ + "@timestamp":"2016-05-23T08:05:34.853Z", + "beat":{ + "hostname":"beathost", + "name":"beathost" + }, + "metricset":{ + "host":"localhost", + "module":"containerd", + "name":"memory", + "rtt":44269 + }, + "containerd": { + "namespace": "k8s.io", + "memory": { + "activeFiles": 0, + "workingset": { + "pct": 0.07224264705882352 + }, + "swap": { + "limit": 9223372036854772000, + "total": 49373184, + "max": 49848320, + "fail": { + "count": 0 + } + }, + "kernel": { + "limit": 9223372036854772000, + "fail": { + "count": 0 + }, + "max": 843776, + "total": 827392 + }, + "rss": 13651968, + "cache": 33792000, + "usage": { + "fail": { + "count": 0 + }, + "limit": 178257920, + "total": 49373184, + "pct": 0.27697610294117647, + "max": 49848320 + }, + "inactiveFiles": 36495360 + } + }, + "type":"metricsets" +} diff --git a/x-pack/metricbeat/module/containerd/memory/_meta/docs.asciidoc b/x-pack/metricbeat/module/containerd/memory/_meta/docs.asciidoc 
new file mode 100644 index 00000000000..64ce9bf9023 --- /dev/null +++ b/x-pack/metricbeat/module/containerd/memory/_meta/docs.asciidoc @@ -0,0 +1 @@ +This is the memory metricset of the module containerd. diff --git a/x-pack/metricbeat/module/containerd/memory/_meta/fields.yml b/x-pack/metricbeat/module/containerd/memory/_meta/fields.yml new file mode 100644 index 00000000000..e637302364d --- /dev/null +++ b/x-pack/metricbeat/module/containerd/memory/_meta/fields.yml @@ -0,0 +1,109 @@ +- name: memory + type: group + release: beta + description: > + memory + fields: + - name: workingset.pct + type: scaled_float + format: percent + description: > + Memory working set percentage. + - name: rss + type: long + format: bytes + description: > + Total memory resident set size. + - name: activeFiles + type: long + format: bytes + description: > + Total active file bytes. + - name: cache + type: long + format: bytes + description: > + Total cache bytes. + - name: inactiveFiles + type: long + format: bytes + description: > + Total inactive file bytes. + - name: usage + type: group + description: > + Usage memory stats. + fields: + - name: max + type: long + format: bytes + description: > + Max memory usage. + - name: pct + type: scaled_float + format: percent + description: > + Total allocated memory percentage. + - name: total + type: long + format: bytes + description: > + Total memory usage. + - name: fail.count + type: scaled_float + description: > + Fail counter. + - name: limit + type: long + format: bytes + description: > + Memory usage limit. + - name: kernel + type: group + description: > + Kernel memory stats. + fields: + - name: max + type: long + format: bytes + description: > + Kernel max memory usage. + - name: total + type: long + format: bytes + description: > + Kernel total memory usage. + - name: fail.count + type: scaled_float + description: > + Kernel fail counter. + - name: limit + type: long + format: bytes + description: > + Kernel memory limit. + - name: swap + type: group + description: > + Swap memory stats. + fields: + - name: max + type: long + format: bytes + description: > + Swap max memory usage. + - name: total + type: long + format: bytes + description: > + Swap total memory usage. + - name: fail.count + type: scaled_float + description: > + Swap fail counter. + - name: limit + type: long + format: bytes + description: > + Swap memory limit. 
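The two percentage fields above are plain ratios against the memory limit. The standalone arithmetic below (not module code) reproduces the `workingset.pct` and `usage.pct` values shown in the sample memory event earlier:

```go
package main

import "fmt"

func main() {
	// Values taken from the sample memory event above.
	total := 49373184.0         // usage.total
	inactiveFiles := 36495360.0 // inactiveFiles
	limit := 178257920.0        // usage.limit

	// workingset.pct = (usage.total - inactiveFiles) / usage.limit
	fmt.Println((total - inactiveFiles) / limit) // ~0.072242647 (workingset.pct)
	// usage.pct = usage.total / usage.limit
	fmt.Println(total / limit) // ~0.276976102 (usage.pct)
}
```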
+ diff --git a/x-pack/metricbeat/module/containerd/memory/_meta/test/containerd.v1.5.2.expected b/x-pack/metricbeat/module/containerd/memory/_meta/test/containerd.v1.5.2.expected new file mode 100644 index 00000000000..016b938235b --- /dev/null +++ b/x-pack/metricbeat/module/containerd/memory/_meta/test/containerd.v1.5.2.expected @@ -0,0 +1,56 @@ +[ + { + "RootFields": { + "container": { + "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" + } + }, + "ModuleFields": { + "namespace": "k8s.io" + }, + "MetricSetFields": { + "activeFiles": 1216512, + "cache": 140980224, + "inactiveFiles": 139284480, + "kernel": { + "fail": { + "count": 0 + }, + "limit": 9223372036854772000, + "max": 6496256, + "total": 6459392 + }, + "rss": 43794432, + "swap": { + "fail": { + "count": 0 + }, + "limit": 9223372036854772000, + "max": 209727488, + "total": 191959040 + }, + "usage": { + "fail": { + "count": 0 + }, + "limit": 209715200, + "max": 209608704, + "pct": 0.9148046875, + "total": 191848448 + }, + "workingset": { + "pct": 0.25064453125 + } + }, + "Index": "", + "ID": "", + "Namespace": "containerd.memory", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + } +] \ No newline at end of file diff --git a/x-pack/metricbeat/module/containerd/memory/memory.go b/x-pack/metricbeat/module/containerd/memory/memory.go new file mode 100644 index 00000000000..0bdc961a0e4 --- /dev/null +++ b/x-pack/metricbeat/module/containerd/memory/memory.go @@ -0,0 +1,167 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package memory
+
+import (
+	"fmt"
+
+	"github.com/elastic/beats/v7/libbeat/common"
+
+	"github.com/pkg/errors"
+
+	"github.com/elastic/beats/v7/libbeat/common/cfgwarn"
+
+	"github.com/elastic/beats/v7/metricbeat/helper/prometheus"
+	"github.com/elastic/beats/v7/metricbeat/mb"
+	"github.com/elastic/beats/v7/metricbeat/mb/parse"
+	"github.com/elastic/beats/v7/x-pack/metricbeat/module/containerd"
+)
+
+const (
+	defaultScheme = "http"
+	defaultPath   = "/v1/metrics"
+)
+
+var (
+	// hostParser validates Prometheus URLs
+	hostParser = parse.URLHostParserBuilder{
+		DefaultScheme: defaultScheme,
+		DefaultPath:   defaultPath,
+	}.Build()
+
+	// Mapping of containerd memory metrics
+	mapping = &prometheus.MetricsMapping{
+		Metrics: map[string]prometheus.MetricMap{
+			"container_memory_usage_max_bytes":           prometheus.Metric("usage.max"),
+			"container_memory_usage_usage_bytes":         prometheus.Metric("usage.total"),
+			"container_memory_usage_limit_bytes":         prometheus.Metric("usage.limit"),
+			"container_memory_usage_failcnt_total":       prometheus.Metric("usage.fail.count"),
+			"container_memory_kernel_max_bytes":          prometheus.Metric("kernel.max"),
+			"container_memory_kernel_usage_bytes":        prometheus.Metric("kernel.total"),
+			"container_memory_kernel_limit_bytes":        prometheus.Metric("kernel.limit"),
+			"container_memory_kernel_failcnt_total":      prometheus.Metric("kernel.fail.count"),
+			"container_memory_swap_max_bytes":            prometheus.Metric("swap.max"),
+			"container_memory_swap_usage_bytes":          prometheus.Metric("swap.total"),
+			"container_memory_swap_limit_bytes":          prometheus.Metric("swap.limit"),
+			"container_memory_swap_failcnt_total":        prometheus.Metric("swap.fail.count"),
+			"container_memory_total_inactive_file_bytes": prometheus.Metric("inactiveFiles"),
+			"container_memory_total_active_file_bytes":   prometheus.Metric("activeFiles"),
+			"container_memory_total_cache_bytes":         prometheus.Metric("cache"),
+			"container_memory_total_rss_bytes":           prometheus.Metric("rss"),
+		},
+		Labels: map[string]prometheus.LabelMap{
+			"container_id": prometheus.KeyLabel("id"),
+			"namespace":    prometheus.KeyLabel("namespace"),
+		},
+	}
+)
+
+// metricset for containerd memory is a Prometheus-based metricset
+type metricset struct {
+	mb.BaseMetricSet
+	prometheusClient prometheus.Prometheus
+	mod              containerd.Module
+	calcPct          bool
+}
+
+// init registers the MetricSet with the central registry.
+// The New method will be called after the setup of the module and before starting to fetch data.
+func init() {
+	mb.Registry.MustAddMetricSet("containerd", "memory", New,
+		mb.WithHostParser(hostParser),
+		mb.DefaultMetricSet(),
+	)
+}
+
+// New creates a new instance of the MetricSet. New is responsible for unpacking
+// any MetricSet specific configuration options if there are any.
+func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
+	cfgwarn.Beta("The containerd memory metricset is beta.")
+
+	pc, err := prometheus.NewPrometheusClient(base)
+	if err != nil {
+		return nil, err
+	}
+	config := containerd.DefaultConfig()
+	if err := base.Module().UnpackConfig(&config); err != nil {
+		return nil, err
+	}
+
+	mod, ok := base.Module().(containerd.Module)
+	if !ok {
+		return nil, fmt.Errorf("must be child of containerd module")
+	}
+	return &metricset{
+		BaseMetricSet:    base,
+		prometheusClient: pc,
+		mod:              mod,
+		calcPct:          config.CalculateMemPct,
+	}, nil
+}
+
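The `MetricsMapping` above is essentially a rename table: each cgroup counter scraped from containerd is re-keyed to the dotted path documented in fields.yml, and the `container_id` and `namespace` labels become the event's key labels. A stripped-down, stdlib-only illustration of that translation (the real work is done by `prometheus.ProcessMetrics`; the sample values come from the v1.5.2 test data):

```go
package main

import "fmt"

func main() {
	// A few entries of the rename table, flattened for illustration.
	rename := map[string]string{
		"container_memory_usage_usage_bytes": "usage.total",
		"container_memory_usage_limit_bytes": "usage.limit",
		"container_memory_total_rss_bytes":   "rss",
	}
	// One scrape's worth of samples for a single container.
	samples := map[string]float64{
		"container_memory_usage_usage_bytes": 191848448,
		"container_memory_usage_limit_bytes": 209715200,
		"container_memory_total_rss_bytes":   43794432,
	}
	event := map[string]float64{}
	for name, value := range samples {
		if field, ok := rename[name]; ok {
			event[field] = value
		}
	}
	// Prints the renamed fields, e.g. usage.total:1.91848448e+08.
	fmt.Println(event)
}
```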
+// Fetch gathers information from containerd and reports events with this information.
+func (m *metricset) Fetch(reporter mb.ReporterV2) error {
+	families, _, err := m.mod.GetContainerdMetricsFamilies(m.prometheusClient)
+	if err != nil {
+		return errors.Wrap(err, "error getting families")
+	}
+	events, err := m.prometheusClient.ProcessMetrics(families, mapping)
+	if err != nil {
+		return errors.Wrap(err, "error getting events")
+	}
+
+	for _, event := range events {
+
+		// setting ECS container.id and module field containerd.namespace
+		containerFields := common.MapStr{}
+		moduleFields := common.MapStr{}
+		rootFields := common.MapStr{}
+
+		cID := containerd.GetAndDeleteCid(event)
+		namespace := containerd.GetAndDeleteNamespace(event)
+
+		containerFields.Put("id", cID)
+		rootFields.Put("container", containerFields)
+		moduleFields.Put("namespace", namespace)
+
+		// Calculate memory total usage percentage
+		if m.calcPct {
+			inactiveFiles, err := event.GetValue("inactiveFiles")
+			if err != nil {
+				m.Logger().Debugf("memoryUsagePct calculation skipped. inactiveFiles not present in the event: %v", err)
+				continue
+			}
+			usageTotal, err := event.GetValue("usage.total")
+			if err != nil {
+				m.Logger().Debugf("memoryUsagePct calculation skipped. usage.total not present in the event: %v", err)
+				continue
+			}
+			memoryLimit, err := event.GetValue("usage.limit")
+			if err != nil {
+				m.Logger().Debugf("memoryUsagePct calculation skipped. usage.limit not present in the event: %v", err)
+				continue
+			}
+			mLfloat, ok := memoryLimit.(float64)
+			if ok && mLfloat != 0.0 {
+				// calculate working set memory usage
+				workingSetUsage := usageTotal.(float64) - inactiveFiles.(float64)
+				workingSetUsagePct := workingSetUsage / mLfloat
+				event.Put("workingset.pct", workingSetUsagePct)
+
+				memoryUsagePct := usageTotal.(float64) / mLfloat
+				event.Put("usage.pct", memoryUsagePct)
+				m.Logger().Debugf("memoryUsagePct for %+v is %+v", cID, memoryUsagePct)
+			}
+		}
+
+		reporter.Event(mb.Event{
+			RootFields:      rootFields,
+			ModuleFields:    moduleFields,
+			MetricSetFields: event,
+			Namespace:       "containerd.memory",
+		})
+	}
+	return nil
+}
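Both Fetch implementations pull the key labels back out of each event with `GetAndDeleteCid` and `GetAndDeleteNamespace` before reporting, so `id` and `namespace` appear once at the event root rather than again under `MetricSetFields`. A stdlib analogue of that read-then-delete move, with a plain map standing in for `common.MapStr`:

```go
package main

import "fmt"

// getAndDelete mirrors the helpers above: read a string key if present,
// then remove it so it is not duplicated in the reported metric fields.
func getAndDelete(event map[string]interface{}, key string) (val string) {
	if v, ok := event[key]; ok {
		val = v.(string)
		delete(event, key)
	}
	return
}

func main() {
	event := map[string]interface{}{
		"id":          "7434687dbe36", // truncated container ID from the test data
		"namespace":   "k8s.io",
		"usage.total": 191848448.0,
	}
	fmt.Println(getAndDelete(event, "id"))        // 7434687dbe36
	fmt.Println(getAndDelete(event, "namespace")) // k8s.io
	fmt.Println(event)                            // only the metric fields remain
}
```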
diff --git a/x-pack/metricbeat/module/containerd/memory/memory_test.go b/x-pack/metricbeat/module/containerd/memory/memory_test.go
new file mode 100644
index 00000000000..027ea9b598f
--- /dev/null
+++ b/x-pack/metricbeat/module/containerd/memory/memory_test.go
@@ -0,0 +1,26 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+//go:build !integration
+// +build !integration
+
+package memory
+
+import (
+	"testing"
+
+	"github.com/elastic/beats/v7/metricbeat/helper/prometheus/ptest"
+	_ "github.com/elastic/beats/v7/x-pack/metricbeat/module/containerd"
+)
+
+func TestEventMapping(t *testing.T) {
+	ptest.TestMetricSet(t, "containerd", "memory",
+		ptest.TestCases{
+			{
+				MetricsFile:  "../_meta/test/containerd.v1.5.2",
+				ExpectedFile: "./_meta/test/containerd.v1.5.2.expected",
+			},
+		},
+	)
+}
diff --git a/x-pack/metricbeat/modules.d/containerd.yml.disabled b/x-pack/metricbeat/modules.d/containerd.yml.disabled
new file mode 100644
index 00000000000..cc735a455c7
--- /dev/null
+++ b/x-pack/metricbeat/modules.d/containerd.yml.disabled
@@ -0,0 +1,14 @@
+# Module: containerd
+# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-containerd.html
+
+- module: containerd
+  metricsets: ["cpu", "memory", "blkio"]
+  period: 10s
+  # The containerd metrics endpoint is configured in /etc/containerd/config.toml.
+  # The metrics endpoint does not listen by default.
+  # https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.toml.5.md
+  hosts: ["localhost:1338"]
+  # If set to true, CPU and memory usage percentages will be calculated. Default is true.
+  calcpct.cpu: true
+  calcpct.memory: true
+

From 055798ae3561da8cbb4749984d1ff57964fbee2f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?=
Date: Mon, 17 Jan 2022 17:06:23 +0100
Subject: [PATCH 10/69] Update Index template loading guide to use the correct
 endpoint (#29869)

This PR updates the documentation for loading index templates manually.
The endpoint used in the documentation was outdated. That led to some
confusion.
---
 libbeat/docs/howto/load-index-templates.asciidoc | 8 ++++----
 libbeat/docs/upgrading.asciidoc                  | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/libbeat/docs/howto/load-index-templates.asciidoc b/libbeat/docs/howto/load-index-templates.asciidoc
index 06862a5cc22..5225e228f91 100644
--- a/libbeat/docs/howto/load-index-templates.asciidoc
+++ b/libbeat/docs/howto/load-index-templates.asciidoc
@@ -292,7 +292,7 @@ ifdef::deb_os,rpm_os[]
 
 ["source","sh",subs="attributes"]
 ----
-curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_template/{beatname_lc}-{version} -d@{beatname_lc}.template.json
+curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_index_template/{beatname_lc}-{version} -d@{beatname_lc}.template.json
 ----
 
 endif::deb_os,rpm_os[]
@@ -301,7 +301,7 @@ ifdef::mac_os[]
 
 ["source","sh",subs="attributes"]
 ----
-curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_template/{beatname_lc}-{version} -d@{beatname_lc}.template.json
+curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_index_template/{beatname_lc}-{version} -d@{beatname_lc}.template.json
 ----
 
 endif::mac_os[]
@@ -310,7 +310,7 @@ ifdef::linux_os[]
 
 ["source","sh",subs="attributes"]
 ----
-curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_template/{beatname_lc}-{version} -d@{beatname_lc}.template.json
+curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_index_template/{beatname_lc}-{version} -d@{beatname_lc}.template.json
 ----
 
 endif::linux_os[]
@@ -321,6 +321,6 @@ endif::win_only[]
 
 ["source","sh",subs="attributes"]
 ----
-PS > Invoke-RestMethod -Method Put -ContentType "application/json" -InFile {beatname_lc}.template.json -Uri http://localhost:9200/_template/{beatname_lc}-{version}
+PS > Invoke-RestMethod
-Method Put -ContentType "application/json" -InFile {beatname_lc}.template.json -Uri http://localhost:9200/_index_template/{beatname_lc}-{version} ---- endif::win_os[] diff --git a/libbeat/docs/upgrading.asciidoc b/libbeat/docs/upgrading.asciidoc index b0370a77812..6d2ae6478f7 100644 --- a/libbeat/docs/upgrading.asciidoc +++ b/libbeat/docs/upgrading.asciidoc @@ -330,7 +330,7 @@ layer. See <>. + ["source","sh",subs="attributes"] ---- -DELETE /_template/metricbeat-{version} +DELETE /_index_template/metricbeat-{version} ---- + Because the index template was loaded without the compatibility layer enabled, From d396baf9f79e1e98038c7e46434f874b13e4e7cc Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 18 Jan 2022 02:03:42 -0500 Subject: [PATCH 11/69] [Automation] Update elastic stack version to 8.1.0-677b9ef0 for testing (#29881) Co-authored-by: apmmachine --- testing/environments/snapshot-oss.yml | 6 +++--- testing/environments/snapshot.yml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/testing/environments/snapshot-oss.yml b/testing/environments/snapshot-oss.yml index 8a398f7b4c8..26b198effc9 100644 --- a/testing/environments/snapshot-oss.yml +++ b/testing/environments/snapshot-oss.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-7004acda-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-677b9ef0-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -21,7 +21,7 @@ services: - "script.context.template.cache_max_size=2000" logstash: - image: docker.elastic.co/logstash/logstash-oss:8.1.0-7004acda-SNAPSHOT + image: docker.elastic.co/logstash/logstash-oss:8.1.0-677b9ef0-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -31,7 +31,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.1.0-7004acda-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.1.0-677b9ef0-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index c8552faf0c0..4cae61b0cbe 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-7004acda-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-677b9ef0-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -37,7 +37,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.1.0-7004acda-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.1.0-677b9ef0-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 From 660a02efaac7dc92398436cf9536ef39a7623540 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Tue, 18 Jan 2022 09:21:41 +0100 Subject: [PATCH 12/69] Copy small helpers to Elastic Agent from Beats to reduce dependency on Libbeat (#29867) --- .../gateway/fleet/fleet_gateway.go | 2 +- .../pkg/agent/application/info/agent_id.go | 2 +- 
.../pkg/agent/application/upgrade/rollback.go | 2 +- .../elastic-agent/pkg/agent/cmd/enroll_cmd.go | 3 +- x-pack/elastic-agent/pkg/agent/cmd/install.go | 11 ++- x-pack/elastic-agent/pkg/cli/confirm.go | 50 +++++++++++++ x-pack/elastic-agent/pkg/cli/input.go | 29 ++++++++ .../elastic-agent/pkg/core/backoff/backoff.go | 24 +++++++ .../pkg/core/backoff/backoff_test.go | 70 +++++++++++++++++++ .../pkg/core/backoff/equal_jitter.go | 60 ++++++++++++++++ .../pkg/core/backoff/exponential.go | 54 ++++++++++++++ .../pkg/core/retry/retrystrategy.go | 2 +- .../pkg/core/retry/retrystrategy_test.go | 2 +- 13 files changed, 298 insertions(+), 13 deletions(-) create mode 100644 x-pack/elastic-agent/pkg/cli/confirm.go create mode 100644 x-pack/elastic-agent/pkg/cli/input.go create mode 100644 x-pack/elastic-agent/pkg/core/backoff/backoff.go create mode 100644 x-pack/elastic-agent/pkg/core/backoff/backoff_test.go create mode 100644 x-pack/elastic-agent/pkg/core/backoff/equal_jitter.go create mode 100644 x-pack/elastic-agent/pkg/core/backoff/exponential.go diff --git a/x-pack/elastic-agent/pkg/agent/application/gateway/fleet/fleet_gateway.go b/x-pack/elastic-agent/pkg/agent/application/gateway/fleet/fleet_gateway.go index 5eaf4c47984..ee16f4b9a9d 100644 --- a/x-pack/elastic-agent/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/x-pack/elastic-agent/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -10,10 +10,10 @@ import ( "sync" "time" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/backoff" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/state" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/fleetapi/client" - "github.com/elastic/beats/v7/libbeat/common/backoff" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/gateway" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/pipeline" diff --git a/x-pack/elastic-agent/pkg/agent/application/info/agent_id.go b/x-pack/elastic-agent/pkg/agent/application/info/agent_id.go index 6b62f32d396..76827518b50 100644 --- a/x-pack/elastic-agent/pkg/agent/application/info/agent_id.go +++ b/x-pack/elastic-agent/pkg/agent/application/info/agent_id.go @@ -13,12 +13,12 @@ import ( "github.com/gofrs/uuid" "gopkg.in/yaml.v2" - "github.com/elastic/beats/v7/libbeat/common/backoff" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/filelock" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/storage" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/backoff" monitoringConfig "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/monitoring/config" ) diff --git a/x-pack/elastic-agent/pkg/agent/application/upgrade/rollback.go b/x-pack/elastic-agent/pkg/agent/application/upgrade/rollback.go index 9267d019825..7af2d0c8a7d 100644 --- a/x-pack/elastic-agent/pkg/agent/application/upgrade/rollback.go +++ b/x-pack/elastic-agent/pkg/agent/application/upgrade/rollback.go @@ -14,12 +14,12 @@ import ( "github.com/hashicorp/go-multierror" - "github.com/elastic/beats/v7/libbeat/common/backoff" 
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/control" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/control/client" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/install" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/backoff" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" ) diff --git a/x-pack/elastic-agent/pkg/agent/cmd/enroll_cmd.go b/x-pack/elastic-agent/pkg/agent/cmd/enroll_cmd.go index 0023d83fde0..da6c2be9e5e 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/enroll_cmd.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/enroll_cmd.go @@ -17,8 +17,6 @@ import ( "gopkg.in/yaml.v2" - "github.com/elastic/beats/v7/libbeat/common/backoff" - "github.com/elastic/beats/v7/libbeat/common/transport/httpcommon" "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application" @@ -34,6 +32,7 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/cli" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/authority" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/backoff" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" monitoringConfig "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/monitoring/config" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/process" diff --git a/x-pack/elastic-agent/pkg/agent/cmd/install.go b/x-pack/elastic-agent/pkg/agent/cmd/install.go index 3796b64915e..e6934bce538 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/install.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/install.go @@ -11,7 +11,6 @@ import ( "github.com/spf13/cobra" - c "github.com/elastic/beats/v7/libbeat/common/cli" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/filelock" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/install" @@ -73,7 +72,7 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error if status == install.Broken { if !force { fmt.Fprintf(streams.Out, "Elastic Agent is installed but currently broken: %s\n", reason) - confirm, err := c.Confirm(fmt.Sprintf("Continuing will re-install Elastic Agent over the current installation at %s. Do you want to continue?", paths.InstallPath), true) + confirm, err := cli.Confirm(fmt.Sprintf("Continuing will re-install Elastic Agent over the current installation at %s. Do you want to continue?", paths.InstallPath), true) if err != nil { return fmt.Errorf("problem reading prompt response") } @@ -83,7 +82,7 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error } } else { if !force { - confirm, err := c.Confirm(fmt.Sprintf("Elastic Agent will be installed at %s and will run as a service. Do you want to continue?", paths.InstallPath), true) + confirm, err := cli.Confirm(fmt.Sprintf("Elastic Agent will be installed at %s and will run as a service. 
Do you want to continue?", paths.InstallPath), true) if err != nil { return fmt.Errorf("problem reading prompt response") } @@ -106,7 +105,7 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error askEnroll = false } if askEnroll { - confirm, err := c.Confirm("Do you want to enroll this Agent into Fleet?", true) + confirm, err := cli.Confirm("Do you want to enroll this Agent into Fleet?", true) if err != nil { return fmt.Errorf("problem reading prompt response") } @@ -122,7 +121,7 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error if enroll && fleetServer == "" { if url == "" { - url, err = c.ReadInput("URL you want to enroll this Agent into:") + url, err = cli.ReadInput("URL you want to enroll this Agent into:") if err != nil { return fmt.Errorf("problem reading prompt response") } @@ -132,7 +131,7 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error } } if token == "" { - token, err = c.ReadInput("Fleet enrollment token:") + token, err = cli.ReadInput("Fleet enrollment token:") if err != nil { return fmt.Errorf("problem reading prompt response") } diff --git a/x-pack/elastic-agent/pkg/cli/confirm.go b/x-pack/elastic-agent/pkg/cli/confirm.go new file mode 100644 index 00000000000..34024ea675e --- /dev/null +++ b/x-pack/elastic-agent/pkg/cli/confirm.go @@ -0,0 +1,50 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cli + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "strings" +) + +// Confirm shows the confirmation text and ask the user to answer (y/n) +// default will be shown in uppercase and be selected if the user hits enter +// returns true for yes, false for no +func Confirm(prompt string, def bool) (bool, error) { + reader := bufio.NewReader(os.Stdin) + return confirm(reader, os.Stdout, prompt, def) +} + +func confirm(r io.Reader, out io.Writer, prompt string, def bool) (bool, error) { + options := " [Y/n]" + if !def { + options = " [y/N]" + } + + reader := bufio.NewScanner(r) + for { + fmt.Fprintf(out, prompt+options+":") + + if !reader.Scan() { + break + } + switch strings.ToLower(reader.Text()) { + case "": + return def, nil + case "y", "yes", "yeah": + return true, nil + case "n", "no": + return false, nil + default: + fmt.Fprintln(out, "Please write 'y' or 'n'") + } + } + + return false, errors.New("error reading user input") +} diff --git a/x-pack/elastic-agent/pkg/cli/input.go b/x-pack/elastic-agent/pkg/cli/input.go new file mode 100644 index 00000000000..3468651026a --- /dev/null +++ b/x-pack/elastic-agent/pkg/cli/input.go @@ -0,0 +1,29 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cli + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" +) + +// ReadInput shows the text and ask the user to provide input. 
+func ReadInput(prompt string) (string, error) {
+	reader := bufio.NewReader(os.Stdin)
+	return input(reader, os.Stdout, prompt)
+}
+
+func input(r io.Reader, out io.Writer, prompt string) (string, error) {
+	reader := bufio.NewScanner(r)
+	fmt.Fprintf(out, prompt+" ")
+
+	if !reader.Scan() {
+		return "", errors.New("error reading user input")
+	}
+	return reader.Text(), nil
+}
diff --git a/x-pack/elastic-agent/pkg/core/backoff/backoff.go b/x-pack/elastic-agent/pkg/core/backoff/backoff.go
new file mode 100644
index 00000000000..06723e7db9a
--- /dev/null
+++ b/x-pack/elastic-agent/pkg/core/backoff/backoff.go
@@ -0,0 +1,24 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package backoff
+
+// Backoff defines the interface for backoff strategies.
+type Backoff interface {
+	// Wait blocks for a duration of time governed by the backoff strategy.
+	Wait() bool
+
+	// Reset resets the backoff duration to an initial value governed by the backoff strategy.
+	Reset()
+}
+
+// WaitOnError is a convenience method: if an error is received it will block; if no error is
+// received, the backoff will be reset.
+func WaitOnError(b Backoff, err error) bool {
+	if err == nil {
+		b.Reset()
+		return true
+	}
+	return b.Wait()
+}
diff --git a/x-pack/elastic-agent/pkg/core/backoff/backoff_test.go b/x-pack/elastic-agent/pkg/core/backoff/backoff_test.go
new file mode 100644
index 00000000000..88498ff5a58
--- /dev/null
+++ b/x-pack/elastic-agent/pkg/core/backoff/backoff_test.go
@@ -0,0 +1,70 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+ +package backoff + +import ( + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +type factory func(<-chan struct{}) Backoff + +func TestBackoff(t *testing.T) { + t.Run("test close channel", testCloseChannel) + t.Run("test unblock after some time", testUnblockAfterInit) +} + +func testCloseChannel(t *testing.T) { + init := 2 * time.Second + max := 5 * time.Minute + + tests := map[string]factory{ + "ExpBackoff": func(done <-chan struct{}) Backoff { + return NewExpBackoff(done, init, max) + }, + "EqualJitterBackoff": func(done <-chan struct{}) Backoff { + return NewEqualJitterBackoff(done, init, max) + }, + } + + for name, f := range tests { + t.Run(name, func(t *testing.T) { + c := make(chan struct{}) + b := f(c) + close(c) + assert.False(t, b.Wait()) + }) + } +} + +func testUnblockAfterInit(t *testing.T) { + init := 1 * time.Second + max := 5 * time.Minute + + tests := map[string]factory{ + "ExpBackoff": func(done <-chan struct{}) Backoff { + return NewExpBackoff(done, init, max) + }, + "EqualJitterBackoff": func(done <-chan struct{}) Backoff { + return NewEqualJitterBackoff(done, init, max) + }, + } + + for name, f := range tests { + t.Run(name, func(t *testing.T) { + c := make(chan struct{}) + defer close(c) + + b := f(c) + + startedAt := time.Now() + assert.True(t, WaitOnError(b, errors.New("bad bad"))) + assert.True(t, time.Now().Sub(startedAt) >= init) + }) + } +} diff --git a/x-pack/elastic-agent/pkg/core/backoff/equal_jitter.go b/x-pack/elastic-agent/pkg/core/backoff/equal_jitter.go new file mode 100644 index 00000000000..d87077397cd --- /dev/null +++ b/x-pack/elastic-agent/pkg/core/backoff/equal_jitter.go @@ -0,0 +1,60 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package backoff + +import ( + "math/rand" + "time" +) + +// EqualJitterBackoff implements an equal jitter strategy, meaning the wait time will consist of two parts, +// the first will be exponential and the other half will be random and will provide the jitter +// necessary to distribute the wait on remote endpoint. +type EqualJitterBackoff struct { + duration time.Duration + done <-chan struct{} + + init time.Duration + max time.Duration + + last time.Time +} + +// NewEqualJitterBackoff returns a new EqualJitter object. +func NewEqualJitterBackoff(done <-chan struct{}, init, max time.Duration) Backoff { + return &EqualJitterBackoff{ + duration: init * 2, // Allow to sleep at least the init period on the first wait. + done: done, + init: init, + max: max, + } +} + +// Reset resets the duration of the backoff. +func (b *EqualJitterBackoff) Reset() { + // Allow to sleep at least the init period on the first wait. + b.duration = b.init * 2 +} + +// Wait block until either the timer is completed or channel is done. +func (b *EqualJitterBackoff) Wait() bool { + // Make sure we have always some minimal back off and jitter. + temp := int64(b.duration / 2) + backoff := time.Duration(temp + rand.Int63n(temp)) + + // increase duration for next wait. 
+ b.duration *= 2 + if b.duration > b.max { + b.duration = b.max + } + + select { + case <-b.done: + return false + case <-time.After(backoff): + b.last = time.Now() + return true + } +} diff --git a/x-pack/elastic-agent/pkg/core/backoff/exponential.go b/x-pack/elastic-agent/pkg/core/backoff/exponential.go new file mode 100644 index 00000000000..81224b95eb5 --- /dev/null +++ b/x-pack/elastic-agent/pkg/core/backoff/exponential.go @@ -0,0 +1,54 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package backoff + +import ( + "time" +) + +// ExpBackoff exponential backoff, will wait an initial time and exponentially +// increases the wait time up to a predefined maximum. Resetting Backoff will reset the next sleep +// timer to the initial backoff duration. +type ExpBackoff struct { + duration time.Duration + done <-chan struct{} + + init time.Duration + max time.Duration + + last time.Time +} + +// NewExpBackoff returns a new exponential backoff. +func NewExpBackoff(done <-chan struct{}, init, max time.Duration) Backoff { + return &ExpBackoff{ + duration: init, + done: done, + init: init, + max: max, + } +} + +// Reset resets the duration of the backoff. +func (b *ExpBackoff) Reset() { + b.duration = b.init +} + +// Wait block until either the timer is completed or channel is done. +func (b *ExpBackoff) Wait() bool { + backoff := b.duration + b.duration *= 2 + if b.duration > b.max { + b.duration = b.max + } + + select { + case <-b.done: + return false + case <-time.After(backoff): + b.last = time.Now() + return true + } +} diff --git a/x-pack/elastic-agent/pkg/core/retry/retrystrategy.go b/x-pack/elastic-agent/pkg/core/retry/retrystrategy.go index b4da40d9bf2..d088c705bfe 100644 --- a/x-pack/elastic-agent/pkg/core/retry/retrystrategy.go +++ b/x-pack/elastic-agent/pkg/core/retry/retrystrategy.go @@ -8,7 +8,7 @@ import ( "context" "time" - "github.com/elastic/beats/v7/libbeat/common/backoff" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/backoff" ) // DoWithBackoff ignores retry config of delays and lets backoff decide how much time it needs. 
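As a reference for reviewers, here is a minimal sketch of how the relocated
backoff package is consumed. NewEqualJitterBackoff, WaitOnError, and the
done-channel semantics are exactly as defined in the files added above; the
surrounding retry loop and function names are illustrative only, not code
from this patch:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/backoff"
)

// fetchWithRetry retries a flaky operation, sleeping between attempts with
// equal-jitter backoff. Closing done aborts any in-progress wait.
func fetchWithRetry(done <-chan struct{}, attempt func() error) error {
	b := backoff.NewEqualJitterBackoff(done, time.Second, time.Minute)
	var err error
	for i := 0; i < 5; i++ {
		if err = attempt(); err == nil {
			return nil
		}
		// WaitOnError sleeps here because err is non-nil; it returns false
		// only if done was closed while waiting.
		if !backoff.WaitOnError(b, err) {
			return errors.New("aborted while backing off")
		}
	}
	return err
}

func main() {
	done := make(chan struct{})
	err := fetchWithRetry(done, func() error { return errors.New("transient failure") })
	fmt.Println(err)
}

Equal jitter is used in the sketch (rather than plain exponential) because,
per the package comments above, it spreads retry timing so many agents do not
hit a remote endpoint in lockstep; NewExpBackoff is the drop-in alternative
when jitter is not wanted.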
diff --git a/x-pack/elastic-agent/pkg/core/retry/retrystrategy_test.go b/x-pack/elastic-agent/pkg/core/retry/retrystrategy_test.go index e93b784eb13..4c8824fd6fa 100644 --- a/x-pack/elastic-agent/pkg/core/retry/retrystrategy_test.go +++ b/x-pack/elastic-agent/pkg/core/retry/retrystrategy_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/elastic/beats/v7/libbeat/common/backoff" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/backoff" ) func TestRetry(t *testing.T) { From e1ca29dc7f2d64ef72829f886f09fc3114e76f26 Mon Sep 17 00:00:00 2001 From: Francesco Gualazzi Date: Tue, 18 Jan 2022 11:12:00 +0100 Subject: [PATCH 13/69] Fix Filebeat dissect processor field tokenization in documentation (#29680) Signed-off-by: inge4pres --- libbeat/processors/dissect/docs/dissect.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libbeat/processors/dissect/docs/dissect.asciidoc b/libbeat/processors/dissect/docs/dissect.asciidoc index b3dcf240c7e..a7a68a45c12 100644 --- a/libbeat/processors/dissect/docs/dissect.asciidoc +++ b/libbeat/processors/dissect/docs/dissect.asciidoc @@ -74,14 +74,14 @@ For this example, imagine that an application generates the following messages: "789 - App02 - Database is refreshing tables" ---- -Use the `dissect` processor to split each message into two fields, for example, +Use the `dissect` processor to split each message into three fields, for example, `service.pid`, `service.name` and `service.status`: [source,yaml] ---- processors: - dissect: - tokenizer: '"%{pid|integer} - %{service.name} - %{service.status}"' + tokenizer: '"%{service.pid|integer} - %{service.name} - %{service.status}"' field: "message" target_prefix: "" ---- @@ -98,7 +98,7 @@ This configuration produces fields like: ---- `service.name` is an ECS {ref}/keyword.html[keyword field], which means that you -can use it in {es} for filtering, sorting, and aggregations. +can use it in {es} for filtering, sorting, and aggregations. When possible, use ECS-compatible field names. For more information, see the {ecs-ref}/index.html[Elastic Common Schema] documentation. From 8036edbebdefa35d84c244656d01c96f14da5c09 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Tue, 18 Jan 2022 13:51:35 +0100 Subject: [PATCH 14/69] Note that datasets are disabled by default in Filebeat documentation (#29887) --- filebeat/docs/getting-started.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/filebeat/docs/getting-started.asciidoc b/filebeat/docs/getting-started.asciidoc index 08f2bc311a5..44e2dbf1231 100644 --- a/filebeat/docs/getting-started.asciidoc +++ b/filebeat/docs/getting-started.asciidoc @@ -88,7 +88,7 @@ include::{libbeat-dir}/tab-widgets/enable-modules-widget.asciidoc[] -- . In the module config under `modules.d`, enable the desired datasets and -change the module settings to match your environment. +change the module settings to match your environment. **Datasets are disabled by default.** + For example, log locations are set based on the OS. If your logs aren't in default locations, set the `paths` variable: From 10f850e0df40438b9096170fd0a048a9c92f9cf7 Mon Sep 17 00:00:00 2001 From: Maxwell Borden Date: Tue, 18 Jan 2022 04:53:52 -0800 Subject: [PATCH 15/69] Check that `info.NCPU` is not zero (#29302) ## What does this PR do? This adds a check for 0 when determining the concurrency limit used by mage during a build. ## Why is it important? 
`docker info -f '{{ json .}}'` exits with 0 even if there was an error, so the
check for `err == nil` doesn't catch errors, and the default int value of 0 is
used when trying to read `info.NCPU`. This leads to the confusing behaviour
where the build stops and hangs forever with no errors and no indication that
anything has gone wrong unless you build with --verbose.
---
 dev-tools/mage/common.go | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/dev-tools/mage/common.go b/dev-tools/mage/common.go
index f89f0d9577c..fd8c60e5c4e 100644
--- a/dev-tools/mage/common.go
+++ b/dev-tools/mage/common.go
@@ -528,7 +528,9 @@ func numParallel() int {
 	maxParallel := runtime.NumCPU()
 
 	info, err := GetDockerInfo()
-	if err == nil && info.NCPU < maxParallel {
+	// Check that info.NCPU != 0 since docker info doesn't return with an
+	// error status if communication with the daemon failed.
+	if err == nil && info.NCPU != 0 && info.NCPU < maxParallel {
 		maxParallel = info.NCPU
 	}
 
From 616db13b36da5e4db327b745b333abf54a07345e Mon Sep 17 00:00:00 2001
From: Andrew Cholakian
Date: Tue, 18 Jan 2022 09:09:41 -0600
Subject: [PATCH 16/69] [Heartbeat] Defer monitor / ICMP errors to monitor runtime / ES (#29413)

This PR generally improves the error behavior of all monitors, and some
specific ICMP-related errors as well. These two items are combined in one PR
because the general theme here is improving the ICMP error experience, and
improving ICMP required improving all monitors.

Fixes #29346 and makes incremental progress toward #29692.

General monitor improvements

Generally speaking, per #29692 we are trying to send monitor output to ES
wherever possible. With this PR we now send any monitor initialization errors
(such as a lack of ICMP kernel capabilities) during monitor creation to ES.
We do this by allowing the monitor to initialize and run on schedule, even
though we know it will always send the same error message. This lets users
more easily debug issues in Kibana.

ICMP Specific Improvement

This PR also removes a broken IP capability check that caused heartbeat to be
unable to start. We now just rely on return codes from attempts to actually
send packets. This is the more specific fix for #29346. I was not able to
reproduce the exact customer-reported issue, where the user somehow disabled
ipv6 in a way that broke the ICMP loop. I tried disabling ipv6 fully with
sudo sysctl net.ipv6.conf.all.disable_ipv6=1 but that didn't yield the error
in #29346.

The logic is now simplified: there's no truly reliable way to know if you can
send an ipv6 (or ipv4) ping before you send it (settings can change at any
time! network cards can disappear!), so we just let the error codes happen as
the check is executed. This is also generally a better UX in that the errors
will now be visible in the Uptime app, not just the logs.

It should be noted that the ipv4 and ipv6 boolean options are documented to
affect only how DNS lookups happen. With this change the behavior matches the
docs. Note that ICMP is a bit weird in that there's a single ICMP loop in
heartbeat, and all monitors are really just interacting with that.
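In code terms, the deferred check looks roughly like the sketch below. It
mirrors the stdloop.go hunk later in this patch, with the connection types
simplified (the real loop holds *icmp.PacketConn sockets from
golang.org/x/net/icmp); the point is that a missing address family now fails
the individual ping with a clear error instead of preventing startup:

package main

import (
	"fmt"
	"net"
)

// loop stands in for heartbeat's single shared ICMP loop. A nil conn means
// that address family could not be initialized (permissions, disabled stack).
type loop struct {
	conn4, conn6 *net.IPConn
}

// ping defers the capability check to send time: no up-front probing, just a
// per-attempt error that flows into the monitor's event and on to ES.
func (l *loop) ping(addr *net.IPAddr) error {
	isIPv6 := addr.IP.To4() == nil
	if isIPv6 && l.conn6 == nil {
		return fmt.Errorf("cannot ping IPv6 address '%s', no IPv6 connection available", addr)
	}
	if !isIPv6 && l.conn4 == nil {
		return fmt.Errorf("cannot ping IPv4 address '%s', no IPv4 connection available", addr)
	}
	// ... send the echo request and await a reply ...
	return nil
}

func main() {
	l := &loop{} // neither family initialized
	fmt.Println(l.ping(&net.IPAddr{IP: net.ParseIP("2001:db8::1")}))
}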
Removal of .synthetics This also ignores the .synthetics folder which has been inconvenient for some time for devs, in that it dirties the git path --- CHANGELOG.next.asciidoc | 3 ++ heartbeat/monitors/active/icmp/icmp.go | 4 -- heartbeat/monitors/active/icmp/loop.go | 1 - heartbeat/monitors/active/icmp/stdloop.go | 34 ++++--------- heartbeat/monitors/factory_test.go | 7 +-- heartbeat/monitors/mocks_test.go | 61 +++++++++++++++-------- heartbeat/monitors/monitor.go | 21 ++++++-- heartbeat/monitors/monitor_test.go | 38 ++++++++++++-- heartbeat/monitors/stdfields/stdfields.go | 5 +- heartbeat/monitors/task.go | 2 +- heartbeat/tests/system/test_icmp.py | 10 ++-- x-pack/heartbeat/.gitignore | 1 + 12 files changed, 119 insertions(+), 68 deletions(-) create mode 100644 x-pack/heartbeat/.gitignore diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 3e4267ddd65..c6ee693fa30 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -110,6 +110,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Heartbeat* - Fix race condition in http monitors using `mode:all` that can cause crashes. {pull}29697[pull] +- Fix broken ICMP availability check that prevented heartbeat from starting in rare cases. {pull}29413[pull] *Metricbeat* @@ -168,6 +169,8 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Heartbeat* +- More errors are now visible in ES with new logic failing monitors later to ease debugging. {pull}29413[pull] + *Metricbeat* diff --git a/heartbeat/monitors/active/icmp/icmp.go b/heartbeat/monitors/active/icmp/icmp.go index ef57cdbebae..073660c259a 100644 --- a/heartbeat/monitors/active/icmp/icmp.go +++ b/heartbeat/monitors/active/icmp/icmp.go @@ -91,10 +91,6 @@ func (jf *jobFactory) checkConfig() error { } func (jf *jobFactory) makePlugin() (plugin2 plugin.Plugin, err error) { - if err := jf.loop.checkNetworkMode(jf.ipVersion); err != nil { - return plugin.Plugin{}, err - } - pingFactory := jf.pingIPFactory(&jf.config) var j []jobs.Job diff --git a/heartbeat/monitors/active/icmp/loop.go b/heartbeat/monitors/active/icmp/loop.go index de4d0ef4dfc..b29fa247f16 100644 --- a/heartbeat/monitors/active/icmp/loop.go +++ b/heartbeat/monitors/active/icmp/loop.go @@ -23,7 +23,6 @@ import ( ) type ICMPLoop interface { - checkNetworkMode(mode string) error ping( addr *net.IPAddr, timeout time.Duration, diff --git a/heartbeat/monitors/active/icmp/stdloop.go b/heartbeat/monitors/active/icmp/stdloop.go index 05858f5537f..9f5f5543967 100644 --- a/heartbeat/monitors/active/icmp/stdloop.go +++ b/heartbeat/monitors/active/icmp/stdloop.go @@ -20,7 +20,6 @@ package icmp import ( "bytes" "encoding/binary" - "errors" "fmt" "math/rand" "net" @@ -159,29 +158,6 @@ func newICMPLoop() (*stdICMPLoop, error) { return l, nil } -func (l *stdICMPLoop) checkNetworkMode(mode string) error { - ip4, ip6 := false, false - switch mode { - case "ip4": - ip4 = true - case "ip6": - ip6 = true - case "ip": - ip4, ip6 = true, true - default: - return fmt.Errorf("'%v' is not supported", mode) - } - - if ip4 && l.conn4 == nil { - return errors.New("failed to initiate IPv4 support. Check log details for permission configuration") - } - if ip6 && l.conn6 == nil { - return errors.New("failed to initiate IPv6 support. 
Check log details for permission configuration") - } - - return nil -} - func (l *stdICMPLoop) runICMPRecv(conn *icmp.PacketConn, proto int) { for { bytes := make([]byte, 512) @@ -251,6 +227,14 @@ func (l *stdICMPLoop) ping( timeout time.Duration, interval time.Duration, ) (time.Duration, int, error) { + isIPv6 := addr.IP.To4() == nil + if isIPv6 && l.conn6 == nil { + return -1, -1, fmt.Errorf("cannot ping IPv6 address '%s', no IPv6 connection available", addr) + } + if !isIPv6 && l.conn4 == nil { + return -1, -1, fmt.Errorf("cannot ping IPv4 address '%s', no IPv4 connection available", addr) + } + var err error toTimer := time.NewTimer(timeout) defer toTimer.Stop() @@ -379,7 +363,7 @@ func (l *stdICMPLoop) sendEchoRequest(addr *net.IPAddr) (*requestContext, error) _, err := conn.WriteTo(encoded, addr) if err != nil { - return nil, err + return nil, fmt.Errorf("could not write to conn: %w", err) } ctx.ts = ts diff --git a/heartbeat/monitors/factory_test.go b/heartbeat/monitors/factory_test.go index c395050aaa1..e4ff3589ccd 100644 --- a/heartbeat/monitors/factory_test.go +++ b/heartbeat/monitors/factory_test.go @@ -149,7 +149,7 @@ func TestPreProcessors(t *testing.T) { } func TestDuplicateMonitorIDs(t *testing.T) { - serverMonConf := mockPluginConf(t, "custom", "@every 1ms", "http://example.net") + serverMonConf := mockPluginConf(t, "custom", "custom", "@every 1ms", "http://example.net") badConf := mockBadPluginConf(t, "custom", "@every 1ms") reg, built, closed := mockPluginsReg() pipelineConnector := &MockPipelineConnector{} @@ -190,8 +190,9 @@ func TestDuplicateMonitorIDs(t *testing.T) { m1.Stop() m2.Stop() - // 3 are counted as built, even the bad config - require.Equal(t, 3, built.Load()) + // Two are counted as built. The bad config is missing a stdfield so it + // doesn't complete construction + require.Equal(t, 2, built.Load()) // Only 2 closes, because the bad config isn't closed require.Equal(t, 2, closed.Load()) } diff --git a/heartbeat/monitors/mocks_test.go b/heartbeat/monitors/mocks_test.go index 6d51791e3d7..720088db211 100644 --- a/heartbeat/monitors/mocks_test.go +++ b/heartbeat/monitors/mocks_test.go @@ -99,24 +99,28 @@ func (pc *MockPipelineConnector) ConnectWith(beat.ClientConfig) (beat.Client, er return c, nil } -func mockEventMonitorValidator(id string) validator.Validator { +func baseMockEventMonitorValidator(id string, name string, status string) validator.Validator { var idMatcher isdef.IsDef if id == "" { idMatcher = isdef.IsStringMatching(regexp.MustCompile(`^auto-test-.*`)) } else { idMatcher = isdef.IsEqual(id) } + return lookslike.MustCompile(map[string]interface{}{ + "monitor": map[string]interface{}{ + "id": idMatcher, + "name": name, + "type": "test", + "duration.us": isdef.IsDuration, + "status": status, + "check_group": isdef.IsString, + }, + }) +} + +func mockEventMonitorValidator(id string, name string) validator.Validator { return lookslike.Strict(lookslike.Compose( - lookslike.MustCompile(map[string]interface{}{ - "monitor": map[string]interface{}{ - "id": idMatcher, - "name": "", - "type": "test", - "duration.us": isdef.IsDuration, - "status": "up", - "check_group": isdef.IsString, - }, - }), + baseMockEventMonitorValidator(id, name, "up"), hbtestllext.MonitorTimespanValidator, hbtest.SummaryChecks(1, 0), lookslike.MustCompile(mockEventCustomFields()), @@ -151,15 +155,19 @@ func mockPluginBuilder() (plugin.PluginFactory, *atomic.Int, *atomic.Int) { unpacked := struct { URLs []string `config:"urls" validate:"required"` }{} - err := 
config.Unpack(&unpacked) - if err != nil { - return plugin.Plugin{}, err - } - j, err := createMockJob() + + // track all closes, even on error closer := func() error { closed.Inc() return nil } + + err := config.Unpack(&unpacked) + if err != nil { + return plugin.Plugin{DoClose: closer}, err + } + j, err := createMockJob() + return plugin.Plugin{Jobs: j, DoClose: closer, Endpoints: 1}, err }, Stats: plugin.NewPluginCountersRecorder("test", reg)}, @@ -174,13 +182,15 @@ func mockPluginsReg() (p *plugin.PluginsReg, built *atomic.Int, closed *atomic.I return reg, built, closed } -func mockPluginConf(t *testing.T, id string, schedule string, url string) *common.Config { +func mockPluginConf(t *testing.T, id string, name string, schedule string, url string) *common.Config { confMap := map[string]interface{}{ "type": "test", "urls": []string{url}, "schedule": schedule, + "name": name, } + // Optional to let us simulate this key missing if id != "" { confMap["id"] = id } @@ -197,7 +207,6 @@ func mockBadPluginConf(t *testing.T, id string, schedule string) *common.Config confMap := map[string]interface{}{ "type": "test", "notanoption": []string{"foo"}, - "schedule": schedule, } if id != "" { @@ -210,8 +219,6 @@ func mockBadPluginConf(t *testing.T, id string, schedule string) *common.Config return conf } -// mockInvalidPlugin conf returns a config that invalid at the basic level of -// what's expected in heartbeat, i.e. no type. func mockInvalidPluginConf(t *testing.T) *common.Config { confMap := map[string]interface{}{ "hoeutnheou": "oueanthoue", @@ -222,3 +229,17 @@ func mockInvalidPluginConf(t *testing.T) *common.Config { return conf } + +func mockInvalidPluginConfWithStdFields(t *testing.T, id string, name string, schedule string) *common.Config { + confMap := map[string]interface{}{ + "type": "test", + "id": id, + "name": name, + "schedule": schedule, + } + + conf, err := common.NewConfigFrom(confMap) + require.NoError(t, err) + + return conf +} diff --git a/heartbeat/monitors/monitor.go b/heartbeat/monitors/monitor.go index 669579e31aa..91a6a881d84 100644 --- a/heartbeat/monitors/monitor.go +++ b/heartbeat/monitors/monitor.go @@ -163,13 +163,26 @@ func newMonitorUnsafe( return p.Close() } - wrappedJobs := wrappers.WrapCommon(p.Jobs, m.stdFields) - m.endpoints = p.Endpoints - + // If we've hit an error at this point, still run on schedule, but always return an error. + // This way the error is clearly communicated through to kibana. + // Since the error is not recoverable in these instances, the user will need to reconfigure + // the monitor, which will destroy and recreate it in heartbeat, thus clearing this error. + // + // Note: we do this at this point, and no earlier, because at a minimum we need the + // standard monitor fields (id, name and schedule) to deliver an error to kibana in a way + // that it can render. 
if err != nil { - return m, fmt.Errorf("job err %v", err) + // Note, needed to hoist err to this scope, not just to add a prefix + fullErr := fmt.Errorf("job could not be initialized: %s", err) + // A placeholder job that always returns an error + p.Jobs = []jobs.Job{func(event *beat.Event) ([]jobs.Job, error) { + return nil, fullErr + }} } + wrappedJobs := wrappers.WrapCommon(p.Jobs, m.stdFields) + m.endpoints = p.Endpoints + m.configuredJobs, err = m.makeTasks(config, wrappedJobs) if err != nil { return m, err diff --git a/heartbeat/monitors/monitor_test.go b/heartbeat/monitors/monitor_test.go index bbcd5b9b74c..8184a867eae 100644 --- a/heartbeat/monitors/monitor_test.go +++ b/heartbeat/monitors/monitor_test.go @@ -25,19 +25,49 @@ import ( "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/heartbeat/scheduler" + "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/monitoring" + "github.com/elastic/go-lookslike" + "github.com/elastic/go-lookslike/isdef" "github.com/elastic/go-lookslike/testslike" + "github.com/elastic/go-lookslike/validator" ) -func TestMonitor(t *testing.T) { - serverMonConf := mockPluginConf(t, "", "@every 1ms", "http://example.net") +// TestMonitorBasic tests a basic config +func TestMonitorBasic(t *testing.T) { + testMonitorConfig( + t, + mockPluginConf(t, "myId", "myName", "@every 1ms", "http://example.net"), + mockEventMonitorValidator("myId", "myName"), + ) +} + +// TestMonitorBasic tests a config that errors out at plugin creation, but still has stdfields defined. +// This should cause the monitor to run, but only produce error documents +func TestMonitorCfgError(t *testing.T) { + testMonitorConfig( + t, + mockInvalidPluginConfWithStdFields(t, "invalidTestId", "invalidTestName", "@every 10s"), + lookslike.Compose( + baseMockEventMonitorValidator("invalidTestId", "invalidTestName", "down"), + lookslike.MustCompile(common.MapStr{ + "error": common.MapStr{ + "message": isdef.IsStringContaining("missing required field"), + "type": "io", + }, + }), + ), + ) +} + +func testMonitorConfig(t *testing.T, conf *common.Config, eventValidator validator.Validator) { reg, built, closed := mockPluginsReg() pipelineConnector := &MockPipelineConnector{} sched := scheduler.Create(1, monitoring.NewRegistry(), time.Local, nil, false) defer sched.Stop() - mon, err := newMonitor(serverMonConf, reg, pipelineConnector, sched.Add, nil, false) + mon, err := newMonitor(conf, reg, pipelineConnector, sched.Add, nil, false) require.NoError(t, err) mon.Start() @@ -56,7 +86,7 @@ func TestMonitor(t *testing.T) { pcClient.Close() for _, event := range pcClient.Publishes() { - testslike.Test(t, mockEventMonitorValidator(""), event.Fields) + testslike.Test(t, eventValidator, event.Fields) } } else { // Let's yield this goroutine so we don't spin diff --git a/heartbeat/monitors/stdfields/stdfields.go b/heartbeat/monitors/stdfields/stdfields.go index f09161c2adf..92e5bc4bb90 100644 --- a/heartbeat/monitors/stdfields/stdfields.go +++ b/heartbeat/monitors/stdfields/stdfields.go @@ -18,10 +18,9 @@ package stdfields import ( + "fmt" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/heartbeat/scheduler/schedule" "github.com/elastic/beats/v7/libbeat/common" ) @@ -46,7 +45,7 @@ func ConfigToStdMonitorFields(config *common.Config) (StdMonitorFields, error) { mpi := StdMonitorFields{Enabled: true} if err := config.Unpack(&mpi); err != nil { - return mpi, 
errors.Wrap(err, "error unpacking monitor plugin config") + return mpi, fmt.Errorf("error unpacking monitor plugin config: %w", err) } // Use `service_name` if `service.name` is unspecified diff --git a/heartbeat/monitors/task.go b/heartbeat/monitors/task.go index 11b013a9871..a7f1848b3ae 100644 --- a/heartbeat/monitors/task.go +++ b/heartbeat/monitors/task.go @@ -108,7 +108,7 @@ func runPublishJob(job jobs.Job, client *WrappedClient) []scheduler.TaskFunc { conts, err := job(event) if err != nil { - logp.Err("Job %v failed with: ", err) + logp.Err("Job failed with: %s", err) } hasContinuations := len(conts) > 0 diff --git a/heartbeat/tests/system/test_icmp.py b/heartbeat/tests/system/test_icmp.py index 7f61a7430f8..9a9ef4f6c77 100644 --- a/heartbeat/tests/system/test_icmp.py +++ b/heartbeat/tests/system/test_icmp.py @@ -6,6 +6,7 @@ import sys import time import unittest +import re from beat.beat import INTEGRATION_TESTS from elasticsearch import Elasticsearch from heartbeat import BaseTest @@ -44,8 +45,11 @@ def has_failed_message(): return self.log_contains("Failed to initialize ICMP lo # we should run pings on those machines and make sure they work. self.wait_until(lambda: has_started_message() or has_failed_message(), 30) + self.wait_until(lambda: self.output_has(lines=1)) + output = self.read_output() + monitor_status = output[0]["monitor.status"] if has_failed_message(): - proc.check_kill_and_wait(1) + assert monitor_status == "down" + self.assertRegex(output[0]["error.message"], ".*Insufficient privileges to perform ICMP ping.*") else: - # Check that documents are moving through - self.wait_until(lambda: self.output_has(lines=1)) + assert monitor_status == "up" diff --git a/x-pack/heartbeat/.gitignore b/x-pack/heartbeat/.gitignore new file mode 100644 index 00000000000..8af6c73dc61 --- /dev/null +++ b/x-pack/heartbeat/.gitignore @@ -0,0 +1 @@ +.synthetics From 5fdc4bb12cf0108b80a8487ac353586adb378bd1 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 18 Jan 2022 11:11:00 -0500 Subject: [PATCH 17/69] Fix YAML indentation in `parsers` examples (#29663) (#29893) See discussion on https://discuss.elastic.co/t/filebeat-filestream-input-parsers-multiline-fails/290543/9. (cherry picked from commit 9e0dad7bdd06ffa957f0c2492be1a6a6916267fe) Co-authored-by: Steve Mokris --- .../input-filestream-reader-options.asciidoc | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/filebeat/docs/inputs/input-filestream-reader-options.asciidoc b/filebeat/docs/inputs/input-filestream-reader-options.asciidoc index b2c0fa2fb70..2624928b154 100644 --- a/filebeat/docs/inputs/input-filestream-reader-options.asciidoc +++ b/filebeat/docs/inputs/input-filestream-reader-options.asciidoc @@ -164,11 +164,11 @@ The multiline message is stored under the key `msg`. ... parsers: - ndjson: - keys_under_root: true - message_key: msg + keys_under_root: true + message_key: msg - multiline: - type: counter - lines_count: 3 + type: counter + lines_count: 3 ---- See the available parser settings in detail below. 
@@ -197,9 +197,9 @@ Example configuration: [source,yaml] ---- - ndjson: - keys_under_root: true - add_error_key: true - message_key: log + keys_under_root: true + add_error_key: true + message_key: log ---- *`keys_under_root`*:: By default, the decoded JSON is placed under a "json" key @@ -256,5 +256,5 @@ all containers under the default Kubernetes logs path: - "/var/log/containers/*.log" parsers: - container: - stream: stdout + stream: stdout ---- From 38d834dadf2ab77b97bae9f2edb21d193406e644 Mon Sep 17 00:00:00 2001 From: Justin Kambic Date: Tue, 18 Jan 2022 17:18:46 -0500 Subject: [PATCH 18/69] [Heartbeat] Fix broken macOS ICMP test (#29900) Fixes broken macos python e2e test --- CHANGELOG.next.asciidoc | 1 + heartbeat/tests/system/test_icmp.py | 18 ++++-------------- 2 files changed, 5 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index c6ee693fa30..101613c4f27 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -47,6 +47,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Remove deprecated old awscloudwatch input name. {pull}29844[29844] *Heartbeat* +- Fix broken macOS ICMP python e2e test. {pull}29900[29900] - Only add monitor.status to browser events when summary. {pull}29460[29460] - Also add summary to journeys for which the synthetics runner crashes. {pull}29606[29606] diff --git a/heartbeat/tests/system/test_icmp.py b/heartbeat/tests/system/test_icmp.py index 9a9ef4f6c77..f7be72e89f4 100644 --- a/heartbeat/tests/system/test_icmp.py +++ b/heartbeat/tests/system/test_icmp.py @@ -36,20 +36,10 @@ def test_base(self): proc = self.start_beat() - def has_started_message(): return self.log_contains("ICMP loop successfully initialized") - - def has_failed_message(): return self.log_contains("Failed to initialize ICMP loop") - - # We don't know if the system tests are running is configured to support or not support ping, but we can at least check that the ICMP loop - # was initiated. In the future we should start up VMs with the correct perms configured and be more specific. In addition to that - # we should run pings on those machines and make sure they work. 
- self.wait_until(lambda: has_started_message() or has_failed_message(), 30) - + # because we have no way of knowing if the current environment has the ability to do ICMP pings + # we are instead asserting the monitor's status via the output and checking for errors where appropriate self.wait_until(lambda: self.output_has(lines=1)) output = self.read_output() monitor_status = output[0]["monitor.status"] - if has_failed_message(): - assert monitor_status == "down" - self.assertRegex(output[0]["error.message"], ".*Insufficient privileges to perform ICMP ping.*") - else: - assert monitor_status == "up" + assert monitor_status == "up" or monitor_status == "down" + assert output[0]["monitor.type"] == "icmp" From cb22d1d69fbb934f632f7ccbb837d0173b85fcdd Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Wed, 19 Jan 2022 20:14:52 +1030 Subject: [PATCH 19/69] x-pack/filebeat/module/cisco: fix event.{outcome,type} handling (#29698) --- CHANGELOG.next.asciidoc | 1 + .../additional_messages.log-expected.json | 130 +++++------ .../cisco/asa/test/asa-fix.log-expected.json | 48 ++-- .../cisco/asa/test/asa.log-expected.json | 78 +++---- .../cisco/asa/test/filtered.log-expected.json | 6 +- .../cisco/asa/test/not-ip.log-expected.json | 10 +- .../cisco/asa/test/sample.log-expected.json | 211 +++++++++--------- .../cisco/ftd/test/asa-fix.log-expected.json | 24 +- .../cisco/ftd/test/asa.log-expected.json | 78 +++---- .../cisco/ftd/test/not-ip.log-expected.json | 10 +- .../cisco/ftd/test/sample.log-expected.json | 208 ++++++++--------- .../security-connection.log-expected.json | 10 +- .../cisco/shared/ingest/asa-ftd-pipeline.yml | 67 +++--- 13 files changed, 441 insertions(+), 440 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 101613c4f27..e50312b035e 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -106,6 +106,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix handling of IPv6 addresses in netflow flow events. {issue}19210[19210] {pull}29383[29383] - Fix `sophos` KV splitting and syslog header handling {issue}24237[24237] {pull}29331[29331] - Undo deletion of endpoint config from cloudtrail fileset in {pull}29415[29415]. {pull}29450[29450] +- Make Cisco ASA and FTD modules conform to the ECS definition for event.outcome and event.type. {issue}29581[29581] {pull}29698[29698] - ibmmq: Fixed `@timestamp` not being populated with correct values. 
{pull}29773[29773] *Heartbeat* diff --git a/x-pack/filebeat/module/cisco/asa/test/additional_messages.log-expected.json b/x-pack/filebeat/module/cisco/asa/test/additional_messages.log-expected.json index 2992f9a237c..c6feef5d7e8 100644 --- a/x-pack/filebeat/module/cisco/asa/test/additional_messages.log-expected.json +++ b/x-pack/filebeat/module/cisco/asa/test/additional_messages.log-expected.json @@ -443,8 +443,8 @@ "event.severity": 7, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -1022,12 +1022,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-313004: Denied ICMP type=0, from laddr 10.10.10.10 on interface fw502 to 192.168.2.2: no matching session", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -1123,12 +1123,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-106001: Inbound TCP connection denied from 192.168.2.2/43803 to 10.10.10.10/14322 flags SYN on interface out111", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -1356,12 +1356,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src fw111:10.10.10.10/64388 dst out111:192.168.2.2/443 by access-group \"out1111_access_out\" [0x47e21ef4, 0x47e21ef4]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -1407,12 +1407,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106021: Deny TCP reverse path check from 192.168.2.2 to 10.10.10.10 on interface fw111", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -1456,12 +1456,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-106006: Deny inbound UDP from 192.168.2.2/65020 to 10.10.10.10/65020 on interface fw111", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -1508,12 +1508,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-6-106015: Deny TCP (no connection) from 192.168.2.2/53089 to 10.10.10.10/443 flags FIN PSH ACK on interface out111", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -1559,12 +1559,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-6-106015: Deny TCP (no connection) from 192.168.2.2/17127 to 10.10.10.10/443 flags PSH ACK on interface out111", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + 
"denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -1610,12 +1610,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-6-106015: Deny TCP (no connection) from 192.168.2.2/24223 to 10.10.10.10/443 flags RST on interface fw111", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -2010,7 +2010,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -2064,7 +2064,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -2183,8 +2183,8 @@ "event.severity": 7, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -2298,12 +2298,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-3-106014: Deny inbound icmp src fw111:10.10.10.10 dst fw111:10.10.10.10(type 8, code 0)", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 3, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -2391,12 +2391,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-3-106010: Deny inbound sctp src fw111:10.10.10.10/5114 dst fw111:10.10.10.10/2", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 3, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -2496,7 +2496,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -2543,7 +2543,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -2590,7 +2590,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -2637,7 +2637,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -2755,12 +2755,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:10.10.10.2/56444 dst srv:192.168.2.2/51635(testhostname.domain) by access-group \"global_access_1\"", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -2809,12 +2809,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-5-106100: access-list testrulename denied tcp insideintf/somedomainname.local(27218) -> OUTSIDE/195.122.12.242(53) hit-cnt 1 first hit [0x16847359, 0x00000000]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -2860,7 +2860,6 @@ "event.severity": 5, "event.timezone": "-02:00", "event.type": [ - "allowed", "info" ], "fileset.name": "asa", @@ -2982,12 +2981,12 @@ "event.kind": "event", "event.module": "cisco", 
"event.original": "%ASA-6-605004: Login denied from 10.10.1.212/51923 to FCD-FS-LAN:10.10.1.254/https for user \"*****\"", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -3083,7 +3082,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -3424,12 +3423,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-3-710003: TCP access denied by ACL from 67.43.156.13/6370 to outside:195.74.114.34/23", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 3, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -3594,8 +3593,8 @@ "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -3759,7 +3758,6 @@ "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "allowed", "deletion", "info", "user" @@ -3934,7 +3932,6 @@ "event.severity": 5, "event.timezone": "-02:00", "event.type": [ - "allowed", "info" ], "fileset.name": "asa", @@ -4024,7 +4021,6 @@ "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "denied", "error" ], "fileset.name": "asa", @@ -4072,7 +4068,6 @@ "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "denied", "error" ], "fileset.name": "asa", @@ -4104,6 +4099,7 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-6-713903: IP = 67.43.156.12, All IPSec SA proposals found unacceptable!", + "event.outcome": "failure", "event.severity": 6, "event.timezone": "-02:00", "event.type": [ @@ -4143,7 +4139,6 @@ "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "denied", "error" ], "fileset.name": "asa", @@ -4180,7 +4175,6 @@ "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "denied", "error" ], "fileset.name": "asa", @@ -4279,12 +4273,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny protocol 47 src outside:100.66.124.24 dst inside:172.31.98.44 by access-group \"inbound\"", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", @@ -4330,12 +4324,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny icmp src OUTSIDE:2a02:cf40:add:4002:91f2:a9b2:e09a:6fc6 dst OUTSIDE:fe00:afa0::1 (type 128, code 0) by access-group \"OUTSIDE_in\"", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "dev01", diff --git a/x-pack/filebeat/module/cisco/asa/test/asa-fix.log-expected.json b/x-pack/filebeat/module/cisco/asa/test/asa-fix.log-expected.json index 0156348bd70..a8e799341b3 100644 --- a/x-pack/filebeat/module/cisco/asa/test/asa-fix.log-expected.json +++ b/x-pack/filebeat/module/cisco/asa/test/asa-fix.log-expected.json @@ -78,12 +78,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny icmp src Inside:10.123.123.123 dst Outside:10.123.123.123 (type 11, code 0) 
by access-group \"Inside_access_in\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "SNL-ASA-VPN-A01", @@ -130,12 +130,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src dmz:10.123.123.123/6316 dst outside:10.123.123.123/53 type 3, code 0, by access-group \"acl_dmz\" [0xe3afb522, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -179,12 +179,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny udp src Inside:10.123.123.123/57621(LOCAL\\Elastic) dst Outside:10.123.123.123/57621 by access-group \"Inside_access_in\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "SNL-ASA-VPN-A01", @@ -232,12 +232,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-106017: Deny IP due to Land Attack from 10.123.123.123 to 10.123.123.123", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "SNL-ASA-VPN-A01", @@ -276,12 +276,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-3-313008: Denied IPv6-ICMP type=134, code=0 from fe80::1ff:fe23:4567:890a on interface ISP1", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 3, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "SNL-ASA-VPN-A01", @@ -330,12 +330,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-313009: Denied invalid ICMP code 9, for Inside:10.255.0.206/8795 (10.255.0.206/8795) to identity:10.12.31.51/0 (10.12.31.51/0), ICMP id 295, ICMP type 8", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -384,7 +384,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -433,7 +433,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -483,7 +483,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -531,12 +531,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-1-106103: access-list filter denied icmp for user joe inside/10.1.2.3(64321) -> outside/1.2.33.40(8080) hit-cnt 1 first hit [0x3c8b88c1, 0xbee595c3]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 1, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", diff --git a/x-pack/filebeat/module/cisco/asa/test/asa.log-expected.json 
b/x-pack/filebeat/module/cisco/asa/test/asa.log-expected.json index 81c80ebf991..75e6e676c26 100644 --- a/x-pack/filebeat/module/cisco/asa/test/asa.log-expected.json +++ b/x-pack/filebeat/module/cisco/asa/test/asa.log-expected.json @@ -4985,12 +4985,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "localhost", @@ -5043,12 +5043,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "localhost", @@ -5101,12 +5101,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "localhost", @@ -5159,12 +5159,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "localhost", @@ -5217,12 +5217,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "localhost", @@ -5275,12 +5275,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "localhost", @@ -5333,12 +5333,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "localhost", @@ -5391,12 +5391,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - 
"event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "localhost", @@ -5449,12 +5449,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "localhost", @@ -5507,12 +5507,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "localhost", @@ -5565,12 +5565,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "localhost", @@ -5623,12 +5623,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "localhost", @@ -5681,12 +5681,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "localhost", diff --git a/x-pack/filebeat/module/cisco/asa/test/filtered.log-expected.json b/x-pack/filebeat/module/cisco/asa/test/filtered.log-expected.json index 1ae3aa1f563..37b53d01338 100644 --- a/x-pack/filebeat/module/cisco/asa/test/filtered.log-expected.json +++ b/x-pack/filebeat/module/cisco/asa/test/filtered.log-expected.json @@ -50,12 +50,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-106001: Inbound TCP connection denied from 10.13.12.11/45321 to 192.168.33.12/443 flags URG+SYN+RST on interface eth0", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "beats", diff --git a/x-pack/filebeat/module/cisco/asa/test/not-ip.log-expected.json b/x-pack/filebeat/module/cisco/asa/test/not-ip.log-expected.json index 09357b0121b..948708bb81a 100644 --- a/x-pack/filebeat/module/cisco/asa/test/not-ip.log-expected.json +++ b/x-pack/filebeat/module/cisco/asa/test/not-ip.log-expected.json @@ -17,12 +17,12 @@ 
"event.kind": "event", "event.module": "cisco", "event.original": "%ASA-5-106100: access-list AL-DMZ-LB-IN denied tcp LB-DMZ/WHAT-IS-THIS-A-HOSTNAME-192.0.2.244(27218) -> OUTSIDE/203.0.113.42(53) hit-cnt 1 first hit [0x16847359, 0x00000000]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -130,8 +130,8 @@ "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "localhost", diff --git a/x-pack/filebeat/module/cisco/asa/test/sample.log-expected.json b/x-pack/filebeat/module/cisco/asa/test/sample.log-expected.json index 65608c192ee..79129ea3744 100644 --- a/x-pack/filebeat/module/cisco/asa/test/sample.log-expected.json +++ b/x-pack/filebeat/module/cisco/asa/test/sample.log-expected.json @@ -17,12 +17,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src dmz:10.1.2.30/63016 dst outside:192.0.0.8/53 by access-group \"acl_dmz\" [0xe3aab522, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -68,12 +68,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src dmz:10.1.2.30/63016 dst outside:192.0.0.8/53 type 3, code 0, by access-group \"acl_dmz\" [0xe3aab522, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -125,7 +125,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -171,12 +171,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-6-106100: access-list inside denied udp inside/172.29.2.101(1039) -> outside/192.0.2.10(53) hit-cnt 1 first hit [0xd820e56a, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "INT-FW01", @@ -232,7 +232,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "host.hostname": "INT-FW01", @@ -864,12 +864,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-106007: Deny inbound UDP from 192.0.0.66/12981 to 10.1.2.60/53 due to DNS Query", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -920,7 +920,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -971,7 +971,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -1022,7 +1022,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -1073,7 +1073,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": 
"asa", "input.type": "log", @@ -1124,7 +1124,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -1175,7 +1175,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -1226,7 +1226,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -1277,7 +1277,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -1328,7 +1328,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -1379,7 +1379,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -1423,12 +1423,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-106006: Deny inbound UDP from 192.0.2.66/137 to 10.1.2.42/137 on interface inside", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -1471,12 +1471,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-106007: Deny inbound UDP from 192.0.2.66/12981 to 10.1.5.60/53 due to DNS Query", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -1527,7 +1527,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -1578,7 +1578,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -1629,7 +1629,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -1675,12 +1675,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-5-106100: access-list acl_in denied tcp inside/10.0.0.16(2011) -> outside/192.0.0.89(2000) hit-cnt 1 first hit [0x71a87d94, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -1726,12 +1726,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-5-106100: access-list acl_in denied tcp inside/10.0.0.16(2012) -> outside/192.0.0.89(2000) hit-cnt 1 first hit [0x71a87d94, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -1777,12 +1777,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:192.0.2.126/53638 dst inside:10.0.0.132/8111 by access-group \"acl_out\" [0x71761f18, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -1828,12 +1828,12 @@ "event.kind": "event", 
"event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:192.0.2.126/53638 dst inside:10.0.0.132/8111 by access-group \"acl_out\" [0x71761f18, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -1884,7 +1884,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -1935,7 +1935,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -1987,7 +1987,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -2088,12 +2088,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny udp src dmz:192.168.1.33/5555 dst outside:192.0.0.12/53 by access-group \"dmz\" [0x123a465e, 0x4c7bf613]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -2140,12 +2140,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny udp src dmz:192.168.1.33/5555 dst outside:192.0.0.12/53 by access-group \"dmz\" [0x123a465e, 0x4c7bf613]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -2470,12 +2470,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-6-106015: Deny TCP (no connection) from 192.0.2.222/1234 to 192.168.1.34/5679 flags RST on interface outside", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -2519,12 +2519,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-6-106015: Deny TCP (no connection) from 192.0.2.222/1234 to 192.168.1.34/5679 flags RST on interface outside", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -2570,12 +2570,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny udp src dmz:192.168.1.34/5679 dst outside:192.0.0.12/5000 by access-group \"dmz\" [0x123a465e, 0x8c20f21]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -2839,12 +2839,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-106016: Deny IP spoof from (0.0.0.0) to 192.88.99.47 on interface Mobile_Traffic", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "GIFRCHN01", @@ -2887,12 +2887,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-106016: Deny IP spoof from (0.0.0.0) to 
192.88.99.57 on interface Mobile_Traffic", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "GIFRCHN01", @@ -2935,12 +2935,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-106016: Deny IP spoof from (0.0.0.0) to 192.88.99.47 on interface Mobile_Traffic", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "GIFRCHN01", @@ -2983,12 +2983,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-106016: Deny IP spoof from (0.0.0.0) to 192.88.99.47 on interface Mobile_Traffic", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "GIFRCHN01", @@ -3031,12 +3031,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-106016: Deny IP spoof from (0.0.0.0) to 192.88.99.57 on interface Mobile_Traffic", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "GIFRCHN01", @@ -3079,12 +3079,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-106016: Deny IP spoof from (0.0.0.0) to 192.88.99.57 on interface Mobile_Traffic", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "GIFRCHN01", @@ -3127,12 +3127,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-106016: Deny IP spoof from (0.0.0.0) to 192.168.1.255 on interface Mobile_Traffic", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "GIFRCHN01", @@ -3175,12 +3175,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-106016: Deny IP spoof from (0.0.0.0) to 192.168.1.255 on interface Mobile_Traffic", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "GIFRCHN01", @@ -3226,12 +3226,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:192.0.2.95/24069 dst inside:10.32.112.125/25 by access-group \"PERMIT_IN\" [0x0, 0x0]\"", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "host.hostname": "GIFRCHN01", @@ -3279,12 +3279,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-3-313001: Denied ICMP type=3, code=3 from 10.2.3.5 on interface Outside", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 3, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", 
"host.hostname": "GIFRCHN01", @@ -3329,12 +3329,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-313004: Denied ICMP type=0, from laddr 172.16.30.2 on interface inside to 172.16.1.10: no matching session", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -3388,7 +3388,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -3509,8 +3509,8 @@ "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -3559,7 +3559,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -3602,7 +3602,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -3643,12 +3643,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-5-304002: Access denied URL http://www.example.net/images/favicon.ico SRC 10.69.6.39 DEST 192.0.0.19 on interface inside", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "asa", "input.type": "log", @@ -3764,7 +3764,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "asa", "input.type": "log", @@ -4705,7 +4705,6 @@ "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "allowed", "deletion", "info", "user" diff --git a/x-pack/filebeat/module/cisco/ftd/test/asa-fix.log-expected.json b/x-pack/filebeat/module/cisco/ftd/test/asa-fix.log-expected.json index 5b4432fe41b..e7266ca60e1 100644 --- a/x-pack/filebeat/module/cisco/ftd/test/asa-fix.log-expected.json +++ b/x-pack/filebeat/module/cisco/ftd/test/asa-fix.log-expected.json @@ -80,12 +80,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny icmp src Inside:10.123.123.123 dst Outside:10.123.123.123 (type 11, code 0) by access-group \"Inside_access_in\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "SNL-ASA-VPN-A01", @@ -133,12 +133,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src dmz:10.123.123.123/6316 dst outside:10.123.123.123/53 type 3, code 0, by access-group \"acl_dmz\" [0xe3afb522, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "input.type": "log", @@ -183,12 +183,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny udp src Inside:10.123.123.123/57621(LOCAL\\Elastic) dst Outside:10.123.123.123/57621 by access-group \"Inside_access_in\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "SNL-ASA-VPN-A01", @@ -237,12 +237,12 @@ 
"event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-106017: Deny IP due to Land Attack from 10.123.123.123 to 10.123.123.123", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "SNL-ASA-VPN-A01", diff --git a/x-pack/filebeat/module/cisco/ftd/test/asa.log-expected.json b/x-pack/filebeat/module/cisco/ftd/test/asa.log-expected.json index 4aa3fad3d8b..3b95629dffb 100644 --- a/x-pack/filebeat/module/cisco/ftd/test/asa.log-expected.json +++ b/x-pack/filebeat/module/cisco/ftd/test/asa.log-expected.json @@ -4902,12 +4902,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "localhost", @@ -4959,12 +4959,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "localhost", @@ -5016,12 +5016,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "localhost", @@ -5073,12 +5073,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "localhost", @@ -5130,12 +5130,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "localhost", @@ -5187,12 +5187,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "localhost", @@ -5244,12 +5244,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 
0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "localhost", @@ -5301,12 +5301,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "localhost", @@ -5358,12 +5358,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "localhost", @@ -5415,12 +5415,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "localhost", @@ -5472,12 +5472,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "localhost", @@ -5529,12 +5529,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "localhost", @@ -5586,12 +5586,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-106023: Deny tcp src outside:100.66.19.254/80 dst inside:172.31.98.44/8277 by access-group \"inbound\" [0x0, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "localhost", diff --git a/x-pack/filebeat/module/cisco/ftd/test/not-ip.log-expected.json b/x-pack/filebeat/module/cisco/ftd/test/not-ip.log-expected.json index eb1a32afe4c..2d85b823a65 100644 --- a/x-pack/filebeat/module/cisco/ftd/test/not-ip.log-expected.json +++ b/x-pack/filebeat/module/cisco/ftd/test/not-ip.log-expected.json @@ -17,12 +17,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-5-106100: access-list AL-DMZ-LB-IN denied tcp LB-DMZ/WHAT-IS-THIS-A-HOSTNAME-192.0.2.244(27218) -> OUTSIDE/203.0.113.42(53) hit-cnt 1 first hit [0x16847359, 0x00000000]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 5, 
"event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "input.type": "log", @@ -128,8 +128,8 @@ "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "localhost", diff --git a/x-pack/filebeat/module/cisco/ftd/test/sample.log-expected.json b/x-pack/filebeat/module/cisco/ftd/test/sample.log-expected.json index 5d1987b7ec6..84c749c8d75 100644 --- a/x-pack/filebeat/module/cisco/ftd/test/sample.log-expected.json +++ b/x-pack/filebeat/module/cisco/ftd/test/sample.log-expected.json @@ -17,12 +17,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-4-106023: Deny tcp src dmz:10.1.2.30/63016 dst outside:192.0.0.8/53 by access-group \"acl_dmz\" [0xe3aab522, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "input.type": "log", @@ -67,12 +67,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-4-106023: Deny tcp src dmz:10.1.2.30/63016 dst outside:192.0.0.8/53 type 3, code 0, by access-group \"acl_dmz\" [0xe3aab522, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "input.type": "log", @@ -123,7 +123,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -168,12 +168,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-6-106100: access-list inside denied udp inside/172.29.2.101(1039) -> outside/192.0.2.10(53) hit-cnt 1 first hit [0xd820e56a, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "INT-FW01", @@ -228,7 +228,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "host.hostname": "INT-FW01", @@ -848,12 +848,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-2-106007: Deny inbound UDP from 192.0.0.66/12981 to 10.1.2.60/53 due to DNS Query", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "input.type": "log", @@ -903,7 +903,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -953,7 +953,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -1003,7 +1003,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -1053,7 +1053,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -1103,7 +1103,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -1153,7 +1153,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", 
@@ -1203,7 +1203,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -1253,7 +1253,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -1303,7 +1303,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -1353,7 +1353,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -1396,12 +1396,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-2-106006: Deny inbound UDP from 192.0.2.66/137 to 10.1.2.42/137 on interface inside", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "input.type": "log", @@ -1443,12 +1443,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-2-106007: Deny inbound UDP from 192.0.2.66/12981 to 10.1.5.60/53 due to DNS Query", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "input.type": "log", @@ -1498,7 +1498,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -1548,7 +1548,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -1598,7 +1598,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -1643,12 +1643,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-5-106100: access-list acl_in denied tcp inside/10.0.0.16(2011) -> outside/192.0.0.89(2000) hit-cnt 1 first hit [0x71a87d94, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "input.type": "log", @@ -1693,12 +1693,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-5-106100: access-list acl_in denied tcp inside/10.0.0.16(2012) -> outside/192.0.0.89(2000) hit-cnt 1 first hit [0x71a87d94, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "input.type": "log", @@ -1743,12 +1743,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-4-106023: Deny tcp src outside:192.0.2.126/53638 dst inside:10.0.0.132/8111 by access-group \"acl_out\" [0x71761f18, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "input.type": "log", @@ -1793,12 +1793,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-4-106023: Deny tcp src outside:192.0.2.126/53638 dst inside:10.0.0.132/8111 by access-group \"acl_out\" [0x71761f18, 0x0]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", 
+ "denied" ], "fileset.name": "ftd", "input.type": "log", @@ -1848,7 +1848,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -1898,7 +1898,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -1949,7 +1949,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -2052,12 +2052,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-4-106023: Deny udp src dmz:192.168.1.33/5555 dst outside:192.0.0.12/53 by access-group \"dmz\" [0x123a465e, 0x4c7bf613]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "127.0.0.1", @@ -2107,12 +2107,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-4-106023: Deny udp src dmz:192.168.1.33/5555 dst outside:192.0.0.12/53 by access-group \"dmz\" [0x123a465e, 0x4c7bf613]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "127.0.0.1", @@ -2451,12 +2451,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-6-106015: Deny TCP (no connection) from 192.0.2.222/1234 to 192.168.1.34/5679 flags RST on interface outside", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "127.0.0.1", @@ -2503,12 +2503,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-6-106015: Deny TCP (no connection) from 192.0.2.222/1234 to 192.168.1.34/5679 flags RST on interface outside", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 6, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "127.0.0.1", @@ -2557,12 +2557,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-4-106023: Deny udp src dmz:192.168.1.34/5679 dst outside:192.0.0.12/5000 by access-group \"dmz\" [0x123a465e, 0x8c20f21]", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "127.0.0.1", @@ -2837,12 +2837,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-2-106016: Deny IP spoof from (0.0.0.0) to 192.88.99.47 on interface Mobile_Traffic", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "GIFRCHN01", @@ -2884,12 +2884,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-2-106016: Deny IP spoof from (0.0.0.0) to 192.88.99.57 on interface Mobile_Traffic", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "GIFRCHN01", @@ -2931,12 
+2931,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-2-106016: Deny IP spoof from (0.0.0.0) to 192.88.99.47 on interface Mobile_Traffic", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "GIFRCHN01", @@ -2978,12 +2978,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-2-106016: Deny IP spoof from (0.0.0.0) to 192.88.99.47 on interface Mobile_Traffic", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "GIFRCHN01", @@ -3025,12 +3025,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-2-106016: Deny IP spoof from (0.0.0.0) to 192.88.99.57 on interface Mobile_Traffic", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "GIFRCHN01", @@ -3072,12 +3072,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-2-106016: Deny IP spoof from (0.0.0.0) to 192.88.99.57 on interface Mobile_Traffic", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "GIFRCHN01", @@ -3119,12 +3119,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-2-106016: Deny IP spoof from (0.0.0.0) to 192.168.1.255 on interface Mobile_Traffic", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "GIFRCHN01", @@ -3166,12 +3166,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-2-106016: Deny IP spoof from (0.0.0.0) to 192.168.1.255 on interface Mobile_Traffic", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 2, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "GIFRCHN01", @@ -3216,12 +3216,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-4-106023: Deny tcp src outside:192.0.2.95/24069 dst inside:10.32.112.125/25 by access-group \"PERMIT_IN\" [0x0, 0x0]\"", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "GIFRCHN01", @@ -3268,12 +3268,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-3-313001: Denied ICMP type=3, code=3 from 10.2.3.5 on interface Outside", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 3, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "host.hostname": "GIFRCHN01", @@ -3317,12 +3317,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-4-313004: Denied ICMP type=0, from laddr 172.16.30.2 on interface inside to 172.16.1.10: no matching session", - "event.outcome": "failure", + "event.outcome": 
"success", "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "input.type": "log", @@ -3375,7 +3375,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -3497,8 +3497,8 @@ "event.severity": 4, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "input.type": "log", @@ -3546,7 +3546,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -3588,7 +3588,7 @@ "event.timezone": "-02:00", "event.type": [ "allowed", - "info" + "connection" ], "fileset.name": "ftd", "input.type": "log", @@ -3628,12 +3628,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-5-304002: Access denied URL http://www.example.net/images/favicon.ico SRC 10.69.6.39 DEST 192.0.0.19 on interface inside", - "event.outcome": "failure", + "event.outcome": "success", "event.severity": 5, "event.timezone": "-02:00", "event.type": [ - "denied", - "info" + "connection", + "denied" ], "fileset.name": "ftd", "input.type": "log", diff --git a/x-pack/filebeat/module/cisco/ftd/test/security-connection.log-expected.json b/x-pack/filebeat/module/cisco/ftd/test/security-connection.log-expected.json index 5433746ab11..9d37925b3d2 100644 --- a/x-pack/filebeat/module/cisco/ftd/test/security-connection.log-expected.json +++ b/x-pack/filebeat/module/cisco/ftd/test/security-connection.log-expected.json @@ -844,12 +844,12 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-1-430002: AccessControlRuleAction: Block, SrcIP: 10.0.100.30, DstIP: 10.0.1.20, ICMPType: Echo Request, ICMPCode: No Code, Protocol: icmp, IngressInterface: output, EgressInterface: input, IngressZone: output-zone, EgressZone: input-zone, ACPolicy: default, AccessControlRuleName: Block-inbound-ICMP, Prefilter Policy: Default Prefilter Policy, User: No Authentication Required, InitiatorPackets: 0, ResponderPackets: 0, InitiatorBytes: 0, ResponderBytes: 0, NAPPolicy: Balanced Security and Connectivity", - "event.outcome": "block", + "event.outcome": "success", "event.severity": 1, "event.timezone": "-02:00", "event.type": [ "connection", - "failure", + "denied", "start" ], "fileset.name": "ftd", @@ -944,14 +944,14 @@ "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-1-430003: AccessControlRuleAction: Block, AccessControlRuleReason: File Block, SrcIP: 10.0.1.20, DstIP: 10.0.100.30, SrcPort: 41544, DstPort: 8000, Protocol: tcp, IngressInterface: input, EgressInterface: output, IngressZone: input-zone, EgressZone: output-zone, ACPolicy: default, AccessControlRuleName: Intrusion-Rule, Prefilter Policy: Default Prefilter Policy, User: No Authentication Required, UserAgent: curl/7.58.0, Client: cURL, ClientVersion: 7.58.0, ApplicationProtocol: HTTP, ConnectionDuration: 1, FileCount: 1, InitiatorPackets: 4, ResponderPackets: 7, InitiatorBytes: 365, ResponderBytes: 1927, NAPPolicy: Balanced Security and Connectivity, HTTPResponse: 200, ReferencedHost: 10.0.100.30:8000, URL: http://10.0.100.30:8000/eicar_com.zip", - "event.outcome": "block", + "event.outcome": "success", "event.severity": 1, "event.start": "2019-08-14T17:09:40.000Z", "event.timezone": "-02:00", "event.type": [ "connection", - "end", - "failure" + "denied", + "end" ], "fileset.name": "ftd", "host.hostname": "siem-ftd", diff --git 
a/x-pack/filebeat/module/cisco/shared/ingest/asa-ftd-pipeline.yml b/x-pack/filebeat/module/cisco/shared/ingest/asa-ftd-pipeline.yml index 5fb634d0fe2..709f3762a24 100644 --- a/x-pack/filebeat/module/cisco/shared/ingest/asa-ftd-pipeline.yml +++ b/x-pack/filebeat/module/cisco/shared/ingest/asa-ftd-pipeline.yml @@ -383,7 +383,7 @@ processors: if: "ctx._temp_.cisco.message_id == '304001'" field: "event.outcome" description: "304001" - value: success + value: "allowed" - dissect: if: "ctx._temp_.cisco.message_id == '304002'" field: "message" @@ -750,7 +750,7 @@ processors: - set: if: '["110002"].contains(ctx._temp_.cisco.message_id)' field: "event.outcome" - value: "failure" + value: "dropped" - set: if: '["713120"].contains(ctx._temp_.cisco.message_id)' field: "event.outcome" @@ -760,7 +760,11 @@ processors: field: "event.outcome" value: "success" - set: - if: '["713905", "713904", "713906", "713902", "713901", "710005"].contains(ctx._temp_.cisco.message_id)' + if: '["710005"].contains(ctx._temp_.cisco.message_id)' + field: "event.outcome" + value: "dropped" + - set: + if: '["713901", "713902", "713903", "713904", "713905"].contains(ctx._temp_.cisco.message_id)' field: "event.outcome" value: "failure" - set: @@ -1447,27 +1451,19 @@ processors: - set: field: "event.outcome" if: 'ctx.event?.outcome == "est-allowed"' - value: success + value: "allowed" - set: field: "event.outcome" if: 'ctx.event?.outcome == "permitted"' - value: success + value: "allowed" - set: field: "event.outcome" if: 'ctx.event?.outcome == "allow"' - value: success - - set: - field: "event.outcome" - if: 'ctx.event?.outcome == "denied"' - value: failure + value: "allowed" - set: field: "event.outcome" if: 'ctx.event?.outcome == "deny"' - value: failure - - set: - field: "event.outcome" - if: 'ctx.event?.outcome == "dropped"' - value: failure + value: denied - set: field: "network.transport" if: 'ctx.network?.transport == "icmpv6"' @@ -1773,14 +1769,12 @@ processors: category: - network type: - - connection - end connection-started: kind: event category: - network type: - - connection - start file-detected: kind: alert @@ -1792,8 +1786,7 @@ processors: kind: event category: - network - type: - - info + type: [] flow-expiration: kind: event category: @@ -1847,22 +1840,36 @@ processors: if (ctx?.event?.action == null || !params.containsKey(ctx.event.action)) { return; } + ctx.event.kind = params.get(ctx.event.action).get('kind'); ctx.event.category = params.get(ctx.event.action).get('category').clone(); ctx.event.type = params.get(ctx.event.action).get('type').clone(); - if (ctx?.event?.outcome == null) { + + if (ctx?.event?.outcome == null || (!ctx.event.category.contains('network') && !ctx.event.category.contains('intrusion_detection'))) { + if (ctx?.event?.action == 'firewall-rule') { + ctx.event.type.add('info'); + } else if (ctx?.event?.action.startsWith('connection-')) { + ctx.event.type.add('connection'); + } return; } - if (ctx.event.category.contains('network') || ctx.event.category.contains('intrusion_detection')) { - if (ctx.event.outcome == 'success') { - ctx.event.type.add('allowed'); - } - if (ctx.event.outcome == 'failure') { - ctx.event.type.add('denied'); - } - if (ctx.event.outcome == 'block') { - ctx.event.type.add('failure'); - } + + if (ctx.event.outcome == 'allowed') { + ctx.event.outcome = 'success'; + ctx.event.type.add('connection'); + ctx.event.type.add('allowed'); + } else if (ctx.event.outcome == 'denied' || ctx.event.outcome == 'block') { + ctx.event.outcome = 'success'; + 
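+            // A deny or block verdict means the firewall enforced its policy
+            // as configured, so event.outcome is reported as success from the
+            // device's perspective; the denial itself is captured in
+            // event.type. Only the 'dropped' verdict below maps to failure.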
ctx.event.type.add('connection'); + ctx.event.type.add('denied'); + } else if (ctx.event.outcome == 'dropped') { + ctx.event.outcome = 'failure'; + ctx.event.type.add('connection'); + ctx.event.type.add('denied'); + } else if (ctx?.event?.action == 'firewall-rule') { + ctx.event.type.add('info'); + } else if (ctx?.event?.action.startsWith('connection-')) { + ctx.event.type.add('connection'); } - set: From 298e4fcd2548e12171544f1383cae47b69163c75 Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Wed, 19 Jan 2022 20:30:24 +1030 Subject: [PATCH 20/69] x-pack/auditbeat/module/system/process: don't try to hash files in other namespaces (#29786) --- CHANGELOG.next.asciidoc | 1 + .../module/system/process/namepace_linux.go | 57 +++++++++++++++++++ .../module/system/process/namepace_other.go | 13 +++++ .../module/system/process/process.go | 15 ++++- 4 files changed, 84 insertions(+), 2 deletions(-) create mode 100644 x-pack/auditbeat/module/system/process/namepace_linux.go create mode 100644 x-pack/auditbeat/module/system/process/namepace_other.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index e50312b035e..ef2b24d07c2 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -157,6 +157,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Auditbeat* +- system/process: Prevent hashing files in other mnt namespaces. {issue}25777[25777] {issue}29678[29678] {pull}29786[29786] *Filebeat* diff --git a/x-pack/auditbeat/module/system/process/namepace_linux.go b/x-pack/auditbeat/module/system/process/namepace_linux.go new file mode 100644 index 00000000000..148f22910b4 --- /dev/null +++ b/x-pack/auditbeat/module/system/process/namepace_linux.go @@ -0,0 +1,57 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build linux +// +build linux + +package process + +import ( + "fmt" + "os" + "syscall" + + "github.com/pkg/errors" +) + +// isNsSharedWith returns whether the process with the given pid shares the +// namespace ns with the current process. +func isNsSharedWith(pid int, ns string) (yes bool, err error) { + self, err := selfNsIno(ns) + if err != nil { + return false, err + } + other, err := nsIno(pid, ns) + if err != nil { + return false, err + } + return self == other, nil +} + +// selfNsIno returns the inode number for the namespace ns for this process. +func selfNsIno(ns string) (ino uint64, err error) { + fi, err := os.Stat(fmt.Sprintf("/proc/self/ns/%s", ns)) + if err != nil { + return 0, err + } + sysInfo, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return 0, errors.New("not a stat_t") + } + return sysInfo.Ino, nil +} + +// nsIno returns the inode number for the namespace ns for the process with +// the given pid. 
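+// Like selfNsIno, it resolves the inode by stat-ing the namespace link under
+// procfs; isNsSharedWith compares the two inode numbers to decide whether the
+// namespaces match.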
+func nsIno(pid int, ns string) (ino uint64, err error) { + fi, err := os.Stat(fmt.Sprintf("/proc/%d/ns/%s", pid, ns)) + if err != nil { + return 0, err + } + sysInfo, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return 0, errors.New("not a stat_t") + } + return sysInfo.Ino, nil +} diff --git a/x-pack/auditbeat/module/system/process/namepace_other.go b/x-pack/auditbeat/module/system/process/namepace_other.go new file mode 100644 index 00000000000..d49add6dcb7 --- /dev/null +++ b/x-pack/auditbeat/module/system/process/namepace_other.go @@ -0,0 +1,13 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build !linux +// +build !linux + +package process + +// isNsSharedWith returns true and nil. +func isNsSharedWith(pid int, ns string) (yes bool, err error) { + return true, nil +} diff --git a/x-pack/auditbeat/module/system/process/process.go b/x-pack/auditbeat/module/system/process/process.go index dde619d50b9..74b48ad6118 100644 --- a/x-pack/auditbeat/module/system/process/process.go +++ b/x-pack/auditbeat/module/system/process/process.go @@ -319,15 +319,26 @@ func (ms *MetricSet) enrichProcess(process *Process) { } if process.Info.Exe != "" { + sharedMntNS, err := isNsSharedWith(process.Info.PID, "mnt") + if err != nil { + if process.Error == nil { + process.Error = errors.Wrapf(err, "failed to get namespaces for %v PID %v", process.Info.Exe, + process.Info.PID) + } + return + } + if !sharedMntNS { + return + } hashes, err := ms.hasher.HashFile(process.Info.Exe) if err != nil { if process.Error == nil { process.Error = errors.Wrapf(err, "failed to hash executable %v for PID %v", process.Info.Exe, process.Info.PID) } - } else { - process.Hashes = hashes + return } + process.Hashes = hashes } } From 92ecce05443be423bb581a77b11e14da530e3c01 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 19 Jan 2022 08:39:40 -0500 Subject: [PATCH 21/69] [Automation] Update elastic stack version to 8.1.0-f5a18001 for testing (#29903) Co-authored-by: apmmachine Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- testing/environments/snapshot-oss.yml | 6 +++--- testing/environments/snapshot.yml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/testing/environments/snapshot-oss.yml b/testing/environments/snapshot-oss.yml index 26b198effc9..eea926aab2f 100644 --- a/testing/environments/snapshot-oss.yml +++ b/testing/environments/snapshot-oss.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-677b9ef0-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-f5a18001-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -21,7 +21,7 @@ services: - "script.context.template.cache_max_size=2000" logstash: - image: docker.elastic.co/logstash/logstash-oss:8.1.0-677b9ef0-SNAPSHOT + image: docker.elastic.co/logstash/logstash-oss:8.1.0-f5a18001-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -31,7 +31,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.1.0-677b9ef0-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.1.0-f5a18001-SNAPSHOT healthcheck: test: ["CMD-SHELL", 
"curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 4cae61b0cbe..b55137d4a91 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-677b9ef0-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-f5a18001-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -37,7 +37,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.1.0-677b9ef0-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.1.0-f5a18001-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 From 8acaed96150f6f6dacd90aca9181ea81807a8799 Mon Sep 17 00:00:00 2001 From: endorama <526307+endorama@users.noreply.github.com> Date: Wed, 19 Jan 2022 17:42:35 +0100 Subject: [PATCH 22/69] [Metricbeat] gcp.gke: fix overview dashboard (#29913) --- CHANGELOG.next.asciidoc | 1 + .../1ae960c0-f9f8-11eb-bc38-79936db7c106.json | 516 +++++++++--------- 2 files changed, 259 insertions(+), 258 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index ef2b24d07c2..cb9fa4932f4 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -122,6 +122,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix gcp metrics metricset apply aligner to all metric_types {pull}29514[29513] - Extract correct index property in kibana.stats metricset {pull}29622[29622] - Fixed bug with `elasticsearch/cluster_stats` metricset not recording license expiration date correctly. 
{pull}29711[29711] +- Fixed GCP GKE Overview dashboard {pull}29913[29913] *Packetbeat* diff --git a/x-pack/metricbeat/module/gcp/_meta/kibana/7/dashboard/1ae960c0-f9f8-11eb-bc38-79936db7c106.json b/x-pack/metricbeat/module/gcp/_meta/kibana/7/dashboard/1ae960c0-f9f8-11eb-bc38-79936db7c106.json index 251cc7aba6a..5e2593a51df 100644 --- a/x-pack/metricbeat/module/gcp/_meta/kibana/7/dashboard/1ae960c0-f9f8-11eb-bc38-79936db7c106.json +++ b/x-pack/metricbeat/module/gcp/_meta/kibana/7/dashboard/1ae960c0-f9f8-11eb-bc38-79936db7c106.json @@ -98,7 +98,7 @@ }, "panelIndex": "9d604bbc-ce5e-49c8-b961-d974fa9d7891", "type": "visualization", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -167,7 +167,7 @@ "panelIndex": "a4a26c8f-3415-4cb2-a44e-27fe2e706862", "title": "Clusters", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -236,7 +236,7 @@ "panelIndex": "6018a29a-f6f0-4dec-9940-9094b3ed841d", "title": "Nodes", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -305,7 +305,7 @@ "panelIndex": "6f1f7601-f921-4051-899d-10fda75d07df", "title": "Namespaces", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -374,7 +374,7 @@ "panelIndex": "1d22e757-a6ec-43df-a60f-decda1d057c2", "title": "Pods", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -451,7 +451,7 @@ "panelIndex": "7541ca7c-3333-4065-9d98-f8fa11c29ebf", "title": "Container restarts", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -487,7 +487,7 @@ }, "panelIndex": "00e7a3a4-e042-4f46-8637-3159cd608047", "type": "visualization", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -523,7 +523,7 @@ }, "panelIndex": "20e84709-926a-4588-a76c-dd9c5583873a", "type": "visualization", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -661,7 +661,7 @@ "panelIndex": "10a4fa84-84c1-45ac-921a-7d4e7ba0a461", "title": "Pods per cluster", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -802,7 +802,7 @@ "panelIndex": "8261db16-766d-4c27-b988-87c90ed067d0", "title": "Nodes per cluster", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -991,7 +991,156 @@ "panelIndex": "1df880b1-44bc-468b-aba7-0cec27b74b12", "title": "CPU usage by Pod (seconds)", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" + }, + { + "embeddableConfig": { + "attributes": { + "references": [ + { + "id": "metricbeat-*", + "name": "indexpattern-datasource-current-indexpattern", + "type": "index-pattern" + }, + { + "id": "metricbeat-*", + "name": "indexpattern-datasource-layer-f4259765-f8eb-47de-8472-a04528f8219e", + "type": "index-pattern" + } + ], + "state": { + "datasourceStates": { + "indexpattern": { + "layers": { + "f4259765-f8eb-47de-8472-a04528f8219e": { + "columnOrder": [ + "18e2e114-77eb-4a85-afdb-ddd837e6f05a", + "3153e211-b16a-4f92-b775-6d06a4edaf44", + "61256570-b7dd-4bec-b73d-d12d993ae091" + ], + "columns": { + "18e2e114-77eb-4a85-afdb-ddd837e6f05a": { + "dataType": "string", + "isBucketed": true, + "label": "Top values of gcp.labels.resource.pod_name", + "operationType": "terms", + "params": { + "missingBucket": false, + "orderBy": { + "columnId": "61256570-b7dd-4bec-b73d-d12d993ae091", + "type": "column" + }, + "orderDirection": "desc", + "otherBucket": true, + "size": 100 + }, + "scale": "ordinal", + "sourceField": 
"gcp.labels.resource.pod_name" + }, + "3153e211-b16a-4f92-b775-6d06a4edaf44": { + "dataType": "date", + "isBucketed": true, + "label": "@timestamp", + "operationType": "date_histogram", + "params": { + "interval": "60s" + }, + "scale": "interval", + "sourceField": "@timestamp" + }, + "61256570-b7dd-4bec-b73d-d12d993ae091": { + "dataType": "number", + "isBucketed": false, + "label": "Median of gcp.gke.container.memory.limit_utilization.pct", + "operationType": "median", + "params": { + "format": { + "id": "percent", + "params": { + "decimals": 0 + } + } + }, + "scale": "ratio", + "sourceField": "gcp.gke.container.memory.limit_utilization.pct" + } + }, + "incompleteColumns": {} + } + } + } + }, + "filters": [], + "query": { + "language": "kuery", + "query": "" + }, + "visualization": { + "axisTitlesVisibilitySettings": { + "x": false, + "yLeft": false, + "yRight": true + }, + "fittingFunction": "Linear", + "gridlinesVisibilitySettings": { + "x": true, + "yLeft": true, + "yRight": true + }, + "layers": [ + { + "accessors": [ + "61256570-b7dd-4bec-b73d-d12d993ae091" + ], + "layerId": "f4259765-f8eb-47de-8472-a04528f8219e", + "layerType": "data", + "position": "top", + "seriesType": "line", + "showGridlines": false, + "splitAccessor": "18e2e114-77eb-4a85-afdb-ddd837e6f05a", + "xAccessor": "3153e211-b16a-4f92-b775-6d06a4edaf44" + } + ], + "legend": { + "isVisible": false, + "position": "bottom", + "showSingleSeries": false + }, + "preferredSeriesType": "line", + "tickLabelsVisibilitySettings": { + "x": true, + "yLeft": true, + "yRight": true + }, + "valueLabels": "hide", + "yLeftExtent": { + "lowerBound": 0, + "mode": "custom", + "upperBound": 1 + }, + "yRightExtent": { + "mode": "full" + } + } + }, + "title": "", + "type": "lens", + "visualizationType": "lnsXY" + }, + "enhancements": {}, + "hidePanelTitles": false + }, + "gridData": { + "h": 9, + "i": "ce28d9df-4506-4020-b2de-6274ac0d46b7", + "w": 12, + "x": 12, + "y": 21 + }, + "panelIndex": "ce28d9df-4506-4020-b2de-6274ac0d46b7", + "title": "CPU limit utilization by Pod", + "type": "lens", + "version": "7.16.2" }, { "embeddableConfig": { @@ -1132,7 +1281,7 @@ "panelIndex": "f3275c69-84ce-4c6d-bd49-3cd6f1c606f9", "title": "CPU usage per Node (seconds)", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -1331,7 +1480,7 @@ "panelIndex": "cadc827f-4efb-4045-b98b-7395264e4c16", "title": "Memory usage", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -1485,7 +1634,7 @@ "panelIndex": "5551e0b7-722f-401c-90fd-a0094e919618", "title": "Memory usage", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -1498,7 +1647,7 @@ }, { "id": "metricbeat-*", - "name": "indexpattern-datasource-layer-f4259765-f8eb-47de-8472-a04528f8219e", + "name": "indexpattern-datasource-layer-3888dded-b04b-45ec-b466-c121715bc0c1", "type": "index-pattern" } ], @@ -1506,14 +1655,25 @@ "datasourceStates": { "indexpattern": { "layers": { - "f4259765-f8eb-47de-8472-a04528f8219e": { + "3888dded-b04b-45ec-b466-c121715bc0c1": { "columnOrder": [ - "18e2e114-77eb-4a85-afdb-ddd837e6f05a", - "3153e211-b16a-4f92-b775-6d06a4edaf44", - "61256570-b7dd-4bec-b73d-d12d993ae091" + "18948f36-88ec-476d-8593-352b13485e53", + "118e3ebc-e414-495d-a99d-a356e436b074", + "98d45c49-c3b2-43ff-bf13-9b289ba154af" ], "columns": { - "18e2e114-77eb-4a85-afdb-ddd837e6f05a": { + "118e3ebc-e414-495d-a99d-a356e436b074": { + "dataType": "date", + "isBucketed": true, + "label": "@timestamp", + 
"operationType": "date_histogram", + "params": { + "interval": "60s" + }, + "scale": "interval", + "sourceField": "@timestamp" + }, + "18948f36-88ec-476d-8593-352b13485e53": { "dataType": "string", "isBucketed": true, "label": "Top values of gcp.labels.resource.pod_name", @@ -1521,42 +1681,32 @@ "params": { "missingBucket": false, "orderBy": { - "columnId": "61256570-b7dd-4bec-b73d-d12d993ae091", + "columnId": "98d45c49-c3b2-43ff-bf13-9b289ba154af", "type": "column" }, "orderDirection": "desc", "otherBucket": true, - "size": 100 + "size": 5 }, "scale": "ordinal", "sourceField": "gcp.labels.resource.pod_name" }, - "3153e211-b16a-4f92-b775-6d06a4edaf44": { - "dataType": "date", - "isBucketed": true, - "label": "@timestamp", - "operationType": "date_histogram", - "params": { - "interval": "60s" - }, - "scale": "interval", - "sourceField": "@timestamp" - }, - "61256570-b7dd-4bec-b73d-d12d993ae091": { + "98d45c49-c3b2-43ff-bf13-9b289ba154af": { + "customLabel": true, "dataType": "number", "isBucketed": false, - "label": "Median of gcp.gke.container.memory.limit_utilization.pct", + "label": "Median memory used", "operationType": "median", "params": { "format": { - "id": "percent", + "id": "bytes", "params": { - "decimals": 0 + "decimals": 2 } } }, "scale": "ratio", - "sourceField": "gcp.gke.container.memory.limit_utilization.pct" + "sourceField": "gcp.gke.container.memory.used.bytes" } }, "incompleteColumns": {} @@ -1584,15 +1734,15 @@ "layers": [ { "accessors": [ - "61256570-b7dd-4bec-b73d-d12d993ae091" + "98d45c49-c3b2-43ff-bf13-9b289ba154af" ], - "layerId": "f4259765-f8eb-47de-8472-a04528f8219e", + "layerId": "3888dded-b04b-45ec-b466-c121715bc0c1", "layerType": "data", "position": "top", "seriesType": "line", "showGridlines": false, - "splitAccessor": "18e2e114-77eb-4a85-afdb-ddd837e6f05a", - "xAccessor": "3153e211-b16a-4f92-b775-6d06a4edaf44" + "splitAccessor": "18948f36-88ec-476d-8593-352b13485e53", + "xAccessor": "118e3ebc-e414-495d-a99d-a356e436b074" } ], "legend": { @@ -1607,10 +1757,9 @@ "yRight": true }, "valueLabels": "hide", + "valuesInLegend": true, "yLeftExtent": { - "lowerBound": 0, - "mode": "custom", - "upperBound": 1 + "mode": "full" }, "yRightExtent": { "mode": "full" @@ -1626,15 +1775,15 @@ }, "gridData": { "h": 9, - "i": "ce28d9df-4506-4020-b2de-6274ac0d46b7", + "i": "610b04b2-7483-4995-b3f6-a11c09c2b2f2", "w": 12, "x": 12, - "y": 21 + "y": 30 }, - "panelIndex": "ce28d9df-4506-4020-b2de-6274ac0d46b7", - "title": "CPU limit utilization by Pod", + "panelIndex": "610b04b2-7483-4995-b3f6-a11c09c2b2f2", + "title": "Memory usage by Pod", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -1848,7 +1997,7 @@ "panelIndex": "a1714037-fe75-468b-bfea-d4a8e1769cbf", "title": "Network traffic (bytes count)", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -1901,7 +2050,7 @@ "label": "Media received bytes count", "operationType": "median", "scale": "ratio", - "sourceField": "gcp.gke.node.network.received_bytes.count" + "sourceField": "gcp.gke.node.network.received.bytes" } }, "incompleteColumns": {} @@ -1950,7 +2099,7 @@ "panelIndex": "f09d76ba-490b-4392-b6cb-051e4fcc03c9", "title": "Inbound network traffic top nodes", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -2118,7 +2267,7 @@ "panelIndex": "eea592ea-3598-4104-96b6-ea33a0d9845d", "title": "Storage requests", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -2131,7 
+2280,7 @@ }, { "id": "metricbeat-*", - "name": "indexpattern-datasource-layer-3888dded-b04b-45ec-b466-c121715bc0c1", + "name": "indexpattern-datasource-layer-abc1288b-8de8-4cd4-ae39-c2d9c700396e", "type": "index-pattern" } ], @@ -2139,14 +2288,30 @@ "datasourceStates": { "indexpattern": { "layers": { - "3888dded-b04b-45ec-b466-c121715bc0c1": { + "abc1288b-8de8-4cd4-ae39-c2d9c700396e": { "columnOrder": [ - "18948f36-88ec-476d-8593-352b13485e53", - "118e3ebc-e414-495d-a99d-a356e436b074", - "98d45c49-c3b2-43ff-bf13-9b289ba154af" + "d40af55e-4c31-4a17-b71f-63ee7e598131", + "b47b55db-eb5b-4a10-8c49-c6920135fedf", + "374bc09a-188c-4cdf-b993-b31eb3754e46" ], "columns": { - "118e3ebc-e414-495d-a99d-a356e436b074": { + "374bc09a-188c-4cdf-b993-b31eb3754e46": { + "dataType": "number", + "isBucketed": false, + "label": "Maximum of gcp.gke.pod.volume.utilization.pct", + "operationType": "max", + "params": { + "format": { + "id": "percent", + "params": { + "decimals": 2 + } + } + }, + "scale": "ratio", + "sourceField": "gcp.gke.pod.volume.utilization.pct" + }, + "b47b55db-eb5b-4a10-8c49-c6920135fedf": { "dataType": "date", "isBucketed": true, "label": "@timestamp", @@ -2157,7 +2322,7 @@ "scale": "interval", "sourceField": "@timestamp" }, - "18948f36-88ec-476d-8593-352b13485e53": { + "d40af55e-4c31-4a17-b71f-63ee7e598131": { "dataType": "string", "isBucketed": true, "label": "Top values of gcp.labels.resource.pod_name", @@ -2165,7 +2330,7 @@ "params": { "missingBucket": false, "orderBy": { - "columnId": "98d45c49-c3b2-43ff-bf13-9b289ba154af", + "columnId": "374bc09a-188c-4cdf-b993-b31eb3754e46", "type": "column" }, "orderDirection": "desc", @@ -2174,23 +2339,6 @@ }, "scale": "ordinal", "sourceField": "gcp.labels.resource.pod_name" - }, - "98d45c49-c3b2-43ff-bf13-9b289ba154af": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Median memory used", - "operationType": "median", - "params": { - "format": { - "id": "bytes", - "params": { - "decimals": 2 - } - } - }, - "scale": "ratio", - "sourceField": "gcp.gke.container.memory.used.bytes" } }, "incompleteColumns": {} @@ -2218,15 +2366,14 @@ "layers": [ { "accessors": [ - "98d45c49-c3b2-43ff-bf13-9b289ba154af" + "374bc09a-188c-4cdf-b993-b31eb3754e46" ], - "layerId": "3888dded-b04b-45ec-b466-c121715bc0c1", + "layerId": "abc1288b-8de8-4cd4-ae39-c2d9c700396e", "layerType": "data", - "position": "top", "seriesType": "line", - "showGridlines": false, - "splitAccessor": "18948f36-88ec-476d-8593-352b13485e53", - "xAccessor": "118e3ebc-e414-495d-a99d-a356e436b074" + "splitAccessor": "d40af55e-4c31-4a17-b71f-63ee7e598131", + "xAccessor": "b47b55db-eb5b-4a10-8c49-c6920135fedf", + "yConfig": [] } ], "legend": { @@ -2259,15 +2406,15 @@ }, "gridData": { "h": 9, - "i": "610b04b2-7483-4995-b3f6-a11c09c2b2f2", + "i": "17973ffd-05c1-4075-97eb-4990a3e9b61e", "w": 12, "x": 12, - "y": 30 + "y": 39 }, - "panelIndex": "610b04b2-7483-4995-b3f6-a11c09c2b2f2", - "title": "Memory usage by Pod", + "panelIndex": "17973ffd-05c1-4075-97eb-4990a3e9b61e", + "title": "Volume utilization by Pod", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -2421,7 +2568,7 @@ "panelIndex": "4611dd47-5619-485a-8a19-8edcc37d2f4e", "title": "Ephemeral storage usage", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -2571,154 +2718,7 @@ "panelIndex": "fe426912-61f1-4913-81d1-1dc734c50111", "title": "Ephemeral storage usage by Node", "type": "lens", - "version": "7.15.0" - }, - { - 
"embeddableConfig": { - "attributes": { - "references": [ - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-layer-abc1288b-8de8-4cd4-ae39-c2d9c700396e", - "type": "index-pattern" - } - ], - "state": { - "datasourceStates": { - "indexpattern": { - "layers": { - "abc1288b-8de8-4cd4-ae39-c2d9c700396e": { - "columnOrder": [ - "d40af55e-4c31-4a17-b71f-63ee7e598131", - "b47b55db-eb5b-4a10-8c49-c6920135fedf", - "374bc09a-188c-4cdf-b993-b31eb3754e46" - ], - "columns": { - "374bc09a-188c-4cdf-b993-b31eb3754e46": { - "dataType": "number", - "isBucketed": false, - "label": "Maximum of gcp.gke.pod.volume.utilization.pct", - "operationType": "max", - "params": { - "format": { - "id": "percent", - "params": { - "decimals": 2 - } - } - }, - "scale": "ratio", - "sourceField": "gcp.gke.pod.volume.utilization.pct" - }, - "b47b55db-eb5b-4a10-8c49-c6920135fedf": { - "dataType": "date", - "isBucketed": true, - "label": "@timestamp", - "operationType": "date_histogram", - "params": { - "interval": "60s" - }, - "scale": "interval", - "sourceField": "@timestamp" - }, - "d40af55e-4c31-4a17-b71f-63ee7e598131": { - "dataType": "string", - "isBucketed": true, - "label": "Top values of gcp.labels.resource.pod_name", - "operationType": "terms", - "params": { - "missingBucket": false, - "orderBy": { - "columnId": "374bc09a-188c-4cdf-b993-b31eb3754e46", - "type": "column" - }, - "orderDirection": "desc", - "otherBucket": true, - "size": 5 - }, - "scale": "ordinal", - "sourceField": "gcp.labels.resource.pod_name" - } - }, - "incompleteColumns": {} - } - } - } - }, - "filters": [], - "query": { - "language": "kuery", - "query": "" - }, - "visualization": { - "axisTitlesVisibilitySettings": { - "x": false, - "yLeft": false, - "yRight": true - }, - "fittingFunction": "Linear", - "gridlinesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "layers": [ - { - "accessors": [ - "374bc09a-188c-4cdf-b993-b31eb3754e46" - ], - "layerId": "abc1288b-8de8-4cd4-ae39-c2d9c700396e", - "layerType": "data", - "seriesType": "line", - "splitAccessor": "d40af55e-4c31-4a17-b71f-63ee7e598131", - "xAccessor": "b47b55db-eb5b-4a10-8c49-c6920135fedf", - "yConfig": [] - } - ], - "legend": { - "isVisible": false, - "position": "bottom", - "showSingleSeries": false - }, - "preferredSeriesType": "line", - "tickLabelsVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "valueLabels": "hide", - "valuesInLegend": true, - "yLeftExtent": { - "mode": "full" - }, - "yRightExtent": { - "mode": "full" - } - } - }, - "title": "", - "type": "lens", - "visualizationType": "lnsXY" - }, - "enhancements": {}, - "hidePanelTitles": false - }, - "gridData": { - "h": 9, - "i": "17973ffd-05c1-4075-97eb-4990a3e9b61e", - "w": 12, - "x": 12, - "y": 39 - }, - "panelIndex": "17973ffd-05c1-4075-97eb-4990a3e9b61e", - "title": "Volume utilization by Pod", - "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -2853,17 +2853,17 @@ }, "panelIndex": "6f9ca350-a898-4865-8aef-4b593bd341ff", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" } ], "timeRestore": false, "title": "[Metricbeat GCP] GKE Overview", "version": 1 }, - "coreMigrationVersion": "7.15.0", + "coreMigrationVersion": "7.16.2", "id": "1ae960c0-f9f8-11eb-bc38-79936db7c106", "migrationVersion": { - "dashboard": "7.15.0" + "dashboard": "7.16.0" }, "references": [ { @@ -2966,6 +2966,16 @@ "name": 
"1df880b1-44bc-468b-aba7-0cec27b74b12:indexpattern-datasource-layer-0599ce9e-3c12-4f89-af4e-d094e9f68ea9", "type": "index-pattern" }, + { + "id": "metricbeat-*", + "name": "ce28d9df-4506-4020-b2de-6274ac0d46b7:indexpattern-datasource-current-indexpattern", + "type": "index-pattern" + }, + { + "id": "metricbeat-*", + "name": "ce28d9df-4506-4020-b2de-6274ac0d46b7:indexpattern-datasource-layer-f4259765-f8eb-47de-8472-a04528f8219e", + "type": "index-pattern" + }, { "id": "metricbeat-*", "name": "f3275c69-84ce-4c6d-bd49-3cd6f1c606f9:indexpattern-datasource-current-indexpattern", @@ -3003,12 +3013,12 @@ }, { "id": "metricbeat-*", - "name": "ce28d9df-4506-4020-b2de-6274ac0d46b7:indexpattern-datasource-current-indexpattern", + "name": "610b04b2-7483-4995-b3f6-a11c09c2b2f2:indexpattern-datasource-current-indexpattern", "type": "index-pattern" }, { "id": "metricbeat-*", - "name": "ce28d9df-4506-4020-b2de-6274ac0d46b7:indexpattern-datasource-layer-f4259765-f8eb-47de-8472-a04528f8219e", + "name": "610b04b2-7483-4995-b3f6-a11c09c2b2f2:indexpattern-datasource-layer-3888dded-b04b-45ec-b466-c121715bc0c1", "type": "index-pattern" }, { @@ -3043,12 +3053,12 @@ }, { "id": "metricbeat-*", - "name": "610b04b2-7483-4995-b3f6-a11c09c2b2f2:indexpattern-datasource-current-indexpattern", + "name": "17973ffd-05c1-4075-97eb-4990a3e9b61e:indexpattern-datasource-current-indexpattern", "type": "index-pattern" }, { "id": "metricbeat-*", - "name": "610b04b2-7483-4995-b3f6-a11c09c2b2f2:indexpattern-datasource-layer-3888dded-b04b-45ec-b466-c121715bc0c1", + "name": "17973ffd-05c1-4075-97eb-4990a3e9b61e:indexpattern-datasource-layer-abc1288b-8de8-4cd4-ae39-c2d9c700396e", "type": "index-pattern" }, { @@ -3071,16 +3081,6 @@ "name": "fe426912-61f1-4913-81d1-1dc734c50111:indexpattern-datasource-layer-98ee5c53-f8e5-43ed-91a8-507010e5b0a9", "type": "index-pattern" }, - { - "id": "metricbeat-*", - "name": "17973ffd-05c1-4075-97eb-4990a3e9b61e:indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "17973ffd-05c1-4075-97eb-4990a3e9b61e:indexpattern-datasource-layer-abc1288b-8de8-4cd4-ae39-c2d9c700396e", - "type": "index-pattern" - }, { "id": "metricbeat-*", "name": "6f9ca350-a898-4865-8aef-4b593bd341ff:indexpattern-datasource-current-indexpattern", @@ -3093,6 +3093,6 @@ } ], "type": "dashboard", - "updated_at": "2021-08-26T15:37:59.949Z", - "version": "WzIxODAsMV0=" + "updated_at": "2022-01-19T14:08:38.278Z", + "version": "WzIyMTYsMV0=" } \ No newline at end of file From 82fcf6632309dadc7a2b52119faa1270339e8774 Mon Sep 17 00:00:00 2001 From: Cyrille Le Clerc Date: Thu, 20 Jan 2022 08:30:45 +0100 Subject: [PATCH 23/69] [Documentation] Istio Module documentation clarification (#29873) * Documentation clarification * Update metricbeat/docs/modules/istio.asciidoc Co-authored-by: Chris Mark * Update istio.asciidoc Co-authored-by: Chris Mark --- metricbeat/docs/modules/istio.asciidoc | 30 +++++++++---------- x-pack/metricbeat/metricbeat.reference.yml | 14 ++++----- .../module/istio/_meta/config.reference.yml | 14 ++++----- .../metricbeat/module/istio/_meta/config.yml | 14 ++++----- .../module/istio/_meta/docs.asciidoc | 16 +++++----- .../metricbeat/modules.d/istio.yml.disabled | 14 ++++----- 6 files changed, 51 insertions(+), 51 deletions(-) diff --git a/metricbeat/docs/modules/istio.asciidoc b/metricbeat/docs/modules/istio.asciidoc index aedb381098d..66abac572b0 100644 --- a/metricbeat/docs/modules/istio.asciidoc +++ b/metricbeat/docs/modules/istio.asciidoc @@ -11,16 +11,16 @@ 
This file is generated! See scripts/mage/docs_collector.go beta[] This is the Istio module. -This module is compatible with versions before `1.5` of Istio where microservices architecture is used. If using -versions prior to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used where the Istio -module collects metrics from the -Istio https://istio.io/v1.4/docs/tasks/observability/metrics/querying-metrics/#about-the-prometheus-add-on[prometheus exporters endpoints]. +When using versions prior to `1.5` then the `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used. -For versions after `1.5`, `istiod` and `proxy` metricsets can be used. +In such case, the Istio module collects metrics from the pre v1.5 +Istio https://istio.io/v1.4/docs/tasks/observability/metrics/querying-metrics/#about-the-prometheus-add-on[prometheus exporters endpoints]. -`istiod` collects metrics directly from Istio Daemon while `proxy` collects from each of the proxy sidecars. +For versions after `1.5`, the `istiod` and `proxy` metricsets should be used. +In such case, the `istiod` endpoint collects metrics directly from the Istio Daemon while the `proxy` endpoint collects from each of the proxy sidecars. +The metrics exposed by Istio after version `1.5` are documented on https://istio.io/latest/docs/reference/config/metrics/[Istio Documentation > Reference > Configuration > Istio Standard Metrics]. -The default metricsets are `mesh`, `mixer`, `pilot`, `galley`, `citadel`. +The default metricsets of the module are `mesh`, `mixer`, `pilot`, `galley`, `citadel`. When using Istio versions after `1.5`, the default metricsets must be overwritten specifying the `istiod` and/or `proxy` metricsets. [float] === Compatibility @@ -33,7 +33,7 @@ The Istio module is tested with Istio `1.7` for `istiod` and `proxy`. The Istio module includes predefined dashboards: -1. overview information about Istio Daemon. +1. Overview information about Istio Daemon. 2. Traffic information collected from istio-proxies. @@ -54,49 +54,49 @@ in <>. Here is an example configuration: [source,yaml] ---- metricbeat.modules: -# Istio mesh. To collect all Mixer-generated metrics +# Istio mesh. To collect all Mixer-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["mesh"] period: 10s # use istio-telemetry.istio-system:42422, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:42422"] -# Istio mixer. To monitor Mixer itself. +# Istio mixer. To monitor Mixer itself. For versions of Istio prior to 1.5. - module: istio metricsets: ["mixer"] period: 10s # use istio-telemetry.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio galley. To collect all Galley-generated metrics +# Istio galley. To collect all Galley-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["galley"] period: 10s # use istio-galley.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio pilot. To collect all Pilot-generated metrics. +# Istio pilot. To collect all Pilot-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["pilot"] period: 10s # use istio-pilot.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio citadel. To collect all Citadel-generated metrics. +# Istio citadel. 
To collect all Citadel-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["citadel"] period: 10s # use istio-pilot.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio istiod to monitor the Istio Daemon for versions after 1.5 of Istio. +# Istio istiod to monitor the Istio Daemon for versions of Istio after 1.5. - module: istio metricsets: ['istiod'] period: 10s # use istiod.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ['localhost:15014'] -# Istio proxy to monitor Envoy sidecars for versions after 1.5 of Istio. +# Istio proxy to monitor Envoy sidecars for versions of Istio after 1.5. - module: istio metricsets: ['proxy'] period: 10s diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index 39d4937afab..2ae6a148d88 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -709,49 +709,49 @@ metricbeat.modules: # application_pool.name: [] #-------------------------------- Istio Module -------------------------------- -# Istio mesh. To collect all Mixer-generated metrics +# Istio mesh. To collect all Mixer-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["mesh"] period: 10s # use istio-telemetry.istio-system:42422, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:42422"] -# Istio mixer. To monitor Mixer itself. +# Istio mixer. To monitor Mixer itself. For versions of Istio prior to 1.5. - module: istio metricsets: ["mixer"] period: 10s # use istio-telemetry.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio galley. To collect all Galley-generated metrics +# Istio galley. To collect all Galley-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["galley"] period: 10s # use istio-galley.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio pilot. To collect all Pilot-generated metrics. +# Istio pilot. To collect all Pilot-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["pilot"] period: 10s # use istio-pilot.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio citadel. To collect all Citadel-generated metrics. +# Istio citadel. To collect all Citadel-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["citadel"] period: 10s # use istio-pilot.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio istiod to monitor the Istio Daemon for versions after 1.5 of Istio. +# Istio istiod to monitor the Istio Daemon for versions of Istio after 1.5. - module: istio metricsets: ['istiod'] period: 10s # use istiod.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ['localhost:15014'] -# Istio proxy to monitor Envoy sidecars for versions after 1.5 of Istio. +# Istio proxy to monitor Envoy sidecars for versions of Istio after 1.5. 
- module: istio metricsets: ['proxy'] period: 10s diff --git a/x-pack/metricbeat/module/istio/_meta/config.reference.yml b/x-pack/metricbeat/module/istio/_meta/config.reference.yml index 7efe2adea30..a281555d36a 100644 --- a/x-pack/metricbeat/module/istio/_meta/config.reference.yml +++ b/x-pack/metricbeat/module/istio/_meta/config.reference.yml @@ -1,46 +1,46 @@ -# Istio mesh. To collect all Mixer-generated metrics +# Istio mesh. To collect all Mixer-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["mesh"] period: 10s # use istio-telemetry.istio-system:42422, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:42422"] -# Istio mixer. To monitor Mixer itself. +# Istio mixer. To monitor Mixer itself. For versions of Istio prior to 1.5. - module: istio metricsets: ["mixer"] period: 10s # use istio-telemetry.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio galley. To collect all Galley-generated metrics +# Istio galley. To collect all Galley-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["galley"] period: 10s # use istio-galley.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio pilot. To collect all Pilot-generated metrics. +# Istio pilot. To collect all Pilot-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["pilot"] period: 10s # use istio-pilot.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio citadel. To collect all Citadel-generated metrics. +# Istio citadel. To collect all Citadel-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["citadel"] period: 10s # use istio-pilot.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio istiod to monitor the Istio Daemon for versions after 1.5 of Istio. +# Istio istiod to monitor the Istio Daemon for versions of Istio after 1.5. - module: istio metricsets: ['istiod'] period: 10s # use istiod.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ['localhost:15014'] -# Istio proxy to monitor Envoy sidecars for versions after 1.5 of Istio. +# Istio proxy to monitor Envoy sidecars for versions of Istio after 1.5. - module: istio metricsets: ['proxy'] period: 10s diff --git a/x-pack/metricbeat/module/istio/_meta/config.yml b/x-pack/metricbeat/module/istio/_meta/config.yml index 7efe2adea30..a281555d36a 100644 --- a/x-pack/metricbeat/module/istio/_meta/config.yml +++ b/x-pack/metricbeat/module/istio/_meta/config.yml @@ -1,46 +1,46 @@ -# Istio mesh. To collect all Mixer-generated metrics +# Istio mesh. To collect all Mixer-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["mesh"] period: 10s # use istio-telemetry.istio-system:42422, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:42422"] -# Istio mixer. To monitor Mixer itself. +# Istio mixer. To monitor Mixer itself. For versions of Istio prior to 1.5. - module: istio metricsets: ["mixer"] period: 10s # use istio-telemetry.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio galley. To collect all Galley-generated metrics +# Istio galley. 
To collect all Galley-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["galley"] period: 10s # use istio-galley.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio pilot. To collect all Pilot-generated metrics. +# Istio pilot. To collect all Pilot-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["pilot"] period: 10s # use istio-pilot.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio citadel. To collect all Citadel-generated metrics. +# Istio citadel. To collect all Citadel-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["citadel"] period: 10s # use istio-pilot.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio istiod to monitor the Istio Daemon for versions after 1.5 of Istio. +# Istio istiod to monitor the Istio Daemon for versions of Istio after 1.5. - module: istio metricsets: ['istiod'] period: 10s # use istiod.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ['localhost:15014'] -# Istio proxy to monitor Envoy sidecars for versions after 1.5 of Istio. +# Istio proxy to monitor Envoy sidecars for versions of Istio after 1.5. - module: istio metricsets: ['proxy'] period: 10s diff --git a/x-pack/metricbeat/module/istio/_meta/docs.asciidoc b/x-pack/metricbeat/module/istio/_meta/docs.asciidoc index 39eb93b4095..f31d392bf4c 100644 --- a/x-pack/metricbeat/module/istio/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/istio/_meta/docs.asciidoc @@ -1,14 +1,14 @@ This is the Istio module. -This module is compatible with versions before `1.5` of Istio where microservices architecture is used. If using -versions prior to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used where the Istio -module collects metrics from the -Istio https://istio.io/v1.4/docs/tasks/observability/metrics/querying-metrics/#about-the-prometheus-add-on[prometheus exporters endpoints]. +When using versions prior to `1.5` then the `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used. -For versions after `1.5`, `istiod` and `proxy` metricsets can be used. +In such case, the Istio module collects metrics from the pre v1.5 +Istio https://istio.io/v1.4/docs/tasks/observability/metrics/querying-metrics/#about-the-prometheus-add-on[prometheus exporters endpoints]. -`istiod` collects metrics directly from Istio Daemon while `proxy` collects from each of the proxy sidecars. +For versions after `1.5`, the `istiod` and `proxy` metricsets should be used. +In such case, the `istiod` endpoint collects metrics directly from the Istio Daemon while the `proxy` endpoint collects from each of the proxy sidecars. +The metrics exposed by Istio after version `1.5` are documented on https://istio.io/latest/docs/reference/config/metrics/[Istio Documentation > Reference > Configuration > Istio Standard Metrics]. -The default metricsets are `mesh`, `mixer`, `pilot`, `galley`, `citadel`. +The default metricsets of the module are `mesh`, `mixer`, `pilot`, `galley`, `citadel`. When using Istio versions after `1.5`, the default metricsets must be overwritten specifying the `istiod` and/or `proxy` metricsets. [float] === Compatibility @@ -21,7 +21,7 @@ The Istio module is tested with Istio `1.7` for `istiod` and `proxy`. 
The Istio module includes predefined dashboards: -1. overview information about Istio Daemon. +1. Overview information about Istio Daemon. 2. Traffic information collected from istio-proxies. diff --git a/x-pack/metricbeat/modules.d/istio.yml.disabled b/x-pack/metricbeat/modules.d/istio.yml.disabled index 119bc603d80..55c2a1d715a 100644 --- a/x-pack/metricbeat/modules.d/istio.yml.disabled +++ b/x-pack/metricbeat/modules.d/istio.yml.disabled @@ -1,49 +1,49 @@ # Module: istio # Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-istio.html -# Istio mesh. To collect all Mixer-generated metrics +# Istio mesh. To collect all Mixer-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["mesh"] period: 10s # use istio-telemetry.istio-system:42422, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:42422"] -# Istio mixer. To monitor Mixer itself. +# Istio mixer. To monitor Mixer itself. For versions of Istio prior to 1.5. - module: istio metricsets: ["mixer"] period: 10s # use istio-telemetry.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio galley. To collect all Galley-generated metrics +# Istio galley. To collect all Galley-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["galley"] period: 10s # use istio-galley.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio pilot. To collect all Pilot-generated metrics. +# Istio pilot. To collect all Pilot-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["pilot"] period: 10s # use istio-pilot.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio citadel. To collect all Citadel-generated metrics. +# Istio citadel. To collect all Citadel-generated metrics. For versions of Istio prior to 1.5. - module: istio metricsets: ["citadel"] period: 10s # use istio-pilot.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ["localhost:15014"] -# Istio istiod to monitor the Istio Daemon for versions after 1.5 of Istio. +# Istio istiod to monitor the Istio Daemon for versions of Istio after 1.5. - module: istio metricsets: ['istiod'] period: 10s # use istiod.istio-system:15014, when deploying Metricbeat in a kubernetes cluster as Pod or Daemonset hosts: ['localhost:15014'] -# Istio proxy to monitor Envoy sidecars for versions after 1.5 of Istio. +# Istio proxy to monitor Envoy sidecars for versions of Istio after 1.5. 
- module: istio metricsets: ['proxy'] period: 10s From 13dc3de89d4f39319ed8c5fe92f7382473665a28 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Thu, 20 Jan 2022 13:35:08 +0000 Subject: [PATCH 24/69] Packaging: rename arm64 suffix to aarch64 in the tar.gz artifacts ONLY (#28813) --- CHANGELOG.next.asciidoc | 1 + dev-tools/mage/pkgtypes.go | 2 +- x-pack/elastic-agent/magefile.go | 2 +- x-pack/elastic-agent/pkg/artifact/artifact.go | 2 +- x-pack/elastic-agent/pkg/artifact/download/http/elastic_test.go | 2 +- 5 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index cb9fa4932f4..c284298bc2a 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -24,6 +24,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Remove option `setup.template.type` and always load composable template with data streams. {pull}28450[28450] - Remove several ILM options (`rollover_alias` and `pattern`) as data streams does not require index aliases. {pull}28450[28450] - Index template's default_fields setting is only populated with ECS fields. {pull}28596[28596] {issue}28215[28215] +- tar.gz packages for ARM64 will now use the suffix `aarch64` rather than `arm64`. {pull}28813[28813] - Remove deprecated `--template` and `--ilm-policy` flags. Use `--index-management` instead. {pull}28870[28870] - Remove options `logging.files.suffix` and default to datetime endings. {pull}28927[28927] - Remove Journalbeat. Use `journald` input of Filebeat instead. {pull}29131[29131] diff --git a/dev-tools/mage/pkgtypes.go b/dev-tools/mage/pkgtypes.go index 1fc5fe79e50..9944547c82a 100644 --- a/dev-tools/mage/pkgtypes.go +++ b/dev-tools/mage/pkgtypes.go @@ -162,7 +162,7 @@ var OSArchNames = map[string]map[PackageType]map[string]string{ "armv5": "armv5", "armv6": "armv6", "armv7": "armv7", - "arm64": "arm64", + "arm64": "aarch64", "mips": "mips", "mipsle": "mipsel", "mips64": "mips64", diff --git a/x-pack/elastic-agent/magefile.go b/x-pack/elastic-agent/magefile.go index ccceaf06935..a04d3c974bf 100644 --- a/x-pack/elastic-agent/magefile.go +++ b/x-pack/elastic-agent/magefile.go @@ -308,7 +308,7 @@ func Package() { }{ {"darwin/amd64", "darwin-x86_64.tar.gz"}, {"linux/amd64", "linux-x86_64.tar.gz"}, - {"linux/arm64", "linux-arm64.tar.gz"}, + {"linux/arm64", "linux-aarch64.tar.gz"}, {"windows/amd64", "windows-x86_64.zip"}, } diff --git a/x-pack/elastic-agent/pkg/artifact/artifact.go b/x-pack/elastic-agent/pkg/artifact/artifact.go index 5f8a099ed6a..ff81fc326f1 100644 --- a/x-pack/elastic-agent/pkg/artifact/artifact.go +++ b/x-pack/elastic-agent/pkg/artifact/artifact.go @@ -15,7 +15,7 @@ import ( var packageArchMap = map[string]string{ "linux-binary-32": "linux-x86.tar.gz", "linux-binary-64": "linux-x86_64.tar.gz", - "linux-binary-arm64": "linux-arm64.tar.gz", + "linux-binary-arm64": "linux-aarch64.tar.gz", "windows-binary-32": "windows-x86.zip", "windows-binary-64": "windows-x86_64.zip", "darwin-binary-32": "darwin-x86_64.tar.gz", diff --git a/x-pack/elastic-agent/pkg/artifact/download/http/elastic_test.go b/x-pack/elastic-agent/pkg/artifact/download/http/elastic_test.go index 747324a7cc7..d0b321da3d1 100644 --- a/x-pack/elastic-agent/pkg/artifact/download/http/elastic_test.go +++ b/x-pack/elastic-agent/pkg/artifact/download/http/elastic_test.go @@ -170,7 +170,7 @@ func getElasticCoClient() http.Client { fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "i686.rpm"): struct{}{}, fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, 
"x86_64.rpm"): struct{}{}, fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-x86.tar.gz"): struct{}{}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-arm64.tar.gz"): struct{}{}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-aarch64.tar.gz"): struct{}{}, fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-x86_64.tar.gz"): struct{}{}, fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "windows-x86.zip"): struct{}{}, fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "windows-x86_64.zip"): struct{}{}, From 9032ae2cb37925a35dbe67885dc861932e55d43e Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Thu, 20 Jan 2022 13:59:52 +0000 Subject: [PATCH 25/69] Fix istio docs regarding default metricsets (#29875) Signed-off-by: chrismark --- metricbeat/docs/modules/istio.asciidoc | 1 - x-pack/metricbeat/module/istio/_meta/docs.asciidoc | 1 - 2 files changed, 2 deletions(-) diff --git a/metricbeat/docs/modules/istio.asciidoc b/metricbeat/docs/modules/istio.asciidoc index 66abac572b0..c2e01e6b126 100644 --- a/metricbeat/docs/modules/istio.asciidoc +++ b/metricbeat/docs/modules/istio.asciidoc @@ -20,7 +20,6 @@ For versions after `1.5`, the `istiod` and `proxy` metricsets should be used. In such case, the `istiod` endpoint collects metrics directly from the Istio Daemon while the `proxy` endpoint collects from each of the proxy sidecars. The metrics exposed by Istio after version `1.5` are documented on https://istio.io/latest/docs/reference/config/metrics/[Istio Documentation > Reference > Configuration > Istio Standard Metrics]. -The default metricsets of the module are `mesh`, `mixer`, `pilot`, `galley`, `citadel`. When using Istio versions after `1.5`, the default metricsets must be overwritten specifying the `istiod` and/or `proxy` metricsets. [float] === Compatibility diff --git a/x-pack/metricbeat/module/istio/_meta/docs.asciidoc b/x-pack/metricbeat/module/istio/_meta/docs.asciidoc index f31d392bf4c..1480c5197dd 100644 --- a/x-pack/metricbeat/module/istio/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/istio/_meta/docs.asciidoc @@ -8,7 +8,6 @@ For versions after `1.5`, the `istiod` and `proxy` metricsets should be used. In such case, the `istiod` endpoint collects metrics directly from the Istio Daemon while the `proxy` endpoint collects from each of the proxy sidecars. The metrics exposed by Istio after version `1.5` are documented on https://istio.io/latest/docs/reference/config/metrics/[Istio Documentation > Reference > Configuration > Istio Standard Metrics]. -The default metricsets of the module are `mesh`, `mixer`, `pilot`, `galley`, `citadel`. When using Istio versions after `1.5`, the default metricsets must be overwritten specifying the `istiod` and/or `proxy` metricsets. [float] === Compatibility From 4a1e8f82b80c16f2325e894a679429e0bf1083e8 Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Thu, 20 Jan 2022 09:22:19 -0800 Subject: [PATCH 26/69] Re add the ability for container commands to access kibana env vars (#29930) * Re add the ability for container commands to access kibana env vars Allow elastic-agent container to read KIBANA_USERNAME/KIBANA_PASSWORD as credentials to use to generate a service_token as the env vars were erroniously removed. 
* Remove unnecessary CHANGELOG item

---
 x-pack/elastic-agent/pkg/agent/cmd/setup_config.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/x-pack/elastic-agent/pkg/agent/cmd/setup_config.go b/x-pack/elastic-agent/pkg/agent/cmd/setup_config.go
index ab163f4b5e7..1a0716ef852 100644
--- a/x-pack/elastic-agent/pkg/agent/cmd/setup_config.go
+++ b/x-pack/elastic-agent/pkg/agent/cmd/setup_config.go
@@ -108,8 +108,8 @@ func defaultAccessConfig() (setupConfig, error) {
 		Fleet: kibanaFleetConfig{
 			Setup:        envBool("KIBANA_FLEET_SETUP"),
 			Host:         envWithDefault("http://kibana:5601", "KIBANA_FLEET_HOST", "KIBANA_HOST"),
-			Username:     envWithDefault("elastic", "KIBANA_FLEET_USERNAME", "ELASTICSEARCH_USERNAME"),
-			Password:     envWithDefault("changeme", "KIBANA_FLEET_PASSWORD", "ELASTICSEARCH_PASSWORD"),
+			Username:     envWithDefault("elastic", "KIBANA_FLEET_USERNAME", "KIBANA_USERNAME", "ELASTICSEARCH_USERNAME"),
+			Password:     envWithDefault("changeme", "KIBANA_FLEET_PASSWORD", "KIBANA_PASSWORD", "ELASTICSEARCH_PASSWORD"),
 			ServiceToken: envWithDefault("", "KIBANA_FLEET_SERVICE_TOKEN", "FLEET_SERVER_SERVICE_TOKEN"),
 			CA:           envWithDefault("", "KIBANA_FLEET_CA", "KIBANA_CA", "ELASTICSEARCH_CA"),
 		},

From 96e393f4e06334db1875fe62fb07a0be701fd3b9 Mon Sep 17 00:00:00 2001
From: Blake Rouse
Date: Thu, 20 Jan 2022 12:51:52 -0500
Subject: [PATCH 27/69] Change to check for fleet server enable not fleet
 enrollment. (#29931)

---
 x-pack/elastic-agent/pkg/agent/cmd/container.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/x-pack/elastic-agent/pkg/agent/cmd/container.go b/x-pack/elastic-agent/pkg/agent/cmd/container.go
index 25ff4c15ddc..bdf2fde9e7c 100644
--- a/x-pack/elastic-agent/pkg/agent/cmd/container.go
+++ b/x-pack/elastic-agent/pkg/agent/cmd/container.go
@@ -269,7 +269,7 @@ func runContainerCmd(streams *cli.IOStreams, cmd *cobra.Command, cfg setupConfig
 		return run(streams, logToStderr)
 	}
 
-	if cfg.Kibana.Fleet.Setup || cfg.Fleet.Enroll {
+	if cfg.Kibana.Fleet.Setup || cfg.FleetServer.Enable {
 		err = ensureServiceToken(streams, &cfg)
 		if err != nil {
 			return err

From 53c7c51be99a798a484f87d63e633350ecc44df6 Mon Sep 17 00:00:00 2001
From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com>
Date: Thu, 20 Jan 2022 14:41:55 -0800
Subject: [PATCH 28/69] Add beat/process config to elastic agent diagnostics
 collect (#29902)

* Add beat/process config to elastic agent diagnostics collect

Add the configs that are output from elastic-agent inspect output to the
bundle generated by elastic-agent diagnostics collect. Files are named as
ProcID_RouteKey.

* Apply suggestions from code review

Co-authored-by: Anderson Queiroz

Co-authored-by: Anderson Queiroz
---
 x-pack/elastic-agent/CHANGELOG.next.asciidoc  |  1 +
 .../pkg/agent/cmd/diagnostics.go              | 40 +++++++++++++++++++
 2 files changed, 41 insertions(+)

diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc
index 40cecf51b90..37f09cec0cc 100644
--- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc
+++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc
@@ -157,3 +157,4 @@
 - Allow pprof endpoints for elastic-agent or beats if enabled. {pull}28983[28983] {pull}29155[29155]
 - Add --fleet-server-es-ca-trusted-fingerprint flag to allow agent/fleet-server to work with elasticsearch clusters using self signed certs. {pull}29128[29128]
 - Discover changes in Kubernetes nodes metadata as soon as they happen. {pull}23139[23139]
+- Add results of inspect output command into archive produced by diagnostics collect. 
{pull}29902[29902] diff --git a/x-pack/elastic-agent/pkg/agent/cmd/diagnostics.go b/x-pack/elastic-agent/pkg/agent/cmd/diagnostics.go index b32edf6df2d..d5b7a759f72 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/diagnostics.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/diagnostics.go @@ -22,6 +22,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v2" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/control/client" @@ -47,6 +48,7 @@ type DiagnosticsInfo struct { type AgentConfig struct { ConfigLocal *configuration.Configuration ConfigRendered map[string]interface{} + AppConfig map[string]interface{} // map of processName_rk:config } func newDiagnosticsCommand(s []string, streams *cli.IOStreams) *cobra.Command { @@ -361,6 +363,35 @@ func gatherConfig() (AgentConfig, error) { } cfg.ConfigRendered = mapCFG + // Gather vars to render process config + isStandalone, err := isStandalone(renderedCFG) + if err != nil { + return AgentConfig{}, err + } + + agentInfo, err := info.NewAgentInfo(false) + if err != nil { + return AgentConfig{}, err + } + + log, err := newErrorLogger() + if err != nil { + return AgentConfig{}, err + } + + // Get process config - uses same approach as inspect output command. + // Does not contact server process to request configs. + pMap, err := getProgramsFromConfig(log, agentInfo, renderedCFG, isStandalone) + if err != nil { + return AgentConfig{}, err + } + cfg.AppConfig = make(map[string]interface{}, 0) + for rk, programs := range pMap { + for _, p := range programs { + cfg.AppConfig[p.Identifier()+"_"+rk] = p.Configuration() + } + } + return cfg, nil } @@ -419,6 +450,15 @@ func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentCon if err := writeFile(zf, outputFormat, cfg.ConfigRendered); err != nil { return closeHandlers(err, zw, f) } + for name, appCfg := range cfg.AppConfig { + zf, err := zw.Create("config/" + name + "." 
+ outputFormat)
+		if err != nil {
+			return closeHandlers(err, zw, f)
+		}
+		if err := writeFile(zf, outputFormat, appCfg); err != nil {
+			return closeHandlers(err, zw, f)
+		}
+	}
 
 	if err := zipLogs(zw); err != nil {
 		return closeHandlers(err, zw, f)

From 02c9bef29564fbb358832d6bf9a5182d32f365b3 Mon Sep 17 00:00:00 2001
From: Victor Martinez
Date: Mon, 24 Jan 2022 11:57:09 +0000
Subject: [PATCH 29/69] ci: docker login step for pulling then pushing (#29960)

---
 .ci/packaging.groovy | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy
index 7be1628e138..84f5f795e41 100644
--- a/.ci/packaging.groovy
+++ b/.ci/packaging.groovy
@@ -307,8 +307,6 @@ def tagAndPush(Map args = [:]) {
     tagName = "pr-${env.CHANGE_ID}"
   }
 
-  dockerLogin(secret: "${DOCKERELASTIC_SECRET}", registry: "${DOCKER_REGISTRY}")
-
   // supported tags
   def tags = [tagName, "${env.GIT_BASE_COMMIT}"]
   if (!isPR() && aliasVersion != "") {
@@ -386,6 +384,7 @@ def release(){
   withEnv([
     "DEV=true"
   ]) {
+    dockerLogin(secret: "${DOCKERELASTIC_SECRET}", registry: "${DOCKER_REGISTRY}")
     dir("${env.BEATS_FOLDER}") {
       sh(label: "Release ${env.BEATS_FOLDER} ${env.PLATFORMS}", script: 'mage package')
     }

From c1d4f28ff30e28429efed51c817cd02d46633aac Mon Sep 17 00:00:00 2001
From: Chris Mark
Date: Mon, 24 Jan 2022 12:25:32 +0000
Subject: [PATCH 30/69] Align k8s configuration settings (#29908)

---
 CHANGELOG.next.asciidoc                            |  1 +
 .../autodiscover/providers/kubernetes/config.go    | 10 +---------
 libbeat/autodiscover/providers/kubernetes/pod.go   |  5 ++---
 .../processors/add_kubernetes_metadata/config.go   |  4 ++--
 .../add_kubernetes_metadata/kubernetes.go          | 15 +++++++--------
 metricbeat/docs/modules/kubernetes.asciidoc        |  6 ++++--
 metricbeat/metricbeat.reference.yml                |  6 ++++--
 .../module/kubernetes/_meta/config.reference.yml   |  6 ++++--
 metricbeat/module/kubernetes/_meta/config.yml      |  6 ++++--
 metricbeat/module/kubernetes/util/kubernetes.go    |  8 +++++---
 metricbeat/modules.d/kubernetes.yml.disabled       |  6 ++++--
 x-pack/metricbeat/metricbeat.reference.yml         |  6 ++++--
 12 files changed, 42 insertions(+), 37 deletions(-)

diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index c284298bc2a..27ca724a282 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -32,6 +32,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d
 - Add job.name in pods controlled by Jobs {pull}28954[28954]
 - Change Docker base image from CentOS 7 to Ubuntu 20.04 {pull}29681[29681]
 - Enrich kubernetes metadata with node annotations. {pull}29605[29605]
+- Align kubernetes configuration settings. {pull}29908[29908]
 
 *Auditbeat*
 
diff --git a/libbeat/autodiscover/providers/kubernetes/config.go b/libbeat/autodiscover/providers/kubernetes/config.go
index 46f73930141..39be2ad5c58 100644
--- a/libbeat/autodiscover/providers/kubernetes/config.go
+++ b/libbeat/autodiscover/providers/kubernetes/config.go
@@ -29,7 +29,6 @@ import (
 
 	"github.com/elastic/beats/v7/libbeat/autodiscover/template"
 	"github.com/elastic/beats/v7/libbeat/common"
-	"github.com/elastic/beats/v7/libbeat/common/cfgwarn"
 	"github.com/elastic/beats/v7/libbeat/logp"
 )
 
@@ -43,8 +42,7 @@ type Config struct {
 	CleanupTimeout time.Duration `config:"cleanup_timeout" validate:"positive"`
 
 	// Needed when resource is a pod
-	HostDeprecated string `config:"host"`
-	Node           string `config:"node"`
+	Node string `config:"node"`
 
 	// Scope can be either node or cluster. 
Scope string `config:"scope"` Resource string `config:"resource"` @@ -86,12 +84,6 @@ func (c *Config) Validate() error { return fmt.Errorf("no configs or hints defined for autodiscover provider") } - // Check if host is being defined and change it to node instead. - if c.Node == "" && c.HostDeprecated != "" { - c.Node = c.HostDeprecated - cfgwarn.Deprecate("8.0", "`host` will be deprecated, use `node` instead") - } - // Check if resource is either node or pod. If yes then default the scope to "node" if not provided. // Default the scope to "cluster" for everything else. switch c.Resource { diff --git a/libbeat/autodiscover/providers/kubernetes/pod.go b/libbeat/autodiscover/providers/kubernetes/pod.go index be423080068..8248114c875 100644 --- a/libbeat/autodiscover/providers/kubernetes/pod.go +++ b/libbeat/autodiscover/providers/kubernetes/pod.go @@ -94,10 +94,9 @@ func NewPodEventer(uuid uuid.UUID, cfg *common.Config, client k8s.Interface, pub options := kubernetes.WatchOptions{ SyncTimeout: config.SyncPeriod, Node: config.Node, + Namespace: config.Namespace, } - if config.Namespace != "" { - options.Namespace = config.Namespace - } + metaConf := config.AddResourceMetadata nodeWatcher, err := kubernetes.NewNamedWatcher("node", client, &kubernetes.Node{}, options, nil) if err != nil { diff --git a/libbeat/processors/add_kubernetes_metadata/config.go b/libbeat/processors/add_kubernetes_metadata/config.go index 9365627f59f..b45902bb967 100644 --- a/libbeat/processors/add_kubernetes_metadata/config.go +++ b/libbeat/processors/add_kubernetes_metadata/config.go @@ -29,7 +29,7 @@ import ( type kubeAnnotatorConfig struct { KubeConfig string `config:"kube_config"` KubeClientOptions kubernetes.KubeClientOptions `config:"kube_client_options"` - Host string `config:"host"` + Node string `config:"node"` Scope string `config:"scope"` Namespace string `config:"namespace"` SyncPeriod time.Duration `config:"sync_period"` @@ -67,7 +67,7 @@ func (k *kubeAnnotatorConfig) Validate() error { } if k.Scope == "cluster" { - k.Host = "" + k.Node = "" } // Checks below were added to warn the users early on and avoid initialising the processor in case the `logs_path` diff --git a/libbeat/processors/add_kubernetes_metadata/kubernetes.go b/libbeat/processors/add_kubernetes_metadata/kubernetes.go index a4b40b9655d..2730770bc86 100644 --- a/libbeat/processors/add_kubernetes_metadata/kubernetes.go +++ b/libbeat/processors/add_kubernetes_metadata/kubernetes.go @@ -166,23 +166,23 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *common.Confi k.matchers = matchers nd := &kubernetes.DiscoverKubernetesNodeParams{ - ConfigHost: config.Host, + ConfigHost: config.Node, Client: client, IsInCluster: kubernetes.IsInCluster(config.KubeConfig), HostUtils: &kubernetes.DefaultDiscoveryUtils{}, } if config.Scope == "node" { - config.Host, err = kubernetes.DiscoverKubernetesNode(k.log, nd) + config.Node, err = kubernetes.DiscoverKubernetesNode(k.log, nd) if err != nil { k.log.Errorf("Couldn't discover Kubernetes node: %w", err) return } - k.log.Debugf("Initializing a new Kubernetes watcher using host: %s", config.Host) + k.log.Debugf("Initializing a new Kubernetes watcher using host: %s", config.Node) } watcher, err := kubernetes.NewNamedWatcher("add_kubernetes_metadata_pod", client, &kubernetes.Pod{}, kubernetes.WatchOptions{ SyncTimeout: config.SyncPeriod, - Node: config.Host, + Node: config.Node, Namespace: config.Namespace, }, nil) if err != nil { @@ -194,11 +194,10 @@ func (k *kubernetesAnnotator) init(config 
kubeAnnotatorConfig, cfg *common.Confi options := kubernetes.WatchOptions{ SyncTimeout: config.SyncPeriod, - Node: config.Host, - } - if config.Namespace != "" { - options.Namespace = config.Namespace + Node: config.Node, + Namespace: config.Namespace, } + nodeWatcher, err := kubernetes.NewNamedWatcher("add_kubernetes_metadata_node", client, &kubernetes.Node{}, options, nil) if err != nil { k.log.Errorf("couldn't create watcher for %T due to error %+v", &kubernetes.Node{}, err) diff --git a/metricbeat/docs/modules/kubernetes.asciidoc b/metricbeat/docs/modules/kubernetes.asciidoc index e237f16781b..dc6873daece 100644 --- a/metricbeat/docs/modules/kubernetes.asciidoc +++ b/metricbeat/docs/modules/kubernetes.asciidoc @@ -218,7 +218,7 @@ metricbeat.modules: # Enriching parameters: add_metadata: true # When used outside the cluster: - #host: node_name + #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster #kube_config: ~/.kube/config @@ -262,10 +262,12 @@ metricbeat.modules: # Enriching parameters: add_metadata: true # When used outside the cluster: - #host: node_name + #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster #kube_config: ~/.kube/config + # Set the namespace to watch for resources + #namespace: staging # To configure additionally node and namespace metadata, added to pod, service and container resource types, # `add_resource_metadata` can be defined. # By default all labels will be included while annotations are not added by default. diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index ecaed598049..bec4fb6bdac 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -496,7 +496,7 @@ metricbeat.modules: # Enriching parameters: add_metadata: true # When used outside the cluster: - #host: node_name + #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster #kube_config: ~/.kube/config @@ -540,10 +540,12 @@ metricbeat.modules: # Enriching parameters: add_metadata: true # When used outside the cluster: - #host: node_name + #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster #kube_config: ~/.kube/config + # Set the namespace to watch for resources + #namespace: staging # To configure additionally node and namespace metadata, added to pod, service and container resource types, # `add_resource_metadata` can be defined. # By default all labels will be included while annotations are not added by default. 
diff --git a/metricbeat/module/kubernetes/_meta/config.reference.yml b/metricbeat/module/kubernetes/_meta/config.reference.yml index 3a6e22af69b..2db9c919efe 100644 --- a/metricbeat/module/kubernetes/_meta/config.reference.yml +++ b/metricbeat/module/kubernetes/_meta/config.reference.yml @@ -19,7 +19,7 @@ # Enriching parameters: add_metadata: true # When used outside the cluster: - #host: node_name + #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster #kube_config: ~/.kube/config @@ -63,10 +63,12 @@ # Enriching parameters: add_metadata: true # When used outside the cluster: - #host: node_name + #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster #kube_config: ~/.kube/config + # Set the namespace to watch for resources + #namespace: staging # To configure additionally node and namespace metadata, added to pod, service and container resource types, # `add_resource_metadata` can be defined. # By default all labels will be included while annotations are not added by default. diff --git a/metricbeat/module/kubernetes/_meta/config.yml b/metricbeat/module/kubernetes/_meta/config.yml index 9e8eaee745c..85cd0041486 100644 --- a/metricbeat/module/kubernetes/_meta/config.yml +++ b/metricbeat/module/kubernetes/_meta/config.yml @@ -19,9 +19,12 @@ #labels.dedot: true #annotations.dedot: true # When used outside the cluster: - #host: node_name + #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster + #kube_config: ~/.kube/config + # Set the namespace to watch for resources + #namespace: staging # To configure additionally node and namespace metadata, added to pod, service and container resource types, # `add_resource_metadata` can be defined. # By default all labels will be included while annotations are not added by default. 
@@ -32,7 +35,6 @@ # include_labels: ["nodelabel2"] # include_annotations: ["nodeannotation1"] # deployment: false - #kube_config: ~/.kube/config # Kubernetes client QPS and burst can be configured additionally #kube_client_options: # qps: 5 diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 6518a258e26..e20b4da74c0 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -51,12 +51,13 @@ type kubernetesConfig struct { KubeConfig string `config:"kube_config"` KubeClientOptions kubernetes.KubeClientOptions `config:"kube_client_options"` - Host string `config:"host"` + Node string `config:"node"` SyncPeriod time.Duration `config:"sync_period"` // AddMetadata enables enriching metricset events with metadata from the API server AddMetadata bool `config:"add_metadata"` AddResourceMetadata *metadata.AddResourceMetadataConfig `config:"add_resource_metadata"` + Namespace string `config:"namespace"` } type enricher struct { @@ -262,6 +263,7 @@ func getResourceMetadataWatchers(config *kubernetesConfig, resource kubernetes.R options := kubernetes.WatchOptions{ SyncTimeout: config.SyncPeriod, + Namespace: config.Namespace, } log := logp.NewLogger(selector) @@ -269,7 +271,7 @@ func getResourceMetadataWatchers(config *kubernetesConfig, resource kubernetes.R // Watch objects in the node only if nodeScope { nd := &kubernetes.DiscoverKubernetesNodeParams{ - ConfigHost: config.Host, + ConfigHost: config.Node, Client: client, IsInCluster: kubernetes.IsInCluster(config.KubeConfig), HostUtils: &kubernetes.DefaultDiscoveryUtils{}, @@ -281,7 +283,7 @@ func getResourceMetadataWatchers(config *kubernetesConfig, resource kubernetes.R } } - log.Debugf("Initializing a new Kubernetes watcher using host: %v", config.Host) + log.Debugf("Initializing a new Kubernetes watcher using host: %v", config.Node) watcher, err := kubernetes.NewNamedWatcher("resource_metadata_enricher", client, resource, options, nil) if err != nil { diff --git a/metricbeat/modules.d/kubernetes.yml.disabled b/metricbeat/modules.d/kubernetes.yml.disabled index fcfeec14875..11f6af1e983 100644 --- a/metricbeat/modules.d/kubernetes.yml.disabled +++ b/metricbeat/modules.d/kubernetes.yml.disabled @@ -22,9 +22,12 @@ #labels.dedot: true #annotations.dedot: true # When used outside the cluster: - #host: node_name + #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster + #kube_config: ~/.kube/config + # Set the namespace to watch for resources + #namespace: staging # To configure additionally node and namespace metadata, added to pod, service and container resource types, # `add_resource_metadata` can be defined. # By default all labels will be included while annotations are not added by default. 
@@ -35,7 +38,6 @@ # include_labels: ["nodelabel2"] # include_annotations: ["nodeannotation1"] # deployment: false - #kube_config: ~/.kube/config # Kubernetes client QPS and burst can be configured additionally #kube_client_options: # qps: 5 diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index 2ae6a148d88..e46f549be40 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -881,7 +881,7 @@ metricbeat.modules: # Enriching parameters: add_metadata: true # When used outside the cluster: - #host: node_name + #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster #kube_config: ~/.kube/config @@ -925,10 +925,12 @@ metricbeat.modules: # Enriching parameters: add_metadata: true # When used outside the cluster: - #host: node_name + #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster #kube_config: ~/.kube/config + # Set the namespace to watch for resources + #namespace: staging # To configure additionally node and namespace metadata, added to pod, service and container resource types, # `add_resource_metadata` can be defined. # By default all labels will be included while annotations are not added by default. From c570227b30f34c63c889895108d5ebc579bdb52b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20R=C3=BChsen?= Date: Mon, 24 Jan 2022 13:34:01 +0100 Subject: [PATCH 31/69] Return early from IsRestartNeeded() (#29724) Check the constant condition early to avoid unneeded processing. --- x-pack/elastic-agent/pkg/core/plugin/common.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/x-pack/elastic-agent/pkg/core/plugin/common.go b/x-pack/elastic-agent/pkg/core/plugin/common.go index 7bb25c7e94d..ec5cc13c95a 100644 --- a/x-pack/elastic-agent/pkg/core/plugin/common.go +++ b/x-pack/elastic-agent/pkg/core/plugin/common.go @@ -21,6 +21,11 @@ type configFetcher interface { // - spec is configured to support restart on change // - output changes in between configs func IsRestartNeeded(log *logger.Logger, spec program.Spec, cfgFetch configFetcher, newCfg map[string]interface{}) bool { + if !spec.RestartOnOutputChange { + // early exit if restart is not needed anyway + return false + } + // compare outputs curCfgStr := cfgFetch.Config() if curCfgStr == "" { @@ -40,8 +45,8 @@ func IsRestartNeeded(log *logger.Logger, spec program.Spec, cfgFetch configFetch return false } - // restart needed only if specified and output changed - return spec.RestartOnOutputChange && currentOutput != newOutput + // restart needed only if output changed + return currentOutput != newOutput } func getOutputConfigFromString(cfgString string) (string, error) { From 33acb3c392471596daf773608b250b188ac38480 Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Tue, 25 Jan 2022 07:11:34 +1030 Subject: [PATCH 32/69] x-pack/winlogbeat/module/sysmon: add eventid 26 handler (#29957) --- CHANGELOG.next.asciidoc | 1 + .../module/sysmon/ingest/sysmon.yml | 7 +- .../sysmon-11-filedeletedetected.evtx | Bin 0 -> 69632 bytes ...mon-11-filedeletedetected.evtx.golden.json | 146 ++++++++++++++++++ 4 files changed, 153 insertions(+), 1 deletion(-) create mode 100644 x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-11-filedeletedetected.evtx create mode 100644 
x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-11-filedeletedetected.evtx.golden.json diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 27ca724a282..1efe08acb2b 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -198,6 +198,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Winlogbeat* - Add support for custom XML queries {issue}1054[1054] {pull}29330[29330] +- Add support for sysmon event ID 26; FileDeleteDetected. {issue}26280[26280] {pull}29957[29957] *Elastic Log Driver* diff --git a/x-pack/winlogbeat/module/sysmon/ingest/sysmon.yml b/x-pack/winlogbeat/module/sysmon/ingest/sysmon.yml index db28eb13cf7..2ace6b3f66d 100644 --- a/x-pack/winlogbeat/module/sysmon/ingest/sysmon.yml +++ b/x-pack/winlogbeat/module/sysmon/ingest/sysmon.yml @@ -146,6 +146,11 @@ processors: - process type: - change + "26": + category: + - file + type: + - deletion tag: Add ECS categorization fields source: |- if (ctx?.event?.code == null || params.get(ctx.event.code) == null) { @@ -237,7 +242,7 @@ processors: target_field: process.hash if: |- ctx?._temp?.hashes != null && - ["1", "23", "24", "25"].contains(ctx.event.code) + ["1", "23", "24", "25", "26"].contains(ctx.event.code) - rename: field: process.hash.imphash target_field: process.pe.imphash diff --git a/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-11-filedeletedetected.evtx b/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-11-filedeletedetected.evtx new file mode 100644 index 0000000000000000000000000000000000000000..8a0c028b129c8bc65928374648da252381ed6217 GIT binary patch literal 69632 zcmeI53w#yDy~pR|O&|%Vh_$pn%R3?DMG&dYQ{G?_xM+Ftt^Xa8 z_y0fh{m*}9XXZa!)>v2G&{(USTf8}N9rh80PXi;)B**>vS6}#B#&KUHyg--(VGe{j z5avLb17Qw?IS}SRm;+%BggFrAK$rtz4jeHD${OoR>YJD19^cyH%-;F=Ivjthzf!x> zm3s2em-jQtj(NxHSL}cHdiw=FmV{3!N*zd4YEOdw8>L3z_$29+i2mxZpSB(0oYj6L!S%q`F)dFr#oacnQQUfht2yLNL!9;KJB;;)AZIhOyJ>y zFDLixs-s4MD`*PuM?d*y%I<+XBN>s18W8!?9~z#MtMYxM|JCz9EZFq(7oXdI@D}_M zrYahwR^cswAh)LnB&ssCLe;8f)q&5q;+1rT`EpA}qFSk%R3pA@#}|o_d{vKR9jaBG zr^e#1T`j_qTBNDL`z-7maHLJO;8TmLL&_|CZo%=f>PmdqjD3#E#$O)ZWvfE`uk94y z9`z|Ma_G>{!AiKbR3)iObrr7Mp(QF6S9IvmeCQ=a-@Q6T&DPRaK-OZUYQwh$c$JGW zPWwWFnh9l_@TF4e=_-;m&)CDr!3pX@wOpTxuQF6dd{Mlbfn=*pii0)Dw#=HMO7Y!r zypm{gyv)eormBl@i5g^C1AcykR)JZUo}iNA(&JT9LZrVs8wbSFPwSVgE;h*;kWEX*Q`i||T`>Rfz~l5@oiDQX(Nz6#u0kdKRz?@XGg1}7lfXXC>0i0_OKMurp~`QHHx5|gzQODgW@Bpu$C@gQXZdX%k2tWkSbFX zE9c|VQ&lQ;xfv&kLwMjX{Z)xhMm-v}CX%hyNLOBNN_gtKsj3K9?1195mJa+<>$>U5 zDkF(bf!Oo^YmlO7>q`7h9w(*ITn-&-f(&sYPkpx^e!xnt_;M}C!aJFIfTp4@sXO`^ zn_Yy2?f5fVq^Oi6x>lmSc|r0VhYsaXFuWSUO>9ol8Px<+pmJ7OiA#(mBRgu;63F5;Iuvub7$=JB-I$dEh=j2m zX{?M>>I|ev5-(ggJxR^MFX1)KuNk0Dj!#G*q|QoECm|^ePQPHEiZ5;{YA{}d^!o(7 z<10m_>U20=AJ4|8fht2y#{LiSXCck$_%qvezZpau96>4PdeD}xes1G+VjWi(#EV{ zsjzI3{=stD*Ij?GpE!TMe`<5`{ED9ypKGAJUaN87MoD zV^Ub-(x4I3W}37by>;dO4pSC)8kZKQGI-8NbIwP*uHW7K7pu6mc$LERyf%Lit-@%Z z&}mw6X)q$DI?JS@jhIx4Fg#XDOYke>(h{w7xh5TdrAe2r-DR6j*P+V6hxyWsGg(8# zr6pNso@34&+;wIWjF6QctG{-nOtw;vH7Ti+kud8CpweKOtUPn@mx=v2{W(vaiTC-akjLTpc`6rYX5oJhD%YGT z4=sA)fp=<{-AW~^{V4b|@r%!d7n_6qWY}f}w5w9{RW)jtS#SwO*q@0KP=;?V#&;!n zSAy$SJ4#7)lq;vaYW&I-u#|d~8&=X;$QgP+ud`Vu=z8h+Dz%x?4OQEWdpfSP1V_Y- ztC4HWbDA`3SQdhDkh2({J8+H3&^`xwQJ^N_-2~)BA@ZgWR~!#53h*}p?{ZbK&Z`{o z<}v<0?#JPexjF%#$K%|5eT@PfV@h6ySIoovd|aaduY9B|!qxKeZ4tgN*T;%*HVY!R zi~gg6?KQUBMxgMShLt*Hy<5Ajw>;y4h|@DlJ5fC2&mxw3G2j{9%EmC12bPT+@H(2Z zkq^3A;F*g(%f>nR@6t1p(dDER)>;V5Eyk+= zmYj!k3iba|9Ld4?#oFe}@ID9mR*v*cQLIZ$8Ln4~YnSN%Wk|`AkcaOIbtxE!E0*CC 
zzhT+TLCN5!9DE+9uVNXZN*v7cv1N4W-{3zFU{(f4Ma;(=G3!Pi>#~iOi&a&3*fd3f~x#YKpt{jAliRFlm5%+YN zAV3esc0^`py=AfI-2v+wrXWA7y{uI2R$P<*g8NESPDd;%`Ja8|JIsCFy0eP@#QQuq ze?bo;EvbQc?`%m8H7%(^D9x7CZ0s*l^cVXM>^oq@Vg7(f+%!6C?Y{De`d$V1J0gv! zLhi=hs%WNg-@q-ARpX|RpK7!7t1=n-z~U!g4VIrsbmO-R3`Mh$pQHTgawp|)j^X!M z((*qB%TFZg!i9e*a(a6S?CMeEM6;0J;rLx__}w(^>jCo3^gYq{&+*jKL9k^=5`L|FdTfS+g<@^kQOvGbd6_&xOD%7FQaL^pn{|3tHp zpMzgi`(=UQ_ttAadOO(iClcNGjY0hHmq&l(D0ivW=FvMM4}fz(JbWW z;1{u%zeR@Ms?GQOJ=pRm65aT*|5`K)`8oLI*!e9s{Oa%h$LHS;bo^E%y7A-vgJ>4= zbMWi1^Q$%d`n_=T-NEt`iEjK_F~3nX3;8+tMcqHt8Ga}K<{JU?6Nz5@M6-~egI}7x z{4Fv3uATVKFM}HAXl z4!L*Vzv}G6#cC40o6LV|Fy*&n-bvNn%P;YAVme5AOyL&;CkNjTL@e_Lv?6Jj9h(xRYi&MKz{YQS)c!_2)%I~K6_g=KF z*fLGN|I6w-D)OhVeEf<#*8XY*4x6~uEAX}uVKTgbV)W(Zc>f9PJEJcdAKigSr8Y#z zFq*j1L@!yoa0L{LpZ?2HwY%0?&xs?i)h8?P)KpUg=4DTp`9LWB;3 zt6Ok=iSo>X%nHQca6>F5?S<{$?bx>%ySeCYKkcR=(mn1*BzoD6XcnV(V~?A4F*-C4?|2nCpAxE|y+rPw zr>`jgc@@5&fd7n6WaUz_3g$M-7&<|h)p_=#pAKL@|4`|x(d@6>BQ zA22_W=*3So3;8+tO}3Z64#RK7woL)^6Nz5@M6-~egWs8Ue#@o)yZwU80DanPb9we&6oGi`pJzYx83lQs!^%M&kH}%Ead0l zcY*M`2kfpi{MNj-#YcYQT>4Q&qWI5ohphXDS5Wru#PFkN7Nh*6A4PgDuu>cDW;=AR zh%$RwDa72@k%zvo!#OJrm)zI=%1Z&tViRzV@ir%(%^ybV*zeGLyDYCJjBE1q^(Qm8iXQO|) z%GmG2S^X~$wq6&Bmi;EEOygIiUJ%Xtfc^GvzeJq9URY)LO-&l?BfqgO^@2zg|0Dep zPhhFQ*;o=%G>cJw&UztT*9)y!M`{VGf!SDns|`JZTz!z6?(?+Q3s)O1yL{y`$E98n ziC$bpvl!)KuNR7Yv0kvqcCQ!gq)*&hVG4St3t^c}dIh~{=<9AnEnx4f&chf_F-B^t z(1$-A_Bs!J+l8i9Fk>=OFKmbHUSsUH{@GJ~^eaoXUx{A#OeA{QuV~f>>~|h~3Rm*& zaE!eiex#)*mP(&Ah8)>Wy%Y8QwT9=2AN$Gk6E6NpBzo}_&H4b(li-iscxIyg)r|EE zTaGY>))TVyL32=U{T?rybcFFTu5`;WH|oQ&uyV9S+w}NuCF;ax{Njeru`n}INIRrG zj==ukj6Lo@dx($z<@?&d>^-p8HZMRVdfB6B7Nhof{mNy3dUzqrzfyev-Jcztlk((( zAAEQCt512%3+Tb;5^q9ukRO_9&Xgs69$O1YfR>?D-LyTa?#%{|qKK7%sOK z?)I^L;5J`IBwAeJ(ad?jGjfIFO`=(hat{+ z)c(E0pXI}Hq+d}q3;8+5n{u~;8QWbfhc_91ZLjY5wtxKG#+yW<8m%!lkDq!ax%cm@|a>HBZ^n_CU9|MivE(n$C5ZIS53 zOEim7UUq*oFVcJS@P#-_a8GCATNc1GPRyD3d9cf3-K)rl+U2ULXt}Ttlp0$z@j0Tw z=K{{e=WL9F6roS@A~U+NnDHdE=RL6H+wkJ{!-P|P^gq{Y{}Vl~ArigpSv2bd_S_8r zy7(=T=*3wy>jRuez;C&6?&)~Pu@h&> zKXFDu#&?c_EWnrtpEOtl4>b+3L1Qt-T4emt7}_Q6llALojD4;;*H8Pj+CkC%P9o9E zK1H(_wNHD$6Wc&+3wg&MO4}z3wTrZr=uPx*Qx=-tS+3hub)YNVIrGufX+7^m&M8G0Mwc zZ}jdsMj^SeoyPs=48PUqwErkrej?F}pJ*2HbBtqjKL3pU=64!?3%69f;^H1&4EJpd+`x`l{hvOJ*Lw8_4kMuUObzOy?8E)uTw2xz~GhE`o z?qqRVr4pVQ?;uas_na5>dBgSI?S66{9_c>bArif~ie`O)>nX?8Z`f19 zEZHYQA7L)05M^W3g8Fc#^fdfmgfR+wn$hSZEKo~3`v{*HlCgV_eSX^A#vVr{SNYz4 ziS8#9iC*?7n#HI++VA(+c3~UF>L--C47W_3?HM?W*?L|K{RI8R7YvtGBmLy!=Kn>a z7Z=eiM!DGAFMj)ftESJk`~NQ*u46{~$<@vOi$pK3qFEo{8v1`P|BrRNhw63+{mebE z#V;9qJY%Mx_GsO|M*Dw}=w*+hS&Z7_(fa>fHJ&`kWi$4C&uvBKX(=!GHjk-9xBsK( zF^NPcm$&h(fhs&fKs1X{F3x@h`#dJ;SLj-WvTG$#8Oc+sf_%t{^FF?8IIX|XPfl*@ zh>AonPNG?ia5_WVZiM||`Bza#@YcfCVg)apcKe^GkDtY*Mipw zy^ipBHB!fJXQONu;%+`0<=I-BIG^~C5)o(Q#(IbQ4TfLM%%J&+L@$1#*}n|G@gSIY z1b(__M)(C($fM zIfd;zSs|Y5&vW&7uXW;&uYGr(8Yk^L+V%a$p09c%E+bgKBNDyrSu~4Ldp>@@qwo9W zzH+1CwA)usZhl83dT|oXVw6+pcY^UdXuXQxVY}z6#-5jV{HNOPTYr|@JT;MM*>jxg z|Iw~^`@d#%Y1Rkqxp(KO9g_Ac_YW9;C-3`l!2Cp_7eCP~ zn4($8&%v+K&hH_^@6V5aCt!Xe(JFmi|KGjUqqttdo87_{GEdGD4K=*9Q+#W{Jw7Zef|Dj0rL}yUi?I}ke`F!U^~A@ z4ZnT`*WMFs`4fr4@5O#yfuVW3iu$XQ{VG&F{MkPYQ2EoX;%Sb|cc2U^JOe5;q&|nd6H4 zVbhNpyUzcXpLTt#%e*g<=w;WUSs$?L4)~y>jVlzNb?g?*jAb0ydE`vqo57tvjoZWD z_LFy!iysq-lHg;OQ!%sW4+DFlGC?e6zPDQgAwNrZ>3fo4EL+ojt$fu`B zOA1dXAy_@@vGM zN~X2$3EBr^u|h#B{z~z_4)-|A@ofk0at1`~{?PAuZPPcf)A+qR;rH^4Jxxo@er6Az zB9nop-hjxuvx@#S4Bz(Fdh#P(+Rq}fC@=Byb1saimPy?2hNN}*A<gf5yT*2W=5vf{k>7S6FEwMmAzu5AoBQ zlw-Y4&$ym?T1`o0XGxh`Y2p6Q*7t4cfBYwaMq5~Qac zvfV|y%s2LU+ekm{ahc2gyGZo1N71Yg*yE*WBfH(t^<-V3cF)*SS@)xoSkbb+QvA$4 
z;9Fq0-r+0POcy^R61}*JW_^I`>Am7-dP4Zb^);jQszA#TA)JfV$_mhznAf=qStdLb zycq3~{$?v|a=fw2sk{92H*WK`MWUBoie`PlE`6N0U1<1SF?`jp{A<6s@e_#_Kcy0; zS?!n2XqHqDP)all`8mc7qvowmF#NKvZ~k?#{6wM~zcndJiDn@`2fwI!Pv;qaNovHN zVEKte@t<$biY|YmS;+5j{3aTH!*`$lR;Ck@}RU*6;+U$^op5}k1sESFnQSVglwz_)krFQVEv7Z`rKJ~g;5 zSpO;#o#pduXq&e7Q%W=o`8oV+6u(ayeq}Egt_YT&NIY;#ZRNi5i2D9e?sr63{x~mD zGzkklsg-zjY8FY3@#7eCP~mpMzhUz5Go# z{C13we=1mhBGHZCxw!um%|d<-e$UwX%`p64x%=&>gXJd@z4(b{AwLH{y)FsnzZPK> zu?q8Sx#{N{Fy6?Y`SlU~+^dtVI~B!;31=YiAL2OUzX2nb9r({N&PmWK7h{%uRto2E YMn7reNO5zySVNdN!< literal 0 HcmV?d00001 diff --git a/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-11-filedeletedetected.evtx.golden.json b/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-11-filedeletedetected.evtx.golden.json new file mode 100644 index 00000000000..be766c23f9b --- /dev/null +++ b/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-11-filedeletedetected.evtx.golden.json @@ -0,0 +1,146 @@ +[ + { + "@timestamp": "2022-01-24T05:12:34.328Z", + "event": { + "category": [ + "file" + ], + "code": "26", + "kind": "event", + "module": "sysmon", + "provider": "Microsoft-Windows-Sysmon", + "type": [ + "deletion" + ] + }, + "file": { + "directory": "C:\\Windows\\ServiceState\\EventLog\\Data", + "extension": "dat", + "name": "lastalive1.dat", + "path": "C:\\Windows\\ServiceState\\EventLog\\Data\\lastalive1.dat" + }, + "host": { + "name": "vagrant" + }, + "log": { + "level": "information" + }, + "process": { + "entity_id": "{63a74932-a2b4-61ee-1b00-000000000700}", + "executable": "C:\\Windows\\System32\\svchost.exe", + "hash": { + "sha256": "a94808e7c66973b122f66ec6611019c745a9602f8e944f53635cab58aef35a79" + }, + "name": "svchost.exe", + "pid": 1264 + }, + "related": { + "hash": "a94808e7c66973b122f66ec6611019c745a9602f8e944f53635cab58aef35a79", + "user": "LOCAL SERVICE" + }, + "rule": { + "name": "-" + }, + "sysmon": { + "file": { + "is_executable": false + } + }, + "user": { + "domain": "NT AUTHORITY", + "id": "S-1-5-18", + "name": "LOCAL SERVICE" + }, + "winlog": { + "api": "wineventlog", + "channel": "Microsoft-Windows-Sysmon/Operational", + "computer_name": "vagrant", + "event_id": "26", + "process": { + "pid": 2764, + "thread": { + "id": 3792 + } + }, + "provider_guid": "{5770385f-c22a-43e0-bf4c-06f5698ffbd9}", + "provider_name": "Microsoft-Windows-Sysmon", + "record_id": 456, + "user": { + "identifier": "S-1-5-18" + }, + "version": 5 + } + }, + { + "@timestamp": "2022-01-24T05:12:51.031Z", + "event": { + "category": [ + "file" + ], + "code": "26", + "kind": "event", + "module": "sysmon", + "provider": "Microsoft-Windows-Sysmon", + "type": [ + "deletion" + ] + }, + "file": { + "directory": "C:\\ProgramData\\Microsoft\\Windows\\DeviceMetadataCache", + "extension": "000", + "name": "OLDCACHE.000", + "path": "C:\\ProgramData\\Microsoft\\Windows\\DeviceMetadataCache\\OLDCACHE.000" + }, + "host": { + "name": "vagrant" + }, + "log": { + "level": "information" + }, + "process": { + "entity_id": "{63a74932-3523-61ee-af00-000000000700}", + "executable": "C:\\Windows\\system32\\svchost.exe", + "hash": { + "sha256": "d78fbf654d84ddf2cb4fe221f7d8b61e0decdee48a4687915e6e4a2296e2418b" + }, + "name": "svchost.exe", + "pid": 1364 + }, + "related": { + "hash": "d78fbf654d84ddf2cb4fe221f7d8b61e0decdee48a4687915e6e4a2296e2418b", + "user": "SYSTEM" + }, + "rule": { + "name": "-" + }, + "sysmon": { + "file": { + "is_executable": false + } + }, + "user": { + "domain": "NT AUTHORITY", + "id": "S-1-5-18", + "name": "SYSTEM" + }, + "winlog": { + "api": "wineventlog", + "channel": 
"Microsoft-Windows-Sysmon/Operational", + "computer_name": "vagrant", + "event_id": "26", + "process": { + "pid": 2764, + "thread": { + "id": 3792 + } + }, + "provider_guid": "{5770385f-c22a-43e0-bf4c-06f5698ffbd9}", + "provider_name": "Microsoft-Windows-Sysmon", + "record_id": 457, + "user": { + "identifier": "S-1-5-18" + }, + "version": 5 + } + } +] \ No newline at end of file From 61a7d368e8d89b28b5430785bc51ce4cb25afb7b Mon Sep 17 00:00:00 2001 From: Andrew Kroh Date: Mon, 24 Jan 2022 16:18:52 -0500 Subject: [PATCH 33/69] Improve aws-s3 gzip file detection to avoid false negatives (#29969) Directly check the byte stream for the gzip magic number and deflate compression type. Avoid using http.DetectContentType because it returns the first match it finds while checking many signatures. Closes #29968 --- CHANGELOG.next.asciidoc | 1 + x-pack/filebeat/input/awss3/s3_objects.go | 12 +++--------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 1efe08acb2b..82a20fa1d2f 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -110,6 +110,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Undo deletion of endpoint config from cloudtrail fileset in {pull}29415[29415]. {pull}29450[29450] - Make Cisco ASA and FTD modules conform to the ECS definition for event.outcome and event.type. {issue}29581[29581] {pull}29698[29698] - ibmmq: Fixed `@timestamp` not being populated with correct values. {pull}29773[29773] +- aws-s3: Improve gzip detection to avoid false negatives. {issue}29968[29968] *Heartbeat* diff --git a/x-pack/filebeat/input/awss3/s3_objects.go b/x-pack/filebeat/input/awss3/s3_objects.go index 7fe6b193fa4..ebe1a5f0828 100644 --- a/x-pack/filebeat/input/awss3/s3_objects.go +++ b/x-pack/filebeat/input/awss3/s3_objects.go @@ -15,7 +15,6 @@ import ( "fmt" "io" "io/ioutil" - "net/http" "reflect" "strings" "time" @@ -375,18 +374,13 @@ func s3ObjectHash(obj s3EventV2) string { // stream without consuming it. This makes it convenient for code executed after this function call // to consume the stream if it wants. func isStreamGzipped(r *bufio.Reader) (bool, error) { - // Why 512? See https://godoc.org/net/http#DetectContentType - buf, err := r.Peek(512) + buf, err := r.Peek(3) if err != nil && err != io.EOF { return false, err } - switch http.DetectContentType(buf) { - case "application/x-gzip", "application/zip": - return true, nil - default: - return false, nil - } + // gzip magic number (1f 8b) and the compression method (08 for DEFLATE). + return bytes.HasPrefix(buf, []byte{0x1F, 0x8B, 0x08}), nil } // s3Metadata returns a map containing the selected S3 object metadata keys. From b4624300442f95981e172105ceec4ae19426d3ac Mon Sep 17 00:00:00 2001 From: Andrew Cholakian Date: Mon, 24 Jan 2022 16:08:38 -0600 Subject: [PATCH 34/69] Increase recommended k8s resource requests / limits (#29457) Changes the recommended k8s resource limits / requests to be more suitable for synthetics. 
---
 deploy/kubernetes/heartbeat-kubernetes.yaml           | 10 +++++++---
 deploy/kubernetes/heartbeat/heartbeat-deployment.yaml | 10 +++++++---
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/deploy/kubernetes/heartbeat-kubernetes.yaml b/deploy/kubernetes/heartbeat-kubernetes.yaml
index ba21e40f43a..e68076ae5eb 100644
--- a/deploy/kubernetes/heartbeat-kubernetes.yaml
+++ b/deploy/kubernetes/heartbeat-kubernetes.yaml
@@ -100,10 +100,14 @@ spec:
           runAsUser: 0
         resources:
           limits:
-            memory: 200Mi
+            memory: 1536Mi
           requests:
-            cpu: 100m
-            memory: 100Mi
+            # for synthetics, 2 full cores is a good starting point for relatively consistent performance of a single concurrent check
+            # For lightweight checks as low as 100m is fine
+            cpu: 2000m
+            # A high value like this is encouraged for browser based monitors.
+            # Lightweight checks use substantially less, even 128Mi is fine for those.
+            memory: 1536Mi
         volumeMounts:
         - name: config
           mountPath: /etc/heartbeat.yml
diff --git a/deploy/kubernetes/heartbeat/heartbeat-deployment.yaml b/deploy/kubernetes/heartbeat/heartbeat-deployment.yaml
index 3f7a471b457..63855464aff 100644
--- a/deploy/kubernetes/heartbeat/heartbeat-deployment.yaml
+++ b/deploy/kubernetes/heartbeat/heartbeat-deployment.yaml
@@ -46,10 +46,14 @@ spec:
           runAsUser: 0
         resources:
           limits:
-            memory: 200Mi
+            memory: 1536Mi
           requests:
-            cpu: 100m
-            memory: 100Mi
+            # for synthetics, 2 full cores is a good starting point for relatively consistent performance of a single concurrent check
+            # For lightweight checks as low as 100m is fine
+            cpu: 2000m
+            # A high value like this is encouraged for browser based monitors.
+            # Lightweight checks use substantially less, even 128Mi is fine for those.
+            memory: 1536Mi
         volumeMounts:
         - name: config
          mountPath: /etc/heartbeat.yml

From da9720cca91f0f22c190ddd9514a950cdb37806b Mon Sep 17 00:00:00 2001
From: Guillaume Marsay
Date: Tue, 25 Jan 2022 02:40:53 +0100
Subject: [PATCH 35/69] [Heartbeat] Change size of data on ICMP packet (#29948)

* [Heartbeat] Change size of data on ICMP packet

* Add CHANGELOG entry.

Co-authored-by: Guillaume Marsay
Co-authored-by: Justin Kambic
---
 CHANGELOG.next.asciidoc                   | 1 +
 heartbeat/monitors/active/icmp/stdloop.go | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index 82a20fa1d2f..404be94402e 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -52,6 +52,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d
 - Fix broken macOS ICMP python e2e test. {pull}29900[29900]
 - Only add monitor.status to browser events when summary. {pull}29460[29460]
 - Also add summary to journeys for which the synthetics runner crashes. {pull}29606[29606]
+- Update size of ICMP packets to adhere to standard min size.
{pull}29948[29948] *Metricbeat* diff --git a/heartbeat/monitors/active/icmp/stdloop.go b/heartbeat/monitors/active/icmp/stdloop.go index 9f5f5543967..6f76b256353 100644 --- a/heartbeat/monitors/active/icmp/stdloop.go +++ b/heartbeat/monitors/active/icmp/stdloop.go @@ -346,7 +346,7 @@ func (l *stdICMPLoop) sendEchoRequest(addr *net.IPAddr) (*requestContext, error) l.requests[id] = ctx l.mutex.Unlock() - payloadBuf := make([]byte, 0, 8) + payloadBuf := make([]byte, 48, 48) payload := bytes.NewBuffer(payloadBuf) ts := time.Now() binary.Write(payload, binary.BigEndian, ts.UnixNano()) From 3202b403d87fc10aaf62601c186205c3ab612319 Mon Sep 17 00:00:00 2001 From: Anderson Queiroz Date: Tue, 25 Jan 2022 11:06:22 +0100 Subject: [PATCH 36/69] [Elastic Agent] Fix some typos (#29890) --- dev-tools/mage/crossbuild.go | 4 ++-- libbeat/common/backoff/equal_jitter.go | 4 ++-- libbeat/logp/configure/logging.go | 2 +- libbeat/logp/core.go | 2 +- x-pack/elastic-agent/magefile.go | 2 +- .../pkg/agent/application/gateway/fleet/fleet_gateway.go | 3 ++- .../elastic-agent/pkg/agent/application/pipeline/pipeline.go | 2 +- x-pack/elastic-agent/pkg/agent/application/reexec/manager.go | 2 +- x-pack/elastic-agent/pkg/core/server/server.go | 2 +- x-pack/elastic-agent/pkg/core/status/reporter.go | 2 +- 10 files changed, 13 insertions(+), 12 deletions(-) diff --git a/dev-tools/mage/crossbuild.go b/dev-tools/mage/crossbuild.go index 9f56fd125c3..ee209bd688c 100644 --- a/dev-tools/mage/crossbuild.go +++ b/dev-tools/mage/crossbuild.go @@ -43,8 +43,8 @@ const defaultCrossBuildTarget = "golangCrossBuild" // See NewPlatformList for details about platform filtering expressions. var Platforms = BuildPlatforms.Defaults() -// Types is the list of package types -var SelectedPackageTypes []PackageType +// SelectedPackageTypes is the list of package types +var SelectedPackageTypes []PackageType = []PackageType{TarGz} func init() { // Allow overriding via PLATFORMS. diff --git a/libbeat/common/backoff/equal_jitter.go b/libbeat/common/backoff/equal_jitter.go index ff5c86f156f..d5b5c7d250c 100644 --- a/libbeat/common/backoff/equal_jitter.go +++ b/libbeat/common/backoff/equal_jitter.go @@ -47,11 +47,11 @@ func NewEqualJitterBackoff(done <-chan struct{}, init, max time.Duration) Backof // Reset resets the duration of the backoff. func (b *EqualJitterBackoff) Reset() { - // Allow to sleep at least the init period on the first wait. + // Allow sleeping at least the init period on the first wait. b.duration = b.init * 2 } -// Wait block until either the timer is completed or channel is done. +// Wait blocks until either the timer is completed or channel is done. func (b *EqualJitterBackoff) Wait() bool { // Make sure we have always some minimal back off and jitter. temp := int64(b.duration / 2) diff --git a/libbeat/logp/configure/logging.go b/libbeat/logp/configure/logging.go index 43a32dd7f2f..9a041bc2559 100644 --- a/libbeat/logp/configure/logging.go +++ b/libbeat/logp/configure/logging.go @@ -60,7 +60,7 @@ func Logging(beatName string, cfg *common.Config) error { return logp.Configure(config) } -// Logging builds a logp.Config based on the given common.Config and the specified +// LoggingWithOutputs builds a logp.Config based on the given common.Config and the specified // CLI flags along with the given outputs. 
func LoggingWithOutputs(beatName string, cfg *common.Config, outputs ...zapcore.Core) error { config := logp.DefaultConfig(environment) diff --git a/libbeat/logp/core.go b/libbeat/logp/core.go index 552c81e9201..262515ac8d2 100644 --- a/libbeat/logp/core.go +++ b/libbeat/logp/core.go @@ -67,7 +67,7 @@ func Configure(cfg Config) error { return ConfigureWithOutputs(cfg) } -// XXX: ConfigureWithOutputs is used by elastic-agent only (See file: x-pack/elastic-agent/pkg/core/logger/logger.go). +// ConfigureWithOutputs XXX: is used by elastic-agent only (See file: x-pack/elastic-agent/pkg/core/logger/logger.go). // The agent requires that the output specified in the config object is configured and merged with the // logging outputs given. func ConfigureWithOutputs(cfg Config, outputs ...zapcore.Core) error { diff --git a/x-pack/elastic-agent/magefile.go b/x-pack/elastic-agent/magefile.go index a04d3c974bf..ed84c8ca073 100644 --- a/x-pack/elastic-agent/magefile.go +++ b/x-pack/elastic-agent/magefile.go @@ -508,7 +508,7 @@ func runAgent(env map[string]string) error { return err } - // docker does not exists for this commit, build it + // docker does not exist for this commit, build it if !strings.Contains(dockerImageOut, tag) { // produce docker package packageAgent([]string{ diff --git a/x-pack/elastic-agent/pkg/agent/application/gateway/fleet/fleet_gateway.go b/x-pack/elastic-agent/pkg/agent/application/gateway/fleet/fleet_gateway.go index ee16f4b9a9d..fd835ee95f4 100644 --- a/x-pack/elastic-agent/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/x-pack/elastic-agent/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -127,7 +127,8 @@ func newFleetGatewayWithScheduler( stateStore stateStore, ) (gateway.FleetGateway, error) { - // Backoff implementation doesn't support the using context as the shutdown mechanism. + // Backoff implementation doesn't support the use of a context [cancellation] + // as the shutdown mechanism. // So we keep a done channel that will be closed when the current context is shutdown. done := make(chan struct{}) diff --git a/x-pack/elastic-agent/pkg/agent/application/pipeline/pipeline.go b/x-pack/elastic-agent/pkg/agent/application/pipeline/pipeline.go index c8cac5b3216..f2f02efb258 100644 --- a/x-pack/elastic-agent/pkg/agent/application/pipeline/pipeline.go +++ b/x-pack/elastic-agent/pkg/agent/application/pipeline/pipeline.go @@ -29,7 +29,7 @@ var DefaultRK = "default" // RoutingKey is used for routing as pipeline id. type RoutingKey = string -// Router is an interace routes programs to correspongind stream +// Router is an interface routing programs to the corresponding stream. 
type Router interface { Routes() *sorted.Set Route(id string, grpProg map[RoutingKey][]program.Program) error diff --git a/x-pack/elastic-agent/pkg/agent/application/reexec/manager.go b/x-pack/elastic-agent/pkg/agent/application/reexec/manager.go index 5ccc870d948..d251926d357 100644 --- a/x-pack/elastic-agent/pkg/agent/application/reexec/manager.go +++ b/x-pack/elastic-agent/pkg/agent/application/reexec/manager.go @@ -54,7 +54,7 @@ func (m *manager) ReExec(shutdownCallback ShutdownCallbackFn, argOverrides ...st if shutdownCallback != nil { if err := shutdownCallback(); err != nil { // panic; because there is no going back, everything is shutdown - panic(errors.New(errors.TypeUnexpected, err, "failure occured during shutdown cleanup")) + panic(errors.New(errors.TypeUnexpected, err, "failure occurred during shutdown cleanup")) } } diff --git a/x-pack/elastic-agent/pkg/core/server/server.go b/x-pack/elastic-agent/pkg/core/server/server.go index 390283b4e15..f584e70bad2 100644 --- a/x-pack/elastic-agent/pkg/core/server/server.go +++ b/x-pack/elastic-agent/pkg/core/server/server.go @@ -581,7 +581,7 @@ func (as *ApplicationState) Stop(timeout time.Duration) error { doneChan := as.checkinDone as.checkinLock.RUnlock() if (wasConn && doneChan == nil) || (!wasConn && s == proto.StateObserved_STOPPING && doneChan == nil) { - // either occurred + // either occurred: // * client was connected then disconnected on stop // * client was not connected; connected; received stopping; then disconnected as.Destroy() diff --git a/x-pack/elastic-agent/pkg/core/status/reporter.go b/x-pack/elastic-agent/pkg/core/status/reporter.go index 3add6b188c8..2e34bb15cdc 100644 --- a/x-pack/elastic-agent/pkg/core/status/reporter.go +++ b/x-pack/elastic-agent/pkg/core/status/reporter.go @@ -279,7 +279,7 @@ func (r *reporter) Update(s state.Status, message string, payload map[string]int } } -// Unregister unregister status from reporter. Reporter will no longer be taken into consideration +// Unregister unregisters status from reporter. Reporter will no longer be taken into consideration // for overall status computation. func (r *reporter) Unregister() { r.mx.Lock() From a84302d99b7be0824eed8d4edcbd550709011f68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Tue, 25 Jan 2022 13:15:51 +0100 Subject: [PATCH 37/69] Add clarification about enableing dashboard loading (#29985) --- libbeat/docs/dashboardsconfig.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/libbeat/docs/dashboardsconfig.asciidoc b/libbeat/docs/dashboardsconfig.asciidoc index 7bed3d256f8..d720d7e8f7e 100644 --- a/libbeat/docs/dashboardsconfig.asciidoc +++ b/libbeat/docs/dashboardsconfig.asciidoc @@ -46,6 +46,9 @@ You can specify the following options in the `setup.dashboards` section of the If this option is set to true, {beatname_uc} loads the sample Kibana dashboards from the local `kibana` directory in the home path of the {beatname_uc} installation. +NOTE: {beatname_uc} loads dashboards on startup if either `enabled` is set to `true` +or the `setup.dashboards` section is included in the configuration. + NOTE: When dashboard loading is enabled, {beatname_uc} overwrites any existing dashboards that match the names of the dashboards you are loading. This happens every time {beatname_uc} starts. 
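A minimal sketch of the two equivalent triggers described in the new NOTE; the custom index pattern shown is illustrative only:

```yaml
# Explicit opt-in to dashboard loading on startup:
setup.dashboards.enabled: true
```

or, equivalently, merely including the `setup.dashboards` section:

```yaml
setup.dashboards:
  index: "customname-*"  # any option in this section is enough; this value is illustrative
```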
From ea8f10cc8f6a44dfdea2be48826f5719c1bdce95 Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Tue, 25 Jan 2022 08:15:24 -0700 Subject: [PATCH 38/69] Support running multiple log groups in cloudwatch input (#29695) --- CHANGELOG.next.asciidoc | 1 + .../docs/inputs/input-aws-cloudwatch.asciidoc | 8 +- .../awscloudwatch/_meta/terraform/.gitignore | 3 + .../_meta/terraform/.terraform.lock.hcl | 57 +++ .../awscloudwatch/_meta/terraform/README.md | 46 ++ .../awscloudwatch/_meta/terraform/main.tf | 44 ++ .../awscloudwatch/_meta/terraform/outputs.tf | 11 + .../_meta/terraform/variables.tf | 5 + .../input/awscloudwatch/cloudwatch.go | 126 ++++++ x-pack/filebeat/input/awscloudwatch/config.go | 12 +- x-pack/filebeat/input/awscloudwatch/input.go | 393 ++++++++---------- .../awscloudwatch/input_integration_test.go | 232 +++++++++++ .../input/awscloudwatch/input_test.go | 9 - .../filebeat/input/awscloudwatch/metrics.go | 39 ++ .../filebeat/input/awscloudwatch/processor.go | 78 ++++ x-pack/filebeat/input/awss3/input.go | 2 +- .../input/awss3/input_benchmark_test.go | 6 +- .../input/awss3/input_integration_test.go | 6 +- x-pack/filebeat/input/awss3/interfaces.go | 4 +- .../input/awss3/mock_interfaces_test.go | 3 +- x-pack/filebeat/input/awss3/s3.go | 7 +- x-pack/filebeat/input/awss3/s3_objects.go | 12 +- .../filebeat/input/awss3/s3_objects_test.go | 11 +- x-pack/filebeat/input/awss3/s3_test.go | 11 +- x-pack/filebeat/input/awss3/sqs.go | 5 +- x-pack/filebeat/input/awss3/sqs_s3_event.go | 4 +- .../filebeat/input/awss3/sqs_s3_event_test.go | 3 +- x-pack/filebeat/input/awss3/sqs_test.go | 4 +- .../input/default-inputs/inputs_other.go | 2 + .../awss3 => libbeat/common/aws}/acker.go | 34 +- .../common/aws}/acker_test.go | 20 +- .../awss3 => libbeat/common/aws}/semaphore.go | 16 +- .../common/aws}/semaphore_test.go | 4 +- 33 files changed, 907 insertions(+), 311 deletions(-) create mode 100644 x-pack/filebeat/input/awscloudwatch/_meta/terraform/.gitignore create mode 100644 x-pack/filebeat/input/awscloudwatch/_meta/terraform/.terraform.lock.hcl create mode 100644 x-pack/filebeat/input/awscloudwatch/_meta/terraform/README.md create mode 100644 x-pack/filebeat/input/awscloudwatch/_meta/terraform/main.tf create mode 100644 x-pack/filebeat/input/awscloudwatch/_meta/terraform/outputs.tf create mode 100644 x-pack/filebeat/input/awscloudwatch/_meta/terraform/variables.tf create mode 100644 x-pack/filebeat/input/awscloudwatch/cloudwatch.go create mode 100644 x-pack/filebeat/input/awscloudwatch/input_integration_test.go create mode 100644 x-pack/filebeat/input/awscloudwatch/metrics.go create mode 100644 x-pack/filebeat/input/awscloudwatch/processor.go rename x-pack/{filebeat/input/awss3 => libbeat/common/aws}/acker.go (75%) rename x-pack/{filebeat/input/awss3 => libbeat/common/aws}/acker_test.go (79%) rename x-pack/{filebeat/input/awss3 => libbeat/common/aws}/semaphore.go (81%) rename x-pack/{filebeat/input/awss3 => libbeat/common/aws}/semaphore_test.go (95%) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 404be94402e..92e3291317e 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -111,6 +111,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Undo deletion of endpoint config from cloudtrail fileset in {pull}29415[29415]. {pull}29450[29450] - Make Cisco ASA and FTD modules conform to the ECS definition for event.outcome and event.type. 
{issue}29581[29581] {pull}29698[29698]
 - ibmmq: Fixed `@timestamp` not being populated with correct values. {pull}29773[29773]
+- Fix using log_group_name_prefix in aws-cloudwatch input. {pull}29695[29695]
 - aws-s3: Improve gzip detection to avoid false negatives. {issue}29968[29968]

 *Heartbeat*

diff --git a/x-pack/filebeat/docs/inputs/input-aws-cloudwatch.asciidoc b/x-pack/filebeat/docs/inputs/input-aws-cloudwatch.asciidoc
index 71f66674387..1df0fb99a2e 100644
--- a/x-pack/filebeat/docs/inputs/input-aws-cloudwatch.asciidoc
+++ b/x-pack/filebeat/docs/inputs/input-aws-cloudwatch.asciidoc
@@ -50,12 +50,18 @@ log_group_name is given.
 ==== `log_group_name_prefix`
 The prefix for a group of log group names. Note: `region_name` is required when
 log_group_name_prefix is given. `log_group_name` and `log_group_name_prefix`
-cannot be given at the same time.
+cannot be given at the same time. The number of workers that will process the
+log groups under this prefix is set through the `number_of_workers` config.

 [float]
 ==== `region_name`
 Region that the specified log group or log group prefix belongs to.

+[float]
+==== `number_of_workers`
+Number of workers that will process the log groups with the given `log_group_name_prefix`.
+Default value is 1.
+
 [float]
 ==== `log_streams`
 A list of log stream names that Filebeat collects log events from.
diff --git a/x-pack/filebeat/input/awscloudwatch/_meta/terraform/.gitignore b/x-pack/filebeat/input/awscloudwatch/_meta/terraform/.gitignore
new file mode 100644
index 00000000000..0825744a776
--- /dev/null
+++ b/x-pack/filebeat/input/awscloudwatch/_meta/terraform/.gitignore
@@ -0,0 +1,3 @@
+terraform/
+outputs.yml
+*.tfstate*
diff --git a/x-pack/filebeat/input/awscloudwatch/_meta/terraform/.terraform.lock.hcl b/x-pack/filebeat/input/awscloudwatch/_meta/terraform/.terraform.lock.hcl
new file mode 100644
index 00000000000..7f6381c60af
--- /dev/null
+++ b/x-pack/filebeat/input/awscloudwatch/_meta/terraform/.terraform.lock.hcl
@@ -0,0 +1,57 @@
+# This file is maintained automatically by "terraform init".
+# Manual edits may be lost in future updates.
+ +provider "registry.terraform.io/hashicorp/aws" { + version = "3.70.0" + constraints = "~> 3.52" + hashes = [ + "h1:jn4ImGMZJ9rQdaVSbcCBqUqnhRSpyaM1DivqaNuP+eg=", + "zh:0af710e528e21b930899f0ac295b0ceef8ad7b623dd8f38e92c8ec4bc7af0321", + "zh:4cabcd4519c0aae474d91ae67a8e3a4a8c39c3945c289a9cf7c1409f64409abe", + "zh:58da1a436facb4e4f95cd2870d211ed7bcb8cf721a4a61970aa8da191665f2aa", + "zh:6465339475c1cd3c16a5c8fee61304dcad2c4a27740687d29c6cdc90d2e6423d", + "zh:7a821ed053c355d70ebe33185590953fa5c364c1f3d66fe3f9b4aba3961646b1", + "zh:7c3656cc9cc1739dcb298e7930c9a76ccfce738d2070841d7e6c62fbdae74eef", + "zh:9d9da9e3c60a0c977e156da8590f36a219ae91994bb3df5a1208de2ab3ceeba7", + "zh:a3138817c86bf3e4dca7fd3a92e099cd1bf1d45ee7c7cc9e9773ba04fc3b315a", + "zh:a8603044e935dfb3cb9319a46d26276162c6aea75e02c4827232f9c6029a3182", + "zh:aef9482332bf43d0b73317f5909dec9e95b983c67b10d72e75eacc7c4f37d084", + "zh:fc3f3cad84f2eebe566dd0b65904c934093007323b9b85e73d9dd4535ceeb29d", + ] +} + +provider "registry.terraform.io/hashicorp/local" { + version = "2.1.0" + hashes = [ + "h1:KfieWtVyGWwplSoLIB5usKAUnrIkDQBkWaR5TI+4WYg=", + "zh:0f1ec65101fa35050978d483d6e8916664b7556800348456ff3d09454ac1eae2", + "zh:36e42ac19f5d68467aacf07e6adcf83c7486f2e5b5f4339e9671f68525fc87ab", + "zh:6db9db2a1819e77b1642ec3b5e95042b202aee8151a0256d289f2e141bf3ceb3", + "zh:719dfd97bb9ddce99f7d741260b8ece2682b363735c764cac83303f02386075a", + "zh:7598bb86e0378fd97eaa04638c1a4c75f960f62f69d3662e6d80ffa5a89847fe", + "zh:ad0a188b52517fec9eca393f1e2c9daea362b33ae2eb38a857b6b09949a727c1", + "zh:c46846c8df66a13fee6eff7dc5d528a7f868ae0dcf92d79deaac73cc297ed20c", + "zh:dc1a20a2eec12095d04bf6da5321f535351a594a636912361db20eb2a707ccc4", + "zh:e57ab4771a9d999401f6badd8b018558357d3cbdf3d33cc0c4f83e818ca8e94b", + "zh:ebdcde208072b4b0f8d305ebf2bfdc62c926e0717599dcf8ec2fd8c5845031c3", + "zh:ef34c52b68933bedd0868a13ccfd59ff1c820f299760b3c02e008dc95e2ece91", + ] +} + +provider "registry.terraform.io/hashicorp/random" { + version = "3.1.0" + hashes = [ + "h1:rKYu5ZUbXwrLG1w81k7H3nce/Ys6yAxXhWcbtk36HjY=", + "zh:2bbb3339f0643b5daa07480ef4397bd23a79963cc364cdfbb4e86354cb7725bc", + "zh:3cd456047805bf639fbf2c761b1848880ea703a054f76db51852008b11008626", + "zh:4f251b0eda5bb5e3dc26ea4400dba200018213654b69b4a5f96abee815b4f5ff", + "zh:7011332745ea061e517fe1319bd6c75054a314155cb2c1199a5b01fe1889a7e2", + "zh:738ed82858317ccc246691c8b85995bc125ac3b4143043219bd0437adc56c992", + "zh:7dbe52fac7bb21227acd7529b487511c91f4107db9cc4414f50d04ffc3cab427", + "zh:a3a9251fb15f93e4cfc1789800fc2d7414bbc18944ad4c5c98f466e6477c42bc", + "zh:a543ec1a3a8c20635cf374110bd2f87c07374cf2c50617eee2c669b3ceeeaa9f", + "zh:d9ab41d556a48bd7059f0810cf020500635bfc696c9fc3adab5ea8915c1d886b", + "zh:d9e13427a7d011dbd654e591b0337e6074eef8c3b9bb11b2e39eaaf257044fd7", + "zh:f7605bd1437752114baf601bdf6931debe6dc6bfe3006eb7e9bb9080931dca8a", + ] +} diff --git a/x-pack/filebeat/input/awscloudwatch/_meta/terraform/README.md b/x-pack/filebeat/input/awscloudwatch/_meta/terraform/README.md new file mode 100644 index 00000000000..5d9e4707a4a --- /dev/null +++ b/x-pack/filebeat/input/awscloudwatch/_meta/terraform/README.md @@ -0,0 +1,46 @@ +# Terraform setup for AWS CloudWatch Input Integration Tests + +This directory contains a Terraform module that creates the AWS resources needed +for executing the integration tests for the `aws-cloudwatch` Filebeat input. It +creates two CloudWatch log groups, and one log stream under each log group. 
+
+It outputs configuration information that is consumed by the tests to
+`outputs.yml`. The AWS resources are randomly named to prevent name collisions
+between multiple users.
+
+### Usage
+
+You must have the appropriate AWS environment variables for authentication set
+before running Terraform or the integration tests. The AWS key must be
+authorized to create and destroy AWS CloudWatch log groups.
+
+1. Initialize a working directory containing Terraform configuration files.
+
+   `terraform init`
+
+2. Execute terraform in this directory to create the resources. This will also
+   write the `outputs.yml`. You can use `export TF_VAR_aws_region=NNNNN` in order
+   to match the AWS region of the profile you are using.
+
+   `terraform apply`
+
+
+3. (Optional) View the output configuration.
+
+   ```yaml
+   "aws_region": "us-east-1"
+   "log_group_name_1": "filebeat-cloudwatch-integtest-1-417koa"
+   "log_group_name_2": "filebeat-cloudwatch-integtest-2-417koa"
+   ```
+
+4. Execute the integration test.
+
+   ```
+   cd x-pack/filebeat/input/awscloudwatch
+   go test -tags aws,integration -run TestInputRun.+ -v .
+   ```
+
+5. Cleanup AWS resources. Execute terraform to delete the log groups created for
+testing.
+
+   `terraform destroy`
diff --git a/x-pack/filebeat/input/awscloudwatch/_meta/terraform/main.tf b/x-pack/filebeat/input/awscloudwatch/_meta/terraform/main.tf
new file mode 100644
index 00000000000..bb3b2459302
--- /dev/null
+++ b/x-pack/filebeat/input/awscloudwatch/_meta/terraform/main.tf
@@ -0,0 +1,44 @@
+terraform {
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "~> 3.52"
+    }
+  }
+}
+
+provider "aws" {
+  region = var.aws_region
+}
+
+resource "random_string" "random" {
+  length  = 6
+  special = false
+  upper   = false
+}
+
+resource "aws_cloudwatch_log_group" "filebeat-integtest-1" {
+  name = "filebeat-log-group-integtest-1-${random_string.random.result}"
+
+  tags = {
+    Environment = "test"
+  }
+}
+
+resource "aws_cloudwatch_log_group" "filebeat-integtest-2" {
+  name = "filebeat-log-group-integtest-2-${random_string.random.result}"
+
+  tags = {
+    Environment = "test"
+  }
+}
+
+resource "aws_cloudwatch_log_stream" "filebeat-integtest-1" {
+  name           = "filebeat-log-stream-integtest-1-${random_string.random.result}"
+  log_group_name = aws_cloudwatch_log_group.filebeat-integtest-1.name
+}
+
+resource "aws_cloudwatch_log_stream" "filebeat-integtest-2" {
+  name           = "filebeat-log-stream-integtest-2-${random_string.random.result}"
+  log_group_name = aws_cloudwatch_log_group.filebeat-integtest-2.name
+}
diff --git a/x-pack/filebeat/input/awscloudwatch/_meta/terraform/outputs.tf b/x-pack/filebeat/input/awscloudwatch/_meta/terraform/outputs.tf
new file mode 100644
index 00000000000..09e0a07e4a9
--- /dev/null
+++ b/x-pack/filebeat/input/awscloudwatch/_meta/terraform/outputs.tf
@@ -0,0 +1,11 @@
+resource "local_file" "secrets" {
+  content = yamlencode({
+    "log_group_name_1" : aws_cloudwatch_log_group.filebeat-integtest-1.name
+    "log_group_name_2" : aws_cloudwatch_log_group.filebeat-integtest-2.name
+    "log_stream_name_1" : aws_cloudwatch_log_stream.filebeat-integtest-1.name
+    "log_stream_name_2" : aws_cloudwatch_log_stream.filebeat-integtest-2.name
+    "aws_region" : var.aws_region
+  })
+  filename        = "${path.module}/outputs.yml"
+  file_permission = "0644"
+}
diff --git a/x-pack/filebeat/input/awscloudwatch/_meta/terraform/variables.tf b/x-pack/filebeat/input/awscloudwatch/_meta/terraform/variables.tf
new file mode 100644
index 00000000000..2c4fb00786b
--- /dev/null
+++ 
b/x-pack/filebeat/input/awscloudwatch/_meta/terraform/variables.tf
@@ -0,0 +1,5 @@
+variable "aws_region" {
+  description = "AWS Region"
+  type        = string
+  default     = "us-east-1"
+}
diff --git a/x-pack/filebeat/input/awscloudwatch/cloudwatch.go b/x-pack/filebeat/input/awscloudwatch/cloudwatch.go
new file mode 100644
index 00000000000..29c119117a0
--- /dev/null
+++ b/x-pack/filebeat/input/awscloudwatch/cloudwatch.go
@@ -0,0 +1,126 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package awscloudwatch
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	awssdk "github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"
+	"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/cloudwatchlogsiface"
+	"github.com/pkg/errors"
+
+	"github.com/elastic/beats/v7/libbeat/logp"
+	"github.com/elastic/beats/v7/libbeat/monitoring"
+	"github.com/elastic/beats/v7/libbeat/statestore"
+	awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws"
+)
+
+type cloudwatchPoller struct {
+	numberOfWorkers      int
+	apiSleep             time.Duration
+	region               string
+	logStreams           []string
+	logStreamPrefix      string
+	startTime            int64
+	endTime              int64
+	prevEndTime          int64
+	workerSem            *awscommon.Sem
+	log                  *logp.Logger
+	metrics              *inputMetrics
+	store                *statestore.Store
+	workersListingMap    *sync.Map
+	workersProcessingMap *sync.Map
+}
+
+func newCloudwatchPoller(log *logp.Logger, metrics *inputMetrics,
+	store *statestore.Store,
+	awsRegion string, apiSleep time.Duration,
+	numberOfWorkers int, logStreams []string, logStreamPrefix string) *cloudwatchPoller {
+	if metrics == nil {
+		metrics = newInputMetrics(monitoring.NewRegistry(), "")
+	}
+
+	return &cloudwatchPoller{
+		numberOfWorkers:      numberOfWorkers,
+		apiSleep:             apiSleep,
+		region:               awsRegion,
+		logStreams:           logStreams,
+		logStreamPrefix:      logStreamPrefix,
+		startTime:            int64(0),
+		endTime:              int64(0),
+		workerSem:            awscommon.NewSem(numberOfWorkers),
+		log:                  log,
+		metrics:              metrics,
+		store:                store,
+		workersListingMap:    new(sync.Map),
+		workersProcessingMap: new(sync.Map),
+	}
+}
+
+func (p *cloudwatchPoller) run(svc cloudwatchlogsiface.ClientAPI, logGroup string, startTime int64, endTime int64, logProcessor *logProcessor) {
+	err := p.getLogEventsFromCloudWatch(svc, logGroup, startTime, endTime, logProcessor)
+	if err != nil {
+		var rce *awssdk.RequestCanceledError
+		if errors.As(err, &rce) {
+			p.log.Error("getLogEventsFromCloudWatch failed with RequestCanceledError: ", err)
+		}
+		p.log.Error("getLogEventsFromCloudWatch failed: ", err)
+	}
+}
+
+// getLogEventsFromCloudWatch uses FilterLogEvents API to collect logs from CloudWatch
+func (p *cloudwatchPoller) getLogEventsFromCloudWatch(svc cloudwatchlogsiface.ClientAPI, logGroup string, startTime int64, endTime int64, logProcessor *logProcessor) error {
+	// construct FilterLogEventsInput
+	filterLogEventsInput := p.constructFilterLogEventsInput(startTime, endTime, logGroup)
+
+	// make API request
+	req := svc.FilterLogEventsRequest(filterLogEventsInput)
+	paginator := cloudwatchlogs.NewFilterLogEventsPaginator(req)
+	for paginator.Next(context.TODO()) {
+		page := paginator.CurrentPage()
+		p.metrics.apiCallsTotal.Inc()
+
+		logEvents := page.Events
+		p.metrics.logEventsReceivedTotal.Add(uint64(len(logEvents)))
+
+		// This sleep is to avoid hitting the FilterLogEvents
API limit(5 transactions per second (TPS)/account/Region). + p.log.Debugf("sleeping for %v before making FilterLogEvents API call again", p.apiSleep) + time.Sleep(p.apiSleep) + p.log.Debug("done sleeping") + + p.log.Debugf("Processing #%v events", len(logEvents)) + err := logProcessor.processLogEvents(logEvents, logGroup, p.region) + if err != nil { + err = errors.Wrap(err, "processLogEvents failed") + p.log.Error(err) + } + } + + if err := paginator.Err(); err != nil { + return errors.Wrap(err, "error FilterLogEvents with Paginator") + } + return nil +} + +func (p *cloudwatchPoller) constructFilterLogEventsInput(startTime int64, endTime int64, logGroup string) *cloudwatchlogs.FilterLogEventsInput { + filterLogEventsInput := &cloudwatchlogs.FilterLogEventsInput{ + LogGroupName: awssdk.String(logGroup), + StartTime: awssdk.Int64(startTime), + EndTime: awssdk.Int64(endTime), + Limit: awssdk.Int64(100), + } + + if len(p.logStreams) > 0 { + filterLogEventsInput.LogStreamNames = p.logStreams + } + + if p.logStreamPrefix != "" { + filterLogEventsInput.LogStreamNamePrefix = awssdk.String(p.logStreamPrefix) + } + return filterLogEventsInput +} diff --git a/x-pack/filebeat/input/awscloudwatch/config.go b/x-pack/filebeat/input/awscloudwatch/config.go index 3f04813e78c..0d8a225866c 100644 --- a/x-pack/filebeat/input/awscloudwatch/config.go +++ b/x-pack/filebeat/input/awscloudwatch/config.go @@ -25,7 +25,8 @@ type config struct { APITimeout time.Duration `config:"api_timeout" validate:"min=0,nonzero"` APISleep time.Duration `config:"api_sleep" validate:"min=0,nonzero"` Latency time.Duration `config:"latency"` - AwsConfig awscommon.ConfigAWS `config:",inline"` + NumberOfWorkers int `config:"number_of_workers"` + AWSConfig awscommon.ConfigAWS `config:",inline"` } func defaultConfig() config { @@ -33,10 +34,11 @@ func defaultConfig() config { ForwarderConfig: harvester.ForwarderConfig{ Type: "aws-cloudwatch", }, - StartPosition: "beginning", - ScanFrequency: 10 * time.Second, - APITimeout: 120 * time.Second, - APISleep: 200 * time.Millisecond, // FilterLogEvents has a limit of 5 transactions per second (TPS)/account/Region: 1s / 5 = 200 ms + StartPosition: "beginning", + ScanFrequency: 10 * time.Second, + APITimeout: 120 * time.Second, + APISleep: 200 * time.Millisecond, // FilterLogEvents has a limit of 5 transactions per second (TPS)/account/Region: 1s / 5 = 200 ms + NumberOfWorkers: 1, } } diff --git a/x-pack/filebeat/input/awscloudwatch/input.go b/x-pack/filebeat/input/awscloudwatch/input.go index 967c8102f03..d11afa77ff5 100644 --- a/x-pack/filebeat/input/awscloudwatch/input.go +++ b/x-pack/filebeat/input/awscloudwatch/input.go @@ -6,6 +6,7 @@ package awscloudwatch import ( "context" + "fmt" "strings" "sync" "time" @@ -16,71 +17,61 @@ import ( "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/cloudwatchlogsiface" "github.com/pkg/errors" - "github.com/elastic/beats/v7/filebeat/channel" - "github.com/elastic/beats/v7/filebeat/input" + "github.com/elastic/beats/v7/filebeat/beater" + v2 "github.com/elastic/beats/v7/filebeat/input/v2" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" - "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/feature" + "github.com/elastic/beats/v7/libbeat/monitoring" awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" + 
"github.com/elastic/go-concert/unison" ) const ( inputName = "aws-cloudwatch" ) -func init() { - err := input.Register(inputName, NewInput) - if err != nil { - panic(errors.Wrapf(err, "failed to register %v input", inputName)) +func Plugin(store beater.StateStore) v2.Plugin { + return v2.Plugin{ + Name: inputName, + Stability: feature.Stable, + Deprecated: false, + Info: "Collect logs from cloudwatch", + Manager: &cloudwatchInputManager{store: store}, } } -// awsCloudWatchInput is a input for AWS CloudWatch logs -type awsCloudWatchInput struct { - config config - awsConfig awssdk.Config - - logger *logp.Logger - outlet channel.Outleter // Output of received aws-cloudwatch logs. - inputCtx *channelContext - - workerOnce sync.Once // Guarantees that the worker goroutine is only started once. - workerWg sync.WaitGroup // Waits on aws-cloudwatch worker goroutine. - stopOnce sync.Once - close chan struct{} - - prevEndTime int64 // track previous endTime for each iteration. +type cloudwatchInputManager struct { + store beater.StateStore } -// channelContext implements context.Context by wrapping a channel -type channelContext struct { - done <-chan struct{} +func (im *cloudwatchInputManager) Init(grp unison.Group, mode v2.Mode) error { + return nil } -func (c *channelContext) Deadline() (time.Time, bool) { return time.Time{}, false } -func (c *channelContext) Done() <-chan struct{} { return c.done } -func (c *channelContext) Err() error { - select { - case <-c.done: - return context.Canceled - default: - return nil +func (im *cloudwatchInputManager) Create(cfg *common.Config) (v2.Input, error) { + config := defaultConfig() + if err := cfg.Unpack(&config); err != nil { + return nil, err } + + return newInput(config, im.store) } -func (c *channelContext) Value(key interface{}) interface{} { return nil } -// NewInput creates a new aws-cloudwatch input -func NewInput(cfg *common.Config, connector channel.Connector, context input.Context) (input.Input, error) { - cfgwarn.Beta("aws-clouwatch input type is used") - logger := logp.NewLogger(inputName) +// cloudwatchInput is an input for reading logs from CloudWatch periodically. +type cloudwatchInput struct { + config config + awsConfig awssdk.Config + store beater.StateStore +} - // Extract and validate the input's configuration. 
-	in.outlet, err = connector.Connect(cfg)
-	if err != nil {
-		return nil, err
-	}
+func (in *cloudwatchInput) Name() string { return inputName }

-	in.logger.Info("Initialized AWS CloudWatch input.")
-	return in, nil
+func (in *cloudwatchInput) Test(ctx v2.TestContext) error {
+	return nil
 }

-// Run runs the input
-func (in *awsCloudWatchInput) Run() {
-	// Please see https://docs.aws.amazon.com/general/latest/gr/cwl_region.html for more info on Amazon CloudWatch Logs endpoints.
-	logsServiceName := awscommon.CreateServiceName("logs", in.config.AwsConfig.FIPSEnabled, in.config.RegionName)
-	cwConfig := awscommon.EnrichAWSConfigWithEndpoint(in.config.AwsConfig.Endpoint, logsServiceName, in.config.RegionName, in.awsConfig)
-	svc := cloudwatchlogs.New(cwConfig)
-
-	var logGroupNames []string
+func (in *cloudwatchInput) Run(inputContext v2.Context, pipeline beat.Pipeline) error {
 	var err error
-	if in.config.LogGroupNamePrefix != "" {
-		logGroupNames, err = in.getLogGroupNames(svc)
-		if err != nil {
-			in.logger.Error("getLogGroupNames failed: ", err)
-			return
+
+	persistentStore, err := in.store.Access()
+	if err != nil {
+		return fmt.Errorf("can not access persistent store: %w", err)
+	}
+
+	defer persistentStore.Close()
+
+	// Wrap the input Context's cancellation Done channel in a context.Context. This
+	// goroutine stops when the parent closes the Done channel.
+	ctx, cancelInputCtx := context.WithCancel(context.Background())
+	go func() {
+		defer cancelInputCtx()
+		select {
+		case <-inputContext.Cancelation.Done():
+		case <-ctx.Done():
 		}
-	} else {
-		logGroupNames = []string{in.config.LogGroupName}
+	}()
+	defer cancelInputCtx()
+
+	// Create client for publishing events and receive notification of their ACKs.
+	client, err := pipeline.ConnectWith(beat.ClientConfig{
+		CloseRef:   inputContext.Cancelation,
+		ACKHandler: awscommon.NewEventACKHandler(),
+	})
+	if err != nil {
+		return fmt.Errorf("failed to create pipeline client: %w", err)
 	}
+	defer client.Close()

-	for _, logGroup := range logGroupNames {
-		in.config.LogGroupName = logGroup
-		in.workerOnce.Do(func() {
-			in.workerWg.Add(1)
-			go func() {
-				in.logger.Infof("aws-cloudwatch input worker for log group: '%v' has started", in.config.LogGroupName)
-				defer in.logger.Infof("aws-cloudwatch input worker for log group '%v' has stopped.", in.config.LogGroupName)
-				defer in.workerWg.Done()
-				in.run(svc)
-			}()
-		})
+	logsServiceName := awscommon.CreateServiceName("logs", in.config.AWSConfig.FIPSEnabled, in.config.RegionName)
+	cwConfig := awscommon.EnrichAWSConfigWithEndpoint(in.config.AWSConfig.Endpoint, logsServiceName, in.config.RegionName, in.awsConfig)
+	svc := cloudwatchlogs.New(cwConfig)
+
+	logGroupNames, err := getLogGroupNames(svc, in.config.LogGroupNamePrefix, in.config.LogGroupName)
+	if err != nil {
+		return fmt.Errorf("failed to get log group names: %w", err)
 	}
+
+	log := inputContext.Logger
+	metricRegistry := monitoring.GetNamespace("dataset").GetRegistry()
+	metrics := newInputMetrics(metricRegistry, inputContext.ID)
+	cwPoller := newCloudwatchPoller(
+		log.Named("cloudwatch_poller"),
+		metrics,
+		persistentStore,
+		in.awsConfig.Region,
+		in.config.APISleep,
+		in.config.NumberOfWorkers,
+		in.config.LogStreams,
+		in.config.LogStreamPrefix)
+	logProcessor := newLogProcessor(log.Named("log_processor"), metrics, client, ctx)
+	cwPoller.metrics.logGroupsTotal.Add(uint64(len(logGroupNames)))
+	return in.Receive(svc, cwPoller, ctx, logProcessor, logGroupNames)
 }

-func (in *awsCloudWatchInput) run(svc cloudwatchlogsiface.ClientAPI) {
-	for in.inputCtx.Err() == nil {
-		err := in.getLogEventsFromCloudWatch(svc)
+func (in *cloudwatchInput) Receive(svc cloudwatchlogsiface.ClientAPI, cwPoller *cloudwatchPoller, ctx context.Context, logProcessor *logProcessor, logGroupNames []string) error {
+	// This loop tries to keep the workers as busy as possible while honoring
+	// the configured number of workers, as opposed to a simpler loop that does
+	// one listing, sequentially processes every log group, and then lists again.
+	start := true
+	workerWg := new(sync.WaitGroup)
+	lastLogGroupOffset := 0
+	for ctx.Err() == nil {
+		if start == false {
+			cwPoller.log.Debugf("sleeping for %v before checking new logs", in.config.ScanFrequency)
+			time.Sleep(in.config.ScanFrequency)
+			cwPoller.log.Debug("done sleeping")
+		}
+		start = false
+
+		currentTime := time.Now()
+		cwPoller.startTime, cwPoller.endTime = getStartPosition(in.config.StartPosition, currentTime, cwPoller.endTime, in.config.ScanFrequency, in.config.Latency)
+		cwPoller.log.Debugf("start_position = %s, startTime = %v, endTime = %v", in.config.StartPosition, time.Unix(cwPoller.startTime/1000, 0), time.Unix(cwPoller.endTime/1000, 0))
+		availableWorkers, err := cwPoller.workerSem.AcquireContext(in.config.NumberOfWorkers, ctx)
 		if err != nil {
-			var aerr *awssdk.RequestCanceledError
-			if errors.As(err, &aerr) {
-				continue
-			}
-			in.logger.Error("getLogEventsFromCloudWatch failed: ", err)
+			break
+		}
+
+		if availableWorkers == 0 {
 			continue
 		}
-		in.logger.Debugf("sleeping for %v before checking new logs", in.config.ScanFrequency)
-		time.Sleep(in.config.ScanFrequency)
-		in.logger.Debug("done sleeping")
+		workerWg.Add(availableWorkers)
+		logGroupNamesLength := len(logGroupNames)
+		runningGoroutines := 0
+
+		for i
:= lastLogGroupOffset; i < logGroupNamesLength; i++ { + if runningGoroutines >= availableWorkers { + break + } + + runningGoroutines++ + lastLogGroupOffset = i + 1 + if lastLogGroupOffset >= logGroupNamesLength { + // release unused workers + cwPoller.workerSem.Release(availableWorkers - runningGoroutines) + for j := 0; j < availableWorkers-runningGoroutines; j++ { + workerWg.Done() + } + lastLogGroupOffset = 0 + } + + lg := logGroupNames[i] + go func(logGroup string, startTime int64, endTime int64) { + defer func() { + cwPoller.log.Infof("aws-cloudwatch input worker for log group '%v' has stopped.", logGroup) + workerWg.Done() + cwPoller.workerSem.Release(1) + }() + cwPoller.log.Infof("aws-cloudwatch input worker for log group: '%v' has started", logGroup) + cwPoller.run(svc, logGroup, startTime, endTime, logProcessor) + }(lg, cwPoller.startTime, cwPoller.endTime) + } + } + + // Wait for all workers to finish. + workerWg.Wait() + if errors.Is(ctx.Err(), context.Canceled) { + // A canceled context is a normal shutdown. + return nil } + return ctx.Err() } func parseARN(logGroupARN string) (string, string, error) { @@ -185,10 +245,14 @@ func parseARN(logGroupARN string) (string, string, error) { } // getLogGroupNames uses DescribeLogGroups API to retrieve all log group names -func (in *awsCloudWatchInput) getLogGroupNames(svc cloudwatchlogsiface.ClientAPI) ([]string, error) { +func getLogGroupNames(svc cloudwatchlogsiface.ClientAPI, logGroupNamePrefix string, logGroupName string) ([]string, error) { + if logGroupNamePrefix == "" { + return []string{logGroupName}, nil + } + // construct DescribeLogGroupsInput filterLogEventsInput := &cloudwatchlogs.DescribeLogGroupsInput{ - LogGroupNamePrefix: awssdk.String(in.config.LogGroupNamePrefix), + LogGroupNamePrefix: awssdk.String(logGroupNamePrefix), } // make API request @@ -197,75 +261,18 @@ func (in *awsCloudWatchInput) getLogGroupNames(svc cloudwatchlogsiface.ClientAPI var logGroupNames []string for p.Next(context.TODO()) { page := p.CurrentPage() - in.logger.Debugf("Collecting #%v log group names", len(page.LogGroups)) for _, lg := range page.LogGroups { logGroupNames = append(logGroupNames, *lg.LogGroupName) } } if err := p.Err(); err != nil { - in.logger.Error("failed DescribeLogGroupsRequest: ", err) return logGroupNames, err } return logGroupNames, nil } -// getLogEventsFromCloudWatch uses FilterLogEvents API to collect logs from CloudWatch -func (in *awsCloudWatchInput) getLogEventsFromCloudWatch(svc cloudwatchlogsiface.ClientAPI) error { - currentTime := time.Now() - startTime, endTime := getStartPosition(in.config.StartPosition, currentTime, in.prevEndTime, in.config.ScanFrequency, in.config.Latency) - in.logger.Debugf("start_position = %s, startTime = %v, endTime = %v", in.config.StartPosition, time.Unix(startTime/1000, 0), time.Unix(endTime/1000, 0)) - - // overwrite prevEndTime using new endTime - in.prevEndTime = endTime - - // construct FilterLogEventsInput - filterLogEventsInput := in.constructFilterLogEventsInput(startTime, endTime) - - // make API request - req := svc.FilterLogEventsRequest(filterLogEventsInput) - paginator := cloudwatchlogs.NewFilterLogEventsPaginator(req) - for paginator.Next(context.TODO()) { - page := paginator.CurrentPage() - - logEvents := page.Events - in.logger.Debugf("Processing #%v events", len(logEvents)) - err := in.processLogEvents(logEvents) - if err != nil { - err = errors.Wrap(err, "processLogEvents failed") - in.logger.Error(err) - } - } - - if err := paginator.Err(); err != nil { - return 
errors.Wrap(err, "error FilterLogEvents with Paginator") - } - - // This sleep is to avoid hitting the FilterLogEvents API limit(5 transactions per second (TPS)/account/Region). - in.logger.Debugf("sleeping for %v before making FilterLogEvents API call again", in.config.APISleep) - time.Sleep(in.config.APISleep) - in.logger.Debug("done sleeping") - return nil -} - -func (in *awsCloudWatchInput) constructFilterLogEventsInput(startTime int64, endTime int64) *cloudwatchlogs.FilterLogEventsInput { - filterLogEventsInput := &cloudwatchlogs.FilterLogEventsInput{ - LogGroupName: awssdk.String(in.config.LogGroupName), - StartTime: awssdk.Int64(startTime), - EndTime: awssdk.Int64(endTime), - } - - if len(in.config.LogStreams) > 0 { - filterLogEventsInput.LogStreamNames = in.config.LogStreams - } - - if in.config.LogStreamPrefix != "" { - filterLogEventsInput.LogStreamNamePrefix = awssdk.String(in.config.LogStreamPrefix) - } - return filterLogEventsInput -} - -func getStartPosition(startPosition string, currentTime time.Time, prevEndTime int64, scanFrequency time.Duration, latency time.Duration) (startTime int64, endTime int64) { +func getStartPosition(startPosition string, currentTime time.Time, endTime int64, scanFrequency time.Duration, latency time.Duration) (int64, int64) { if latency != 0 { // add latency if config is not 0 currentTime = currentTime.Add(latency * -1) @@ -273,77 +280,15 @@ func getStartPosition(startPosition string, currentTime time.Time, prevEndTime i switch startPosition { case "beginning": - if prevEndTime != int64(0) { - return prevEndTime, currentTime.UnixNano() / int64(time.Millisecond) + if endTime != int64(0) { + return endTime, currentTime.UnixNano() / int64(time.Millisecond) } return 0, currentTime.UnixNano() / int64(time.Millisecond) case "end": - if prevEndTime != int64(0) { - return prevEndTime, currentTime.UnixNano() / int64(time.Millisecond) + if endTime != int64(0) { + return endTime, currentTime.UnixNano() / int64(time.Millisecond) } return currentTime.Add(-scanFrequency).UnixNano() / int64(time.Millisecond), currentTime.UnixNano() / int64(time.Millisecond) } - return -} - -func (in *awsCloudWatchInput) processLogEvents(logEvents []cloudwatchlogs.FilteredLogEvent) error { - for _, logEvent := range logEvents { - event := createEvent(logEvent, in.config.LogGroupName, in.config.RegionName) - err := in.forwardEvent(event) - if err != nil { - err = errors.Wrap(err, "forwardEvent failed") - in.logger.Error(err) - return err - } - } - return nil -} - -func createEvent(logEvent cloudwatchlogs.FilteredLogEvent, logGroup string, regionName string) beat.Event { - event := beat.Event{ - Timestamp: time.Unix(*logEvent.Timestamp/1000, 0).UTC(), - Fields: common.MapStr{ - "message": *logEvent.Message, - "log.file.path": logGroup + "/" + *logEvent.LogStreamName, - "event": common.MapStr{ - "id": *logEvent.EventId, - "ingested": time.Now(), - }, - "awscloudwatch": common.MapStr{ - "log_group": logGroup, - "log_stream": *logEvent.LogStreamName, - "ingestion_time": time.Unix(*logEvent.IngestionTime/1000, 0), - }, - "cloud": common.MapStr{ - "provider": "aws", - "region": regionName, - }, - }, - } - event.SetID(*logEvent.EventId) - - return event -} - -func (in *awsCloudWatchInput) forwardEvent(event beat.Event) error { - ok := in.outlet.OnEvent(event) - if !ok { - return errors.New("OnEvent returned false. 
Stopping input worker") - } - return nil -} - -// Stop stops the aws-cloudwatch input -func (in *awsCloudWatchInput) Stop() { - in.stopOnce.Do(func() { - defer in.outlet.Close() - close(in.close) - in.logger.Info("Stopping aws-cloudwatch input") - }) -} - -// Wait is an alias for Stop. -func (in *awsCloudWatchInput) Wait() { - in.Stop() - in.workerWg.Wait() + return 0, 0 } diff --git a/x-pack/filebeat/input/awscloudwatch/input_integration_test.go b/x-pack/filebeat/input/awscloudwatch/input_integration_test.go new file mode 100644 index 00000000000..633a0ddcada --- /dev/null +++ b/x-pack/filebeat/input/awscloudwatch/input_integration_test.go @@ -0,0 +1,232 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// See _meta/terraform/README.md for integration test usage instructions. + +//go:build integration && aws +// +build integration,aws + +package awscloudwatch + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "golang.org/x/sync/errgroup" + "gopkg.in/yaml.v2" + + awssdk "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/external" + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/cloudwatchlogsiface" + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/filebeat/beater" + v2 "github.com/elastic/beats/v7/filebeat/input/v2" + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/monitoring" + pubtest "github.com/elastic/beats/v7/libbeat/publisher/testing" + "github.com/elastic/beats/v7/libbeat/statestore" + "github.com/elastic/beats/v7/libbeat/statestore/storetest" + awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" +) + +const ( + inputID = "test_id" + message1 = "test1" + message2 = "test2" + terraformOutputYML = "_meta/terraform/outputs.yml" + logGroupNamePrefix = "filebeat-log-group-integtest-" +) + +var ( + cloudwatchConfig = common.MapStr{ + "start_position": "beginning", + "scan_frequency": 10 * time.Second, + "api_timeout": 120 * time.Second, + "number_of_workers": 1, + } +) + +type terraformOutputData struct { + AWSRegion string `yaml:"aws_region"` + LogGroup1 string `yaml:"log_group_name_1"` + LogGroup2 string `yaml:"log_group_name_2"` + LogStream1 string `yaml:"log_stream_name_1"` + LogStream2 string `yaml:"log_stream_name_2"` +} + +func getTerraformOutputs(t *testing.T) terraformOutputData { + t.Helper() + + ymlData, err := ioutil.ReadFile(terraformOutputYML) + if os.IsNotExist(err) { + t.Skipf("Run 'terraform apply' in %v to setup CloudWatch log groups and log streams for the test.", filepath.Dir(terraformOutputYML)) + } + if err != nil { + t.Fatalf("failed reading terraform output data: %v", err) + } + + var rtn terraformOutputData + dec := yaml.NewDecoder(bytes.NewReader(ymlData)) + dec.SetStrict(true) + if err = dec.Decode(&rtn); err != nil { + t.Fatal(err) + } + + return rtn +} + +func assertMetric(t *testing.T, snapshot common.MapStr, name string, value interface{}) { + n, _ := snapshot.GetValue(inputID + "." 
+ name)
+	assert.EqualValues(t, value, n, name)
+}
+
+func newV2Context() (v2.Context, func()) {
+	ctx, cancel := context.WithCancel(context.Background())
+	return v2.Context{
+		Logger:      logp.NewLogger(inputName).With("id", inputID),
+		ID:          inputID,
+		Cancelation: ctx,
+	}, cancel
+}
+
+type testInputStore struct {
+	registry *statestore.Registry
+}
+
+func openTestStatestore() beater.StateStore {
+	return &testInputStore{
+		registry: statestore.NewRegistry(storetest.NewMemoryStoreBackend()),
+	}
+}
+
+func (s *testInputStore) Close() {
+	s.registry.Close()
+}
+
+func (s *testInputStore) Access() (*statestore.Store, error) {
+	return s.registry.Get("filebeat")
+}
+
+func (s *testInputStore) CleanupInterval() time.Duration {
+	return 24 * time.Hour
+}
+
+func createInput(t *testing.T, cfg *common.Config) *cloudwatchInput {
+	inputV2, err := Plugin(openTestStatestore()).Manager.Create(cfg)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	return inputV2.(*cloudwatchInput)
+}
+
+func makeTestConfigWithLogGroupNamePrefix(regionName string) *common.Config {
+	return common.MustNewConfigFrom(fmt.Sprintf(`---
+log_group_name_prefix: %s
+region_name: %s
+`, logGroupNamePrefix, regionName))
+}
+
+func uploadLogMessage(t *testing.T, svc cloudwatchlogsiface.ClientAPI, message string, timestamp int64, logGroupName string, logStreamName string) {
+	describeLogStreamsInput := cloudwatchlogs.DescribeLogStreamsInput{
+		LogGroupName:        awssdk.String(logGroupName),
+		LogStreamNamePrefix: awssdk.String(logStreamName),
+	}
+
+	reqDescribeLogStreams := svc.DescribeLogStreamsRequest(&describeLogStreamsInput)
+	resp, err := reqDescribeLogStreams.Send(context.TODO())
+	if err != nil {
+		t.Fatalf("Failed to describe log stream %q in log group %q: %v", logStreamName, logGroupName, err)
+	}
+
+	if len(resp.LogStreams) != 1 {
+		t.Fatalf("Describe log stream %q in log group %q should return 1 and only 1 value", logStreamName, logGroupName)
+	}
+
+	inputLogEvent := cloudwatchlogs.InputLogEvent{
+		Message:   awssdk.String(message),
+		Timestamp: awssdk.Int64(timestamp),
+	}
+
+	reqPutLogEvents := svc.PutLogEventsRequest(
+		&cloudwatchlogs.PutLogEventsInput{
+			LogEvents:     []cloudwatchlogs.InputLogEvent{inputLogEvent},
+			LogGroupName:  awssdk.String(logGroupName),
+			LogStreamName: awssdk.String(logStreamName),
+			SequenceToken: resp.LogStreams[0].UploadSequenceToken,
+		})
+	_, err = reqPutLogEvents.Send(context.TODO())
+	if err != nil {
+		t.Fatalf("Failed to upload message %q into log stream %q in log group %q: %v", message, logStreamName, logGroupName, err)
+	}
+}
+
+func TestInputWithLogGroupNamePrefix(t *testing.T) {
+	logp.TestingSetup()
+
+	// Terraform is used to set up the CloudWatch log groups and log streams and must be executed manually.
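+	// The log group and stream names consumed below come from the Terraform
+	// configuration under _meta/terraform, which writes its outputs to
+	// _meta/terraform/outputs.yml; see _meta/terraform/README.md for usage.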
+ tfConfig := getTerraformOutputs(t) + + cfg, err := external.LoadDefaultAWSConfig() + if err != nil { + t.Fatal(err) + } + cfg.Region = tfConfig.AWSRegion + + // upload log messages for testing + svc := cloudwatchlogs.New(cfg) + currentTime := time.Now() + timestamp := currentTime.UnixNano() / int64(time.Millisecond) + + uploadLogMessage(t, svc, message1, timestamp, tfConfig.LogGroup1, tfConfig.LogStream1) + uploadLogMessage(t, svc, message2, timestamp, tfConfig.LogGroup2, tfConfig.LogStream2) + + // sleep for 30 seconds to wait for the log messages to show up + time.Sleep(30 * time.Second) + + cloudwatchInput := createInput(t, makeTestConfigWithLogGroupNamePrefix(tfConfig.AWSRegion)) + inputCtx, cancel := newV2Context() + t.Cleanup(cancel) + time.AfterFunc(30*time.Second, func() { + cancel() + }) + + client := pubtest.NewChanClient(0) + defer close(client.Channel) + go func() { + for event := range client.Channel { + // Fake the ACK handling that's not implemented in pubtest. + event.Private.(*awscommon.EventACKTracker).ACK() + } + }() + + var errGroup errgroup.Group + errGroup.Go(func() error { + pipeline := pubtest.PublisherWithClient(client) + return cloudwatchInput.Run(inputCtx, pipeline) + }) + + if err := errGroup.Wait(); err != nil { + t.Fatal(err) + } + + snap := common.MapStr(monitoring.CollectStructSnapshot( + monitoring.GetNamespace("dataset").GetRegistry(), + monitoring.Full, + false)) + t.Log(snap.StringToPrint()) + + assertMetric(t, snap, "log_events_received_total", 2) + assertMetric(t, snap, "log_groups_total", 2) + assertMetric(t, snap, "cloudwatch_events_created_total", 2) +} diff --git a/x-pack/filebeat/input/awscloudwatch/input_test.go b/x-pack/filebeat/input/awscloudwatch/input_test.go index 7d8b45f7d44..c094a1cddb5 100644 --- a/x-pack/filebeat/input/awscloudwatch/input_test.go +++ b/x-pack/filebeat/input/awscloudwatch/input_test.go @@ -15,7 +15,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/cloudwatchlogsiface" - "github.com/elastic/beats/v7/filebeat/input/inputtest" "github.com/elastic/beats/v7/libbeat/common" ) @@ -197,11 +196,3 @@ func TestParseARN(t *testing.T) { assert.Equal(t, "us-east-1", regionName) assert.NoError(t, err) } - -func TestNewInputDone(t *testing.T) { - config := common.MapStr{ - "log_group_name": "some-group", - "region_name": "eu-west-1", - } - inputtest.AssertNotStartedInputCanBeDone(t, NewInput, &config) -} diff --git a/x-pack/filebeat/input/awscloudwatch/metrics.go b/x-pack/filebeat/input/awscloudwatch/metrics.go new file mode 100644 index 00000000000..8d53ec5700c --- /dev/null +++ b/x-pack/filebeat/input/awscloudwatch/metrics.go @@ -0,0 +1,39 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package awscloudwatch + +import ( + "github.com/elastic/beats/v7/libbeat/monitoring" +) + +type inputMetrics struct { + id string // Input ID. + parent *monitoring.Registry // Parent registry holding this input's ID as a key. + + logEventsReceivedTotal *monitoring.Uint // Number of CloudWatch log events received. + logGroupsTotal *monitoring.Uint // Logs collected from number of CloudWatch log groups. + cloudwatchEventsCreatedTotal *monitoring.Uint // Number of events created from processing logs from CloudWatch. 
+ apiCallsTotal *monitoring.Uint // Number of API calls made total. +} + +// Close removes the metrics from the registry. +func (m *inputMetrics) Close() { + m.parent.Remove(m.id) +} + +func newInputMetrics(parent *monitoring.Registry, id string) *inputMetrics { + reg := parent.NewRegistry(id) + monitoring.NewString(reg, "input").Set(inputName) + monitoring.NewString(reg, "id").Set(id) + out := &inputMetrics{ + id: id, + parent: reg, + logEventsReceivedTotal: monitoring.NewUint(reg, "log_events_received_total"), + logGroupsTotal: monitoring.NewUint(reg, "log_groups_total"), + cloudwatchEventsCreatedTotal: monitoring.NewUint(reg, "cloudwatch_events_created_total"), + apiCallsTotal: monitoring.NewUint(reg, "api_calls_total"), + } + return out +} diff --git a/x-pack/filebeat/input/awscloudwatch/processor.go b/x-pack/filebeat/input/awscloudwatch/processor.go new file mode 100644 index 00000000000..558e91d5da5 --- /dev/null +++ b/x-pack/filebeat/input/awscloudwatch/processor.go @@ -0,0 +1,78 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package awscloudwatch + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/monitoring" + awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" +) + +type logProcessor struct { + log *logp.Logger + metrics *inputMetrics + publisher beat.Client + ack *awscommon.EventACKTracker +} + +func newLogProcessor(log *logp.Logger, metrics *inputMetrics, publisher beat.Client, ctx context.Context) *logProcessor { + if metrics == nil { + metrics = newInputMetrics(monitoring.NewRegistry(), "") + } + return &logProcessor{ + log: log, + metrics: metrics, + publisher: publisher, + ack: awscommon.NewEventACKTracker(ctx), + } +} + +func (p *logProcessor) processLogEvents(logEvents []cloudwatchlogs.FilteredLogEvent, logGroup string, regionName string) error { + for _, logEvent := range logEvents { + event := createEvent(logEvent, logGroup, regionName) + p.publish(p.ack, &event) + } + return nil +} + +func (p *logProcessor) publish(ack *awscommon.EventACKTracker, event *beat.Event) { + ack.Add() + event.Private = ack + p.metrics.cloudwatchEventsCreatedTotal.Inc() + p.publisher.Publish(*event) +} + +func createEvent(logEvent cloudwatchlogs.FilteredLogEvent, logGroup string, regionName string) beat.Event { + event := beat.Event{ + Timestamp: time.Unix(*logEvent.Timestamp/1000, 0).UTC(), + Fields: common.MapStr{ + "message": *logEvent.Message, + "log.file.path": logGroup + "/" + *logEvent.LogStreamName, + "event": common.MapStr{ + "id": *logEvent.EventId, + "ingested": time.Now(), + }, + "awscloudwatch": common.MapStr{ + "log_group": logGroup, + "log_stream": *logEvent.LogStreamName, + "ingestion_time": time.Unix(*logEvent.IngestionTime/1000, 0), + }, + "cloud": common.MapStr{ + "provider": "aws", + "region": regionName, + }, + }, + } + event.SetID(*logEvent.EventId) + + return event +} diff --git a/x-pack/filebeat/input/awss3/input.go b/x-pack/filebeat/input/awss3/input.go index 8d673cabbac..e1558b552a0 100644 --- a/x-pack/filebeat/input/awss3/input.go +++ b/x-pack/filebeat/input/awss3/input.go @@ -111,7 +111,7 @@ func 
(in *s3Input) Run(inputContext v2.Context, pipeline beat.Pipeline) error { // Create client for publishing events and receive notification of their ACKs. client, err := pipeline.ConnectWith(beat.ClientConfig{ CloseRef: inputContext.Cancelation, - ACKHandler: newEventACKHandler(), + ACKHandler: awscommon.NewEventACKHandler(), }) if err != nil { return fmt.Errorf("failed to create pipeline client: %w", err) diff --git a/x-pack/filebeat/input/awss3/input_benchmark_test.go b/x-pack/filebeat/input/awss3/input_benchmark_test.go index ecdc1756ce4..ec7068bb733 100644 --- a/x-pack/filebeat/input/awss3/input_benchmark_test.go +++ b/x-pack/filebeat/input/awss3/input_benchmark_test.go @@ -15,7 +15,6 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/sqs" "github.com/dustin/go-humanize" @@ -29,6 +28,7 @@ import ( pubtest "github.com/elastic/beats/v7/libbeat/publisher/testing" "github.com/elastic/beats/v7/libbeat/statestore" "github.com/elastic/beats/v7/libbeat/statestore/storetest" + awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" ) const cloudtrailTestFile = "testdata/aws-cloudtrail.json.gz" @@ -172,7 +172,7 @@ func benchmarkInputSQS(t *testing.T, maxMessagesInflight int) testing.BenchmarkR go func() { for event := range client.Channel { // Fake the ACK handling that's not implemented in pubtest. - event.Private.(*eventACKTracker).ACK() + event.Private.(*awscommon.EventACKTracker).ACK() } }() @@ -259,7 +259,7 @@ func benchmarkInputS3(t *testing.T, numberOfWorkers int) testing.BenchmarkResult s3API := newConstantS3(t) s3API.pagerConstant = newS3PagerConstant() client := pubtest.NewChanClientWithCallback(100, func(event beat.Event) { - event.Private.(*eventACKTracker).ACK() + event.Private.(*awscommon.EventACKTracker).ACK() }) defer close(client.Channel) diff --git a/x-pack/filebeat/input/awss3/input_integration_test.go b/x-pack/filebeat/input/awss3/input_integration_test.go index a7f4f651c07..d112e6a4c35 100644 --- a/x-pack/filebeat/input/awss3/input_integration_test.go +++ b/x-pack/filebeat/input/awss3/input_integration_test.go @@ -211,7 +211,7 @@ func TestInputRunSQS(t *testing.T) { go func() { for event := range client.Channel { // Fake the ACK handling that's not implemented in pubtest. - event.Private.(*eventACKTracker).ACK() + event.Private.(*awscommon.EventACKTracker).ACK() } }() @@ -274,7 +274,7 @@ func TestInputRunS3(t *testing.T) { go func() { for event := range client.Channel { // Fake the ACK handling that's not implemented in pubtest. 
- event.Private.(*eventACKTracker).ACK() + event.Private.(*awscommon.EventACKTracker).ACK() } }() @@ -479,7 +479,7 @@ func TestInputRunSNS(t *testing.T) { defer close(client.Channel) go func() { for event := range client.Channel { - event.Private.(*eventACKTracker).ACK() + event.Private.(*awscommon.EventACKTracker).ACK() } }() diff --git a/x-pack/filebeat/input/awss3/interfaces.go b/x-pack/filebeat/input/awss3/interfaces.go index c777072c6c9..1cd1dbf807b 100644 --- a/x-pack/filebeat/input/awss3/interfaces.go +++ b/x-pack/filebeat/input/awss3/interfaces.go @@ -10,6 +10,8 @@ import ( "fmt" "time" + awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" + awssdk "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/sqs" @@ -79,7 +81,7 @@ type s3ObjectHandlerFactory interface { // Create returns a new s3ObjectHandler that can be used to process the // specified S3 object. If the handler is not configured to process the // given S3 object (based on key name) then it will return nil. - Create(ctx context.Context, log *logp.Logger, acker *eventACKTracker, obj s3EventV2) s3ObjectHandler + Create(ctx context.Context, log *logp.Logger, acker *awscommon.EventACKTracker, obj s3EventV2) s3ObjectHandler } type s3ObjectHandler interface { diff --git a/x-pack/filebeat/input/awss3/mock_interfaces_test.go b/x-pack/filebeat/input/awss3/mock_interfaces_test.go index 85c11e0fe80..d315258d177 100644 --- a/x-pack/filebeat/input/awss3/mock_interfaces_test.go +++ b/x-pack/filebeat/input/awss3/mock_interfaces_test.go @@ -18,6 +18,7 @@ import ( gomock "github.com/golang/mock/gomock" logp "github.com/elastic/beats/v7/libbeat/logp" + awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" ) // MockSQSAPI is a mock of sqsAPI interface. @@ -451,7 +452,7 @@ func (m *MockS3ObjectHandlerFactory) EXPECT() *MockS3ObjectHandlerFactoryMockRec } // Create mocks base method. 
-func (m *MockS3ObjectHandlerFactory) Create(ctx context.Context, log *logp.Logger, acker *eventACKTracker, obj s3EventV2) s3ObjectHandler { +func (m *MockS3ObjectHandlerFactory) Create(ctx context.Context, log *logp.Logger, acker *awscommon.EventACKTracker, obj s3EventV2) s3ObjectHandler { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Create", ctx, log, acker, obj) ret0, _ := ret[0].(s3ObjectHandler) diff --git a/x-pack/filebeat/input/awss3/s3.go b/x-pack/filebeat/input/awss3/s3.go index 1688ca7ebc8..aa6e7be8012 100644 --- a/x-pack/filebeat/input/awss3/s3.go +++ b/x-pack/filebeat/input/awss3/s3.go @@ -17,6 +17,7 @@ import ( "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/libbeat/monitoring" "github.com/elastic/beats/v7/libbeat/statestore" + awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" "github.com/elastic/go-concert/timed" ) @@ -44,7 +45,7 @@ type s3Poller struct { region string provider string bucketPollInterval time.Duration - workerSem *sem + workerSem *awscommon.Sem s3 s3API log *logp.Logger metrics *inputMetrics @@ -77,7 +78,7 @@ func newS3Poller(log *logp.Logger, region: awsRegion, provider: provider, bucketPollInterval: bucketPollInterval, - workerSem: newSem(numberOfWorkers), + workerSem: awscommon.NewSem(numberOfWorkers), s3: s3, log: log, metrics: metrics, @@ -191,7 +192,7 @@ func (p *s3Poller) GetS3Objects(ctx context.Context, s3ObjectPayloadChan chan<- event.S3.Bucket.ARN = p.bucket event.S3.Object.Key = filename - acker := newEventACKTracker(ctx) + acker := awscommon.NewEventACKTracker(ctx) s3Processor := p.s3ObjectHandler.Create(ctx, p.log, acker, event) if s3Processor == nil { diff --git a/x-pack/filebeat/input/awss3/s3_objects.go b/x-pack/filebeat/input/awss3/s3_objects.go index ebe1a5f0828..f2cad661567 100644 --- a/x-pack/filebeat/input/awss3/s3_objects.go +++ b/x-pack/filebeat/input/awss3/s3_objects.go @@ -19,6 +19,8 @@ import ( "strings" "time" + awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/pkg/errors" @@ -73,7 +75,7 @@ func (f *s3ObjectProcessorFactory) findReaderConfig(key string) *readerConfig { // Create returns a new s3ObjectProcessor. It returns nil when no file selectors // match the S3 object key. -func (f *s3ObjectProcessorFactory) Create(ctx context.Context, log *logp.Logger, ack *eventACKTracker, obj s3EventV2) s3ObjectHandler { +func (f *s3ObjectProcessorFactory) Create(ctx context.Context, log *logp.Logger, ack *awscommon.EventACKTracker, obj s3EventV2) s3ObjectHandler { log = log.With( "bucket_arn", obj.S3.Bucket.Name, "object_key", obj.S3.Object.Key) @@ -100,9 +102,9 @@ type s3ObjectProcessor struct { log *logp.Logger ctx context.Context - acker *eventACKTracker // ACKer tied to the SQS message (multiple S3 readers share an ACKer when the S3 notification event contains more than one S3 object). - readerConfig *readerConfig // Config about how to process the object. - s3Obj s3EventV2 // S3 object information. + acker *awscommon.EventACKTracker // ACKer tied to the SQS message (multiple S3 readers share an ACKer when the S3 notification event contains more than one S3 object). + readerConfig *readerConfig // Config about how to process the object. + s3Obj s3EventV2 // S3 object information. 
s3ObjHash string s3RequestURL string @@ -313,7 +315,7 @@ func (p *s3ObjectProcessor) readFile(r io.Reader) error { return nil } -func (p *s3ObjectProcessor) publish(ack *eventACKTracker, event *beat.Event) { +func (p *s3ObjectProcessor) publish(ack *awscommon.EventACKTracker, event *beat.Event) { ack.Add() event.Private = ack p.metrics.s3EventsCreatedTotal.Inc() diff --git a/x-pack/filebeat/input/awss3/s3_objects_test.go b/x-pack/filebeat/input/awss3/s3_objects_test.go index 952fbb757dc..4ab3edfaa4b 100644 --- a/x-pack/filebeat/input/awss3/s3_objects_test.go +++ b/x-pack/filebeat/input/awss3/s3_objects_test.go @@ -24,6 +24,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" ) func newS3Object(t testing.TB, filename, contentType string) (s3EventV2, *s3.GetObjectResponse) { @@ -162,7 +163,7 @@ func TestS3ObjectProcessor(t *testing.T) { Return(nil, errFakeConnectivityFailure) s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, mockPublisher, nil) - ack := newEventACKTracker(ctx) + ack := awscommon.NewEventACKTracker(ctx) err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), ack, s3Event).ProcessS3Object() require.Error(t, err) assert.True(t, errors.Is(err, errFakeConnectivityFailure), "expected errFakeConnectivityFailure error") @@ -184,7 +185,7 @@ func TestS3ObjectProcessor(t *testing.T) { Return(nil, nil) s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, mockPublisher, nil) - ack := newEventACKTracker(ctx) + ack := awscommon.NewEventACKTracker(ctx) err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), ack, s3Event).ProcessS3Object() require.Error(t, err) }) @@ -211,7 +212,7 @@ func TestS3ObjectProcessor(t *testing.T) { ) s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, mockPublisher, nil) - ack := newEventACKTracker(ctx) + ack := awscommon.NewEventACKTracker(ctx) err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), ack, s3Event).ProcessS3Object() require.NoError(t, err) }) @@ -249,13 +250,13 @@ func _testProcessS3Object(t testing.TB, file, contentType string, numEvents int, ) s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockS3API, mockPublisher, selectors) - ack := newEventACKTracker(ctx) + ack := awscommon.NewEventACKTracker(ctx) err := s3ObjProc.Create(ctx, logp.NewLogger(inputName), ack, s3Event).ProcessS3Object() if !expectErr { require.NoError(t, err) assert.Equal(t, numEvents, len(events)) - assert.EqualValues(t, numEvents, ack.pendingACKs) + assert.EqualValues(t, numEvents, ack.PendingACKs) } else { require.Error(t, err) } diff --git a/x-pack/filebeat/input/awss3/s3_test.go b/x-pack/filebeat/input/awss3/s3_test.go index b41349c1c8b..ef39e085e1f 100644 --- a/x-pack/filebeat/input/awss3/s3_test.go +++ b/x-pack/filebeat/input/awss3/s3_test.go @@ -9,18 +9,15 @@ import ( "testing" "time" - "github.com/elastic/beats/v7/libbeat/statestore" - "github.com/elastic/beats/v7/libbeat/statestore/storetest" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/statestore" + 
"github.com/elastic/beats/v7/libbeat/statestore/storetest" ) func TestS3Poller(t *testing.T) { @@ -135,7 +132,7 @@ func TestS3Poller(t *testing.T) { s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockAPI, mockPublisher, nil) receiver := newS3Poller(logp.NewLogger(inputName), nil, mockAPI, s3ObjProc, newStates(inputCtx), store, bucket, "key", "region", "provider", numberOfWorkers, pollInterval) require.Error(t, context.DeadlineExceeded, receiver.Poll(ctx)) - assert.Equal(t, numberOfWorkers, receiver.workerSem.available) + assert.Equal(t, numberOfWorkers, receiver.workerSem.Available()) }) t.Run("retry after Poll error", func(t *testing.T) { @@ -265,6 +262,6 @@ func TestS3Poller(t *testing.T) { s3ObjProc := newS3ObjectProcessorFactory(logp.NewLogger(inputName), nil, mockAPI, mockPublisher, nil) receiver := newS3Poller(logp.NewLogger(inputName), nil, mockAPI, s3ObjProc, newStates(inputCtx), store, bucket, "key", "region", "provider", numberOfWorkers, pollInterval) require.Error(t, context.DeadlineExceeded, receiver.Poll(ctx)) - assert.Equal(t, numberOfWorkers, receiver.workerSem.available) + assert.Equal(t, numberOfWorkers, receiver.workerSem.Available()) }) } diff --git a/x-pack/filebeat/input/awss3/sqs.go b/x-pack/filebeat/input/awss3/sqs.go index 1f13ec010cf..f1fc7588e37 100644 --- a/x-pack/filebeat/input/awss3/sqs.go +++ b/x-pack/filebeat/input/awss3/sqs.go @@ -14,6 +14,7 @@ import ( "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/libbeat/monitoring" + awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" "github.com/elastic/go-concert/timed" ) @@ -23,7 +24,7 @@ const ( type sqsReader struct { maxMessagesInflight int - workerSem *sem + workerSem *awscommon.Sem sqs sqsAPI msgHandler sqsProcessor log *logp.Logger @@ -36,7 +37,7 @@ func newSQSReader(log *logp.Logger, metrics *inputMetrics, sqs sqsAPI, maxMessag } return &sqsReader{ maxMessagesInflight: maxMessagesInflight, - workerSem: newSem(maxMessagesInflight), + workerSem: awscommon.NewSem(maxMessagesInflight), sqs: sqs, msgHandler: msgHandler, log: log, diff --git a/x-pack/filebeat/input/awss3/sqs_s3_event.go b/x-pack/filebeat/input/awss3/sqs_s3_event.go index d1865aec9cd..c17efd87d53 100644 --- a/x-pack/filebeat/input/awss3/sqs_s3_event.go +++ b/x-pack/filebeat/input/awss3/sqs_s3_event.go @@ -14,6 +14,8 @@ import ( "sync" "time" + awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" + "github.com/aws/aws-sdk-go-v2/aws/awserr" "github.com/aws/aws-sdk-go-v2/service/sqs" "github.com/pkg/errors" @@ -275,7 +277,7 @@ func (p *sqsS3EventProcessor) processS3Events(ctx context.Context, log *logp.Log defer log.Debug("End processing SQS S3 event notifications.") // Wait for all events to be ACKed before proceeding. 
- acker := newEventACKTracker(ctx) + acker := awscommon.NewEventACKTracker(ctx) defer acker.Wait() var errs []error diff --git a/x-pack/filebeat/input/awss3/sqs_s3_event_test.go b/x-pack/filebeat/input/awss3/sqs_s3_event_test.go index 6100dbe3119..fddfb3d0e74 100644 --- a/x-pack/filebeat/input/awss3/sqs_s3_event_test.go +++ b/x-pack/filebeat/input/awss3/sqs_s3_event_test.go @@ -19,6 +19,7 @@ import ( "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/logp" + awscommon "github.com/elastic/beats/v7/x-pack/libbeat/common/aws" "github.com/elastic/go-concert/timed" ) @@ -104,7 +105,7 @@ func TestSQSS3EventProcessor(t *testing.T) { gomock.InOrder( mockS3HandlerFactory.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, _ *logp.Logger, _ *eventACKTracker, _ s3EventV2) { + Do(func(ctx context.Context, _ *logp.Logger, _ *awscommon.EventACKTracker, _ s3EventV2) { timed.Wait(ctx, 5*visibilityTimeout) }).Return(mockS3Handler), mockS3Handler.EXPECT().ProcessS3Object().Return(nil), diff --git a/x-pack/filebeat/input/awss3/sqs_test.go b/x-pack/filebeat/input/awss3/sqs_test.go index a8b6e7b5f2a..a2414736198 100644 --- a/x-pack/filebeat/input/awss3/sqs_test.go +++ b/x-pack/filebeat/input/awss3/sqs_test.go @@ -70,7 +70,7 @@ func TestSQSReceiver(t *testing.T) { // Execute sqsReader and verify calls/state. receiver := newSQSReader(logp.NewLogger(inputName), nil, mockAPI, maxMessages, mockMsgHandler) require.NoError(t, receiver.Receive(ctx)) - assert.Equal(t, maxMessages, receiver.workerSem.available) + assert.Equal(t, maxMessages, receiver.workerSem.Available()) }) t.Run("retry after ReceiveMessage error", func(t *testing.T) { @@ -103,7 +103,7 @@ func TestSQSReceiver(t *testing.T) { // Execute SQSReceiver and verify calls/state. receiver := newSQSReader(logp.NewLogger(inputName), nil, mockAPI, maxMessages, mockMsgHandler) require.NoError(t, receiver.Receive(ctx)) - assert.Equal(t, maxMessages, receiver.workerSem.available) + assert.Equal(t, maxMessages, receiver.workerSem.Available()) }) } diff --git a/x-pack/filebeat/input/default-inputs/inputs_other.go b/x-pack/filebeat/input/default-inputs/inputs_other.go index c31106c3baa..5a268fa4758 100644 --- a/x-pack/filebeat/input/default-inputs/inputs_other.go +++ b/x-pack/filebeat/input/default-inputs/inputs_other.go @@ -12,6 +12,7 @@ import ( v2 "github.com/elastic/beats/v7/filebeat/input/v2" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/x-pack/filebeat/input/awscloudwatch" "github.com/elastic/beats/v7/x-pack/filebeat/input/awss3" "github.com/elastic/beats/v7/x-pack/filebeat/input/cloudfoundry" "github.com/elastic/beats/v7/x-pack/filebeat/input/http_endpoint" @@ -26,5 +27,6 @@ func xpackInputs(info beat.Info, log *logp.Logger, store beater.StateStore) []v2 httpjson.Plugin(log, store), o365audit.Plugin(log, store), awss3.Plugin(store), + awscloudwatch.Plugin(store), } } diff --git a/x-pack/filebeat/input/awss3/acker.go b/x-pack/libbeat/common/aws/acker.go similarity index 75% rename from x-pack/filebeat/input/awss3/acker.go rename to x-pack/libbeat/common/aws/acker.go index db88c23f7d1..347347dde67 100644 --- a/x-pack/filebeat/input/awss3/acker.go +++ b/x-pack/libbeat/common/aws/acker.go @@ -2,7 +2,7 @@ // or more contributor license agreements. 
Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package awss3 +package aws import ( "context" @@ -12,40 +12,40 @@ import ( "github.com/elastic/beats/v7/libbeat/common/acker" ) -// eventACKTracker tracks the publishing state of S3 objects. Specifically +// EventACKTracker tracks the publishing state of S3 objects. Specifically // it tracks the number of message acknowledgements that are pending from the // output. It can be used to wait until all ACKs have been received for one or // more S3 objects. -type eventACKTracker struct { +type EventACKTracker struct { sync.Mutex - pendingACKs int64 + PendingACKs int64 ctx context.Context cancel context.CancelFunc } -func newEventACKTracker(ctx context.Context) *eventACKTracker { +func NewEventACKTracker(ctx context.Context) *EventACKTracker { ctx, cancel := context.WithCancel(ctx) - return &eventACKTracker{ctx: ctx, cancel: cancel} + return &EventACKTracker{ctx: ctx, cancel: cancel} } // Add increments the number of pending ACKs. -func (a *eventACKTracker) Add() { +func (a *EventACKTracker) Add() { a.Lock() - a.pendingACKs++ + a.PendingACKs++ a.Unlock() } // ACK decrements the number of pending ACKs. -func (a *eventACKTracker) ACK() { +func (a *EventACKTracker) ACK() { a.Lock() defer a.Unlock() - if a.pendingACKs <= 0 { + if a.PendingACKs <= 0 { panic("misuse detected: negative ACK counter") } - a.pendingACKs-- - if a.pendingACKs == 0 { + a.PendingACKs-- + if a.PendingACKs == 0 { a.cancel() } } @@ -55,11 +55,11 @@ func (a *eventACKTracker) ACK() { // `Add` calls are made. Failing to do so could reset the pendingACKs // property to 0 and would results in Wait returning after additional // calls to `Add` are made without a corresponding `ACK` call. -func (a *eventACKTracker) Wait() { +func (a *EventACKTracker) Wait() { // If there were never any pending ACKs then cancel the context. (This can // happen when a document contains no events or cannot be read due to an error). a.Lock() - if a.pendingACKs == 0 { + if a.PendingACKs == 0 { a.cancel() } a.Unlock() @@ -68,15 +68,15 @@ func (a *eventACKTracker) Wait() { <-a.ctx.Done() } -// newEventACKHandler returns a beat ACKer that can receive callbacks when +// NewEventACKHandler returns a beat ACKer that can receive callbacks when // an event has been ACKed an output. If the event contains a private metadata // pointing to an eventACKTracker then it will invoke the trackers ACK() method // to decrement the number of pending ACKs. -func newEventACKHandler() beat.ACKer { +func NewEventACKHandler() beat.ACKer { return acker.ConnectionOnly( acker.EventPrivateReporter(func(_ int, privates []interface{}) { for _, private := range privates { - if ack, ok := private.(*eventACKTracker); ok { + if ack, ok := private.(*EventACKTracker); ok { ack.ACK() } } diff --git a/x-pack/filebeat/input/awss3/acker_test.go b/x-pack/libbeat/common/aws/acker_test.go similarity index 79% rename from x-pack/filebeat/input/awss3/acker_test.go rename to x-pack/libbeat/common/aws/acker_test.go index a038e8a39e4..3c470f0b922 100644 --- a/x-pack/filebeat/input/awss3/acker_test.go +++ b/x-pack/libbeat/common/aws/acker_test.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package awss3 +package aws import ( "context" @@ -17,11 +17,11 @@ func TestEventACKTracker(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - acker := newEventACKTracker(ctx) + acker := NewEventACKTracker(ctx) acker.Add() acker.ACK() - assert.EqualValues(t, 0, acker.pendingACKs) + assert.EqualValues(t, 0, acker.PendingACKs) assert.ErrorIs(t, acker.ctx.Err(), context.Canceled) } @@ -29,10 +29,10 @@ func TestEventACKTrackerNoACKs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - acker := newEventACKTracker(ctx) + acker := NewEventACKTracker(ctx) acker.Wait() - assert.EqualValues(t, 0, acker.pendingACKs) + assert.EqualValues(t, 0, acker.PendingACKs) assert.ErrorIs(t, acker.ctx.Err(), context.Canceled) } @@ -41,15 +41,15 @@ func TestEventACKHandler(t *testing.T) { t.Cleanup(cancel) // Create acker. Add one pending ACK. - acker := newEventACKTracker(ctx) + acker := NewEventACKTracker(ctx) acker.Add() // Create an ACK handler and simulate one ACKed event. - ackHandler := newEventACKHandler() + ackHandler := NewEventACKHandler() ackHandler.AddEvent(beat.Event{Private: acker}, true) ackHandler.ACKEvents(1) - assert.EqualValues(t, 0, acker.pendingACKs) + assert.EqualValues(t, 0, acker.PendingACKs) assert.ErrorIs(t, acker.ctx.Err(), context.Canceled) } @@ -58,12 +58,12 @@ func TestEventACKHandlerWait(t *testing.T) { t.Cleanup(cancel) // Create acker. Add one pending ACK. - acker := newEventACKTracker(ctx) + acker := NewEventACKTracker(ctx) acker.Add() acker.ACK() acker.Wait() acker.Add() - assert.EqualValues(t, 1, acker.pendingACKs) + assert.EqualValues(t, 1, acker.PendingACKs) assert.ErrorIs(t, acker.ctx.Err(), context.Canceled) } diff --git a/x-pack/filebeat/input/awss3/semaphore.go b/x-pack/libbeat/common/aws/semaphore.go similarity index 81% rename from x-pack/filebeat/input/awss3/semaphore.go rename to x-pack/libbeat/common/aws/semaphore.go index 2a695f4c621..28343bcbd32 100644 --- a/x-pack/filebeat/input/awss3/semaphore.go +++ b/x-pack/libbeat/common/aws/semaphore.go @@ -2,22 +2,22 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package awss3 +package aws import ( "context" "sync" ) -type sem struct { +type Sem struct { mutex *sync.Mutex cond sync.Cond available int } -func newSem(n int) *sem { +func NewSem(n int) *Sem { var m sync.Mutex - return &sem{ + return &Sem{ available: n, mutex: &m, cond: sync.Cond{ @@ -26,7 +26,7 @@ func newSem(n int) *sem { } } -func (s *sem) AcquireContext(n int, ctx context.Context) (int, error) { +func (s *Sem) AcquireContext(n int, ctx context.Context) (int, error) { acquireC := make(chan int, 1) go func() { defer close(acquireC) @@ -41,7 +41,7 @@ func (s *sem) AcquireContext(n int, ctx context.Context) (int, error) { } } -func (s *sem) Acquire(n int) int { +func (s *Sem) Acquire(n int) int { if n <= 0 { return 0 } @@ -63,7 +63,7 @@ func (s *sem) Acquire(n int) int { return n } -func (s *sem) Release(n int) { +func (s *Sem) Release(n int) { if n <= 0 { return } @@ -75,7 +75,7 @@ func (s *sem) Release(n int) { s.cond.Signal() } -func (s *sem) Available() int { +func (s *Sem) Available() int { s.mutex.Lock() defer s.mutex.Unlock() diff --git a/x-pack/filebeat/input/awss3/semaphore_test.go b/x-pack/libbeat/common/aws/semaphore_test.go similarity index 95% rename from x-pack/filebeat/input/awss3/semaphore_test.go rename to x-pack/libbeat/common/aws/semaphore_test.go index d71252ffc78..f91831ef8a0 100644 --- a/x-pack/filebeat/input/awss3/semaphore_test.go +++ b/x-pack/libbeat/common/aws/semaphore_test.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package awss3 +package aws import ( "sync" @@ -12,7 +12,7 @@ import ( ) func TestSemaphore(t *testing.T) { - s := newSem(5) + s := NewSem(5) assert.Equal(t, s.Acquire(5), 5) From 33fc960621124b1c9959c6e7152b73c62c409522 Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Tue, 25 Jan 2022 16:50:57 +0100 Subject: [PATCH 39/69] [auditbeat] Flaky: use CI=true to skip test (#29994) --- auditbeat/module/file_integrity/metricset_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/auditbeat/module/file_integrity/metricset_test.go b/auditbeat/module/file_integrity/metricset_test.go index 14522bcd627..13e27cf82ea 100644 --- a/auditbeat/module/file_integrity/metricset_test.go +++ b/auditbeat/module/file_integrity/metricset_test.go @@ -988,7 +988,7 @@ func getConfig(path ...string) map[string]interface{} { } func skipOnCIForDarwinAMD64(t testing.TB) { - if os.Getenv("BUILD_ID") != "" && runtime.GOOS == "darwin" && runtime.GOARCH == "amd64" { + if os.Getenv("CI") == "true" && runtime.GOOS == "darwin" && runtime.GOARCH == "amd64" { t.Skip("Skip test on CI for darwin/amd64") } } From 5f3dd3e39deb2f062a5b051d92203d2444366825 Mon Sep 17 00:00:00 2001 From: Craig MacKenzie Date: Tue, 25 Jan 2022 12:53:38 -0500 Subject: [PATCH 40/69] Add the Elastic product origin header when talking to Elasticsearch or Kibana. (#29966) Set the beats product origin header by default when communicating with Elasticsearch or Kibana. 
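
For illustration, a minimal sketch of the new default (assuming only the
exported names introduced by this patch; the URL is a placeholder):

    // A connection created without custom headers now sends
    // "X-Elastic-Product-Origin: beats" with every request.
    conn, err := eslegclient.NewConnection(eslegclient.ConnectionSettings{
        URL: "http://localhost:9200",
    })

    // Callers that need a different origin can still override the default:
    conn, err = eslegclient.NewConnection(eslegclient.ConnectionSettings{
        URL:     "http://localhost:9200",
        Headers: map[string]string{productorigin.Header: productorigin.Observability},
    })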
--- libbeat/common/productorigin/productorigin.go | 29 +++++++++++++++++++ libbeat/esleg/eslegclient/connection.go | 22 +++++++------- libbeat/esleg/eslegclient/connection_test.go | 21 +++++++++----- metricbeat/module/elasticsearch/metricset.go | 3 ++ metricbeat/module/kibana/settings/settings.go | 3 ++ metricbeat/module/kibana/stats/stats.go | 3 ++ metricbeat/module/kibana/status/status.go | 3 ++ 7 files changed, 66 insertions(+), 18 deletions(-) create mode 100644 libbeat/common/productorigin/productorigin.go diff --git a/libbeat/common/productorigin/productorigin.go b/libbeat/common/productorigin/productorigin.go new file mode 100644 index 00000000000..133442fae90 --- /dev/null +++ b/libbeat/common/productorigin/productorigin.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package productorigin defines the Elastic product origin header. +package productorigin + +const ( + // Identifies a request as originating from an Elastic product. Has the side effect of + // suppressing Elasticsearch API deprecation warnings in Kibana when set. + Header = "X-Elastic-Product-Origin" + + // Applicable values from https://github.com/elastic/kibana/blob/main/x-pack/plugins/upgrade_assistant/common/constants.ts#L50 + Observability = "observability" + Beats = "beats" +) diff --git a/libbeat/esleg/eslegclient/connection.go b/libbeat/esleg/eslegclient/connection.go index d14ba8544f0..6f2f13aa6b2 100644 --- a/libbeat/esleg/eslegclient/connection.go +++ b/libbeat/esleg/eslegclient/connection.go @@ -30,6 +30,7 @@ import ( "go.elastic.co/apm/module/apmelasticsearch" "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/common/productorigin" "github.com/elastic/beats/v7/libbeat/common/transport" "github.com/elastic/beats/v7/libbeat/common/transport/httpcommon" "github.com/elastic/beats/v7/libbeat/common/transport/kerberos" @@ -84,7 +85,9 @@ type ConnectionSettings struct { func NewConnection(s ConnectionSettings) (*Connection, error) { logger := logp.NewLogger("esclientleg") - s = settingsWithDefaults(s) + if s.IdleConnTimeout == 0 { + s.IdleConnTimeout = 1 * time.Minute + } u, err := url.Parse(s.URL) if err != nil { @@ -117,6 +120,14 @@ func NewConnection(s ConnectionSettings) (*Connection, error) { } userAgent := useragent.UserAgent(s.Beatname) + // Default the product origin header to beats if it wasn't already set. 
+ if _, ok := s.Headers[productorigin.Header]; !ok { + if s.Headers == nil { + s.Headers = make(map[string]string) + } + s.Headers[productorigin.Header] = productorigin.Beats + } + httpClient, err := s.Transport.Client( httpcommon.WithLogger(logger), httpcommon.WithIOStats(s.Observer), @@ -155,15 +166,6 @@ func NewConnection(s ConnectionSettings) (*Connection, error) { return &conn, nil } -func settingsWithDefaults(s ConnectionSettings) ConnectionSettings { - settings := s - if settings.IdleConnTimeout == 0 { - settings.IdleConnTimeout = 1 * time.Minute - } - - return settings -} - // NewClients returns a list of Elasticsearch clients based on the given // configuration. It accepts the same configuration parameters as the Elasticsearch // output, except for the output specific configuration options. If multiple hosts diff --git a/libbeat/esleg/eslegclient/connection_test.go b/libbeat/esleg/eslegclient/connection_test.go index e0735ebe992..af553d71c09 100644 --- a/libbeat/esleg/eslegclient/connection_test.go +++ b/libbeat/esleg/eslegclient/connection_test.go @@ -25,6 +25,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/libbeat/common/productorigin" ) func TestAPIKeyEncoding(t *testing.T) { @@ -71,18 +73,21 @@ func TestHeaders(t *testing.T) { expected map[string][]string }{ {input: map[string]string{ - "Accept": "application/vnd.elasticsearch+json;compatible-with=7", - "Content-Type": "application/vnd.elasticsearch+json;compatible-with=7", - "X-My-Header": "true"}, + "Accept": "application/vnd.elasticsearch+json;compatible-with=7", + "Content-Type": "application/vnd.elasticsearch+json;compatible-with=7", + productorigin.Header: "elastic-product", + "X-My-Header": "true"}, expected: map[string][]string{ - "Accept": {"application/vnd.elasticsearch+json;compatible-with=7"}, - "Content-Type": {"application/vnd.elasticsearch+json;compatible-with=7"}, - "X-My-Header": {"true"}}}, + "Accept": {"application/vnd.elasticsearch+json;compatible-with=7"}, + "Content-Type": {"application/vnd.elasticsearch+json;compatible-with=7"}, + productorigin.Header: {"elastic-product"}, + "X-My-Header": {"true"}}}, {input: map[string]string{ "X-My-Header": "true"}, expected: map[string][]string{ - "Accept": {"application/json"}, - "X-My-Header": {"true"}}}, + "Accept": {"application/json"}, + productorigin.Header: {productorigin.Beats}, + "X-My-Header": {"true"}}}, } { conn, err := NewConnection(ConnectionSettings{ Headers: td.input, diff --git a/metricbeat/module/elasticsearch/metricset.go b/metricbeat/module/elasticsearch/metricset.go index 7a7b3d863f5..3dcb456b357 100644 --- a/metricbeat/module/elasticsearch/metricset.go +++ b/metricbeat/module/elasticsearch/metricset.go @@ -23,6 +23,7 @@ import ( "github.com/pkg/errors" + "github.com/elastic/beats/v7/libbeat/common/productorigin" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -90,6 +91,8 @@ func NewMetricSet(base mb.BaseMetricSet, servicePath string) (*MetricSet, error) return nil, err } + http.SetHeaderDefault(productorigin.Header, productorigin.Beats) + config := struct { Scope Scope `config:"scope"` XPackEnabled bool `config:"xpack.enabled"` diff --git a/metricbeat/module/kibana/settings/settings.go b/metricbeat/module/kibana/settings/settings.go index d0cea670b5f..b2468bfa461 100644 --- a/metricbeat/module/kibana/settings/settings.go +++ 
b/metricbeat/module/kibana/settings/settings.go @@ -20,6 +20,7 @@ package settings import ( "fmt" + "github.com/elastic/beats/v7/libbeat/common/productorigin" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -77,6 +78,8 @@ func (m *MetricSet) init() (err error) { return err } + httpHelper.SetHeaderDefault(productorigin.Header, productorigin.Beats) + kibanaVersion, err := kibana.GetVersion(httpHelper, kibana.SettingsPath) if err != nil { return err diff --git a/metricbeat/module/kibana/stats/stats.go b/metricbeat/module/kibana/stats/stats.go index 5551e57823f..2f6ba8faa2f 100644 --- a/metricbeat/module/kibana/stats/stats.go +++ b/metricbeat/module/kibana/stats/stats.go @@ -22,6 +22,7 @@ import ( "github.com/pkg/errors" + "github.com/elastic/beats/v7/libbeat/common/productorigin" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -84,6 +85,8 @@ func (m *MetricSet) init() error { return err } + statsHTTP.SetHeaderDefault(productorigin.Header, productorigin.Beats) + kibanaVersion, err := kibana.GetVersion(statsHTTP, kibana.StatsPath) if err != nil { return err diff --git a/metricbeat/module/kibana/status/status.go b/metricbeat/module/kibana/status/status.go index 32b38723324..7715ca48cb9 100644 --- a/metricbeat/module/kibana/status/status.go +++ b/metricbeat/module/kibana/status/status.go @@ -18,6 +18,7 @@ package status import ( + "github.com/elastic/beats/v7/libbeat/common/productorigin" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -59,6 +60,8 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } + http.SetHeaderDefault(productorigin.Header, productorigin.Beats) + return &MetricSet{ ms, http, From 1a3dbb51faeb2465f01ce5806269048b2e7f5984 Mon Sep 17 00:00:00 2001 From: Mike Place Date: Tue, 25 Jan 2022 19:01:21 +0100 Subject: [PATCH 41/69] Enable x-pack security for testing against snapshots (#28131) --- auditbeat/docker-compose.yml | 9 ++- auditbeat/tests/system/test_base.py | 10 ++-- dev-tools/cmd/dashboards/export_dashboards.go | 3 +- dev-tools/mage/integtest_docker.go | 4 ++ dev-tools/mage/pytest.go | 8 +++ filebeat/docker-compose.yml | 7 +++ filebeat/fileset/modules_integration_test.go | 4 +- .../system/config/filebeat_modules.yml.j2 | 4 +- filebeat/tests/system/test_base.py | 13 +++-- filebeat/tests/system/test_crawler.py | 25 ++++----- filebeat/tests/system/test_modules.py | 7 +-- filebeat/tests/system/test_pipeline.py | 15 ++--- filebeat/tests/system/test_reload_modules.py | 5 +- filebeat/tests/system/test_setup.py | 8 +-- heartbeat/docker-compose.yml | 6 ++ .../tests/system/config/heartbeat.yml.j2 | 2 + heartbeat/tests/system/test_base.py | 5 +- libbeat/docker-compose.yml | 22 ++++++-- libbeat/kibana/client.go | 1 - .../elastic_fetcher_integration_test.go | 4 +- libbeat/template/load_integration_test.go | 2 + libbeat/tests/system/base.py | 3 +- libbeat/tests/system/beat/beat.py | 55 +++++++++++++++++-- libbeat/tests/system/beat/common_tests.py | 6 +- libbeat/tests/system/config/libbeat.yml.j2 | 4 ++ libbeat/tests/system/config/mockbeat.yml.j2 | 29 ++++++++-- libbeat/tests/system/test_ca_pinning.py | 20 +++---- .../system/test_cmd_setup_index_management.py | 6 +- 
libbeat/tests/system/test_cmd_test.py | 9 ++- libbeat/tests/system/test_cmd_version.py | 3 +- libbeat/tests/system/test_dashboard.py | 26 +++++++-- libbeat/tests/system/test_ilm.py | 4 +- libbeat/tests/system/test_keystore.py | 12 ++-- libbeat/tests/system/test_monitoring.py | 5 +- libbeat/tests/system/test_template.py | 10 ++-- metricbeat/docker-compose.yml | 2 + packetbeat/docker-compose.yml | 7 +++ testing/environments/Makefile | 2 + testing/environments/docker/README.md | 6 ++ .../docker/elasticsearch/roles.yml | 31 +++++++++++ .../environments/docker/elasticsearch/users | 8 +++ .../docker/elasticsearch/users_roles | 11 ++++ .../logstash/pipeline-xpack/default.conf | 26 +++++++++ testing/environments/snapshot.yml | 37 +++++++++---- x-pack/filebeat/docker-compose.yml | 6 ++ .../system/config/filebeat_modules.yml.j2 | 4 +- x-pack/functionbeat/docker-compose.yml | 4 ++ x-pack/libbeat/docker-compose.yml | 6 +- 48 files changed, 385 insertions(+), 121 deletions(-) create mode 100644 testing/environments/docker/README.md create mode 100644 testing/environments/docker/elasticsearch/roles.yml create mode 100644 testing/environments/docker/elasticsearch/users create mode 100644 testing/environments/docker/elasticsearch/users_roles create mode 100644 testing/environments/docker/logstash/pipeline-xpack/default.conf diff --git a/auditbeat/docker-compose.yml b/auditbeat/docker-compose.yml index 6a0e252106b..adf33888988 100644 --- a/auditbeat/docker-compose.yml +++ b/auditbeat/docker-compose.yml @@ -8,7 +8,7 @@ services: environment: - ES_HOST=elasticsearch - ES_PORT=9200 - - ES_USER=beats + - ES_USER=auditbeat_user - ES_PASS=testing - KIBANA_HOST=kibana - KIBANA_PORT=5601 @@ -32,8 +32,15 @@ services: extends: file: ../testing/environments/${TESTING_ENVIRONMENT}.yml service: elasticsearch + healthcheck: + test: ["CMD-SHELL", "curl -u admin:testing -s http://localhost:9200/_cat/health?h=status | grep -q green"] + retries: 300 + interval: 1s kibana: extends: file: ../testing/environments/${TESTING_ENVIRONMENT}.yml service: kibana + healthcheck: + test: ["CMD-SHELL", "curl -u beats:testing -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] + retries: 600 diff --git a/auditbeat/tests/system/test_base.py b/auditbeat/tests/system/test_base.py index 2f7f645c750..73a7d8ae9f0 100644 --- a/auditbeat/tests/system/test_base.py +++ b/auditbeat/tests/system/test_base.py @@ -41,7 +41,7 @@ def test_index_management(self): """ dirs = [self.temp_dir("auditbeat_test")] with PathCleanup(dirs): - es = Elasticsearch([self.get_elasticsearch_url()]) + es = self.get_elasticsearch_instance() self.render_config_template( modules=[{ @@ -50,7 +50,8 @@ def test_index_management(self): "paths": dirs, } }], - elasticsearch={"host": self.get_elasticsearch_url()}) + elasticsearch=self.get_elasticsearch_template_config() + ) self.run_beat(extra_args=["setup", "--index-management"], exit_code=0) assert self.log_contains('Loaded index template') @@ -67,7 +68,6 @@ def test_dashboards(self): kibana_dir = os.path.join(self.beat_path, "build", "kibana") shutil.copytree(kibana_dir, os.path.join(self.working_dir, "kibana")) - es = Elasticsearch([self.get_elasticsearch_url()]) self.render_config_template( modules=[{ "name": "file_integrity", @@ -75,8 +75,8 @@ def test_dashboards(self): "paths": dirs, } }], - elasticsearch={"host": self.get_elasticsearch_url()}, - kibana={"host": self.get_kibana_url()}, + elasticsearch=self.get_elasticsearch_template_config(), + 
kibana=self.get_kibana_template_config(), ) self.run_beat(extra_args=["setup", "--dashboards"], exit_code=0) diff --git a/dev-tools/cmd/dashboards/export_dashboards.go b/dev-tools/cmd/dashboards/export_dashboards.go index 364fae9e0f5..d1ab9b084e4 100644 --- a/dev-tools/cmd/dashboards/export_dashboards.go +++ b/dev-tools/cmd/dashboards/export_dashboards.go @@ -66,7 +66,8 @@ func main() { user = u.User.Username() pass, _ = u.User.Password() } - + user = "beats" + pass = "testing" transport := httpcommon.DefaultHTTPTransportSettings() transport.Timeout = kibanaTimeout diff --git a/dev-tools/mage/integtest_docker.go b/dev-tools/mage/integtest_docker.go index 94d9288d1fa..721736da2e6 100644 --- a/dev-tools/mage/integtest_docker.go +++ b/dev-tools/mage/integtest_docker.go @@ -112,6 +112,10 @@ func (d *DockerIntegrationTester) Test(dir string, mageTarget string, env map[st // Use the host machine's pkg cache to minimize external downloads. "-v", goPkgCache + ":" + dockerGoPkgCache + ":ro", "-e", "GOPROXY=file://" + dockerGoPkgCache + ",direct", + // Do not set ES_USER or ES_PATH in this file unless you intend to override + // values set in all individual docker-compose files + // "-e", "ES_USER=admin", + // "-e", "ES_PASS=testing", } args, err = addUidGidEnvArgs(args) if err != nil { diff --git a/dev-tools/mage/pytest.go b/dev-tools/mage/pytest.go index d4b8dd3fcee..eb0d605c80c 100644 --- a/dev-tools/mage/pytest.go +++ b/dev-tools/mage/pytest.go @@ -133,6 +133,14 @@ func PythonTest(params PythonTestArgs) error { pytestOptions := []string{ "--timeout=90", "--durations=20", + // Enable -x to stop at the first failing test + // "-x", + // Enable --tb=long to produce long tracebacks + //"--tb=long", + // Enable -v to produce verbose output + //"-v", + // Don't capture test output + //"-s", } if mg.Verbose() { pytestOptions = append(pytestOptions, "-v") diff --git a/filebeat/docker-compose.yml b/filebeat/docker-compose.yml index 19302ae1e6f..a73f0bc39d6 100644 --- a/filebeat/docker-compose.yml +++ b/filebeat/docker-compose.yml @@ -40,6 +40,10 @@ services: extends: file: ${ES_BEATS}/testing/environments/${TESTING_ENVIRONMENT}.yml service: elasticsearch + healthcheck: + test: ["CMD-SHELL", "curl -u admin:testing -s http://localhost:9200/_cat/health?h=status | grep -q green"] + retries: 300 + interval: 1s kafka: build: ${ES_BEATS}/testing/environments/docker/kafka @@ -53,6 +57,9 @@ services: extends: file: ${ES_BEATS}/testing/environments/${TESTING_ENVIRONMENT}.yml service: kibana + healthcheck: + test: ["CMD-SHELL", "curl -u beats:testing -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] + retries: 600 mosquitto: build: ${ES_BEATS}/testing/environments/docker/mosquitto diff --git a/filebeat/fileset/modules_integration_test.go b/filebeat/fileset/modules_integration_test.go index f17be7c8d73..be22b9c1680 100644 --- a/filebeat/fileset/modules_integration_test.go +++ b/filebeat/fileset/modules_integration_test.go @@ -258,7 +258,9 @@ func TestLoadMultiplePipelinesWithRollback(t *testing.T) { func getTestingElasticsearch(t eslegtest.TestLogger) *eslegclient.Connection { conn, err := eslegclient.NewConnection(eslegclient.ConnectionSettings{ - URL: eslegtest.GetURL(), + URL: eslegtest.GetURL(), + Username: eslegtest.GetUser(), + Password: eslegtest.GetPass(), }) if err != nil { t.Fatal(err) diff --git a/filebeat/tests/system/config/filebeat_modules.yml.j2 b/filebeat/tests/system/config/filebeat_modules.yml.j2 index 710a3609ea4..93ded5a1379 100644 --- 
a/filebeat/tests/system/config/filebeat_modules.yml.j2 +++ b/filebeat/tests/system/config/filebeat_modules.yml.j2 @@ -12,8 +12,10 @@ filebeat.overwrite_pipelines: true filebeat.config.modules: path: {{ beat.working_dir + '/modules.d/*.yml' }} -output.elasticsearch.hosts: ["{{ elasticsearch_url }}"] +output.elasticsearch.hosts: ["{{ elasticsearch.host }}"] output.elasticsearch.index: {{ index_name }} +output.elasticsearch.username: {{ elasticsearch.user }} +output.elasticsearch.password: {{ elasticsearch.pass }} setup.template.name: {{ index_name }} setup.template.pattern: {{ index_name }}* diff --git a/filebeat/tests/system/test_base.py b/filebeat/tests/system/test_base.py index 6082c07f609..61a38c6c895 100644 --- a/filebeat/tests/system/test_base.py +++ b/filebeat/tests/system/test_base.py @@ -1,13 +1,19 @@ import os import unittest from filebeat import BaseTest -from elasticsearch import Elasticsearch from beat.beat import INTEGRATION_TESTS from beat import common_tests class Test(BaseTest, common_tests.TestExportsMixin, common_tests.TestDashboardMixin): + def setUp(self): + super(Test, self).setUp() + self.render_config_template( + elasticsearch=self.get_elasticsearch_template_config(), + ) + self.es = self.get_elasticsearch_instance() + def test_base(self): """ Test if the basic fields exist. @@ -32,12 +38,11 @@ def test_index_management(self): """ Test that the template can be loaded with `setup --index-management` """ - es = Elasticsearch([self.get_elasticsearch_url()]) self.render_config_template( - elasticsearch={"host": self.get_elasticsearch_url()}, + elasticsearch=self.get_elasticsearch_template_config(), ) exit_code = self.run_beat(extra_args=["setup", "--index-management"]) assert exit_code == 0 assert self.log_contains('Loaded index template') - assert len(es.cat.templates(name='filebeat-*', h='name')) > 0 + assert len(self.es.cat.templates(name='filebeat-*', h='name')) > 0 diff --git a/filebeat/tests/system/test_crawler.py b/filebeat/tests/system/test_crawler.py index f3b5d0877a6..51f7a979590 100644 --- a/filebeat/tests/system/test_crawler.py +++ b/filebeat/tests/system/test_crawler.py @@ -19,7 +19,7 @@ def test_fetched_lines(self): """ self.render_config_template( - path=os.path.abspath(self.working_dir) + "/log/*" + path=os.path.abspath(self.working_dir) + "/log/*", ) os.mkdir(self.working_dir + "/log/") @@ -55,7 +55,7 @@ def test_unfinished_line_and_continue(self): """ self.render_config_template( - path=os.path.abspath(self.working_dir) + "/log/*" + path=os.path.abspath(self.working_dir) + "/log/*", ) os.mkdir(self.working_dir + "/log/") @@ -162,7 +162,7 @@ def test_file_renaming(self): """ self.render_config_template( - path=os.path.abspath(self.working_dir) + "/log/*" + path=os.path.abspath(self.working_dir) + "/log/*", ) os.mkdir(self.working_dir + "/log/") @@ -339,7 +339,7 @@ def test_new_line_on_existing_file(self): """ self.render_config_template( - path=os.path.abspath(self.working_dir) + "/log/*" + path=os.path.abspath(self.working_dir) + "/log/*", ) os.mkdir(self.working_dir + "/log/") @@ -373,7 +373,7 @@ def test_multiple_appends(self): """ self.render_config_template( - path=os.path.abspath(self.working_dir) + "/log/*" + path=os.path.abspath(self.working_dir) + "/log/*", ) os.mkdir(self.working_dir + "/log/") @@ -419,7 +419,7 @@ def test_new_line_on_open_file(self): """ self.render_config_template( - path=os.path.abspath(self.working_dir) + "/log/*" + path=os.path.abspath(self.working_dir) + "/log/*", ) os.mkdir(self.working_dir + "/log/") @@ -457,7 +457,7 @@ 
def test_tail_files(self): self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", - tail_files="true" + tail_files="true", ) os.mkdir(self.working_dir + "/log/") @@ -501,7 +501,7 @@ def test_utf8(self): self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", - encoding="utf-8" + encoding="utf-8", ) os.mkdir(self.working_dir + "/log/") @@ -613,7 +613,7 @@ def test_include_lines(self): self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", - include_lines=["^ERR", "^WARN"] + include_lines=["^ERR", "^WARN"], ) os.mkdir(self.working_dir + "/log/") @@ -648,9 +648,8 @@ def test_default_include_exclude_lines(self): """ Checks if all the log lines are exported by default """ - self.render_config_template( - path=os.path.abspath(self.working_dir) + "/log/*" + path=os.path.abspath(self.working_dir) + "/log/*", ) os.mkdir(self.working_dir + "/log/") @@ -688,7 +687,7 @@ def test_exclude_lines(self): self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", - exclude_lines=["^DBG"] + exclude_lines=["^DBG"], ) os.mkdir(self.working_dir + "/log/") @@ -727,7 +726,7 @@ def test_include_exclude_lines(self): self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", exclude_lines=["^DBG"], - include_lines=["apache"] + include_lines=["apache"], ) os.mkdir(self.working_dir + "/log/") diff --git a/filebeat/tests/system/test_modules.py b/filebeat/tests/system/test_modules.py index 79f6c0ece38..5263cdbb61c 100644 --- a/filebeat/tests/system/test_modules.py +++ b/filebeat/tests/system/test_modules.py @@ -5,7 +5,6 @@ import glob import subprocess -from elasticsearch import Elasticsearch import json import logging from parameterized import parameterized @@ -118,9 +117,7 @@ def load_fileset_test_cases(): class Test(BaseTest): def init(self): - self.elasticsearch_url = self.get_elasticsearch_url() - print("Using elasticsearch: {}".format(self.elasticsearch_url)) - self.es = Elasticsearch([self.elasticsearch_url]) + self.es = self.get_elasticsearch_instance(user='admin') logging.getLogger("urllib3").setLevel(logging.WARNING) logging.getLogger("elasticsearch").setLevel(logging.ERROR) @@ -146,7 +143,7 @@ def test_fileset_file(self, module, fileset, test_file): template_name="filebeat_modules", output=cfgfile, index_name=self.index_name, - elasticsearch_url=self.elasticsearch_url, + elasticsearch=self.get_elasticsearch_template_config(user='admin') ) self.run_on_file( diff --git a/filebeat/tests/system/test_pipeline.py b/filebeat/tests/system/test_pipeline.py index afb3219e62d..83cc25ff7d4 100644 --- a/filebeat/tests/system/test_pipeline.py +++ b/filebeat/tests/system/test_pipeline.py @@ -2,7 +2,6 @@ from beat.beat import INTEGRATION_TESTS import os import unittest -from elasticsearch import Elasticsearch import json import logging @@ -12,8 +11,7 @@ class Test(BaseTest): def init(self): self.elasticsearch_url = self.get_elasticsearch_url() self.kibana_url = self.get_kibana_url() - print("Using elasticsearch: {}".format(self.elasticsearch_url)) - self.es = Elasticsearch([self.elasticsearch_url]) + self.es = self.get_elasticsearch_instance() logging.getLogger("urllib3").setLevel(logging.WARNING) logging.getLogger("elasticsearch").setLevel(logging.ERROR) @@ -47,10 +45,13 @@ def test_input_pipeline_config(self): self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", - elasticsearch=dict( - host=self.elasticsearch_url, - pipeline="estest", - index=index_name), + elasticsearch={ + 'host': 
self.elasticsearch_url, + 'pipeline': "estest", + 'index': index_name, + 'user': os.getenv("ES_USER"), + 'pass': os.getenv("ES_PASS") + }, pipeline="test", setup_template_name=index_name, setup_template_pattern=index_name + "*", diff --git a/filebeat/tests/system/test_reload_modules.py b/filebeat/tests/system/test_reload_modules.py index b22294e7d9a..5b8e08f49f4 100644 --- a/filebeat/tests/system/test_reload_modules.py +++ b/filebeat/tests/system/test_reload_modules.py @@ -6,7 +6,6 @@ from filebeat import BaseTest from beat.beat import INTEGRATION_TESTS -from elasticsearch import Elasticsearch moduleConfigTemplate = """ @@ -27,7 +26,7 @@ class Test(BaseTest): def setUp(self): super(BaseTest, self).setUp() if INTEGRATION_TESTS: - self.es = Elasticsearch([self.get_elasticsearch_url()]) + self.es = self.get_elasticsearch_instance() # Copy system module shutil.copytree(os.path.join(self.beat_path, "tests", "system", "module", "test"), @@ -72,7 +71,7 @@ def test_reload_writes_pipeline(self): reload_path=self.working_dir + "/configs/*.yml", reload_type="modules", inputs=False, - elasticsearch={"host": self.get_elasticsearch_url()} + elasticsearch=self.get_elasticsearch_template_config(), ) proc = self.start_beat() diff --git a/filebeat/tests/system/test_setup.py b/filebeat/tests/system/test_setup.py index bd1a96a9194..7422c8d3329 100644 --- a/filebeat/tests/system/test_setup.py +++ b/filebeat/tests/system/test_setup.py @@ -3,8 +3,6 @@ import yaml from shutil import copytree, copyfile -from elasticsearch import Elasticsearch - from filebeat import BaseTest INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False) @@ -15,7 +13,7 @@ class Test(BaseTest): def init(self): self.elasticsearch_url = self.get_elasticsearch_url() print("Using elasticsearch: {}".format(self.elasticsearch_url)) - self.es = Elasticsearch([self.elasticsearch_url]) + self.es = self.get_elasticsearch_instance() @unittest.skipIf(not INTEGRATION_TESTS, "integration tests are disabled, run with INTEGRATION_TESTS=1 to enable them.") @@ -28,9 +26,7 @@ def test_setup_modules_d_config(self): self.init() self.render_config_template( modules=True, - elasticsearch={ - "host": self.get_elasticsearch_url(), - }, + elasticsearch=self.get_elasticsearch_template_config(), ) self._setup_dummy_module() diff --git a/heartbeat/docker-compose.yml b/heartbeat/docker-compose.yml index c7da39a8798..ace731f7bbb 100644 --- a/heartbeat/docker-compose.yml +++ b/heartbeat/docker-compose.yml @@ -8,6 +8,8 @@ services: - REDIS_HOST=redis - REDIS_PORT=6379 - ES_HOST=elasticsearch + - ES_USER=heartbeat_user + - ES_PASS=testing - ES_PORT=9200 working_dir: /go/src/github.com/elastic/beats/heartbeat volumes: @@ -28,6 +30,10 @@ services: extends: file: ${ES_BEATS}/testing/environments/${TESTING_ENVIRONMENT}.yml service: elasticsearch + healthcheck: + test: ["CMD-SHELL", "curl -u admin:testing -s http://localhost:9200/_cat/health?h=status | grep -q green"] + retries: 300 + interval: 1s redis: build: ${PWD}/tests/docker_support/redis diff --git a/heartbeat/tests/system/config/heartbeat.yml.j2 b/heartbeat/tests/system/config/heartbeat.yml.j2 index 44dfe3a836d..6736ab8452e 100644 --- a/heartbeat/tests/system/config/heartbeat.yml.j2 +++ b/heartbeat/tests/system/config/heartbeat.yml.j2 @@ -105,6 +105,8 @@ queue.mem: {%- if elasticsearch %} output.elasticsearch: hosts: ["{{ elasticsearch.host }}"] + username: {{ elasticsearch.user }} + password: {{ elasticsearch.pass }} {%- else %} output.file: path: '{{ output_file_path|default(beat.working_dir + "/output") 
}}' diff --git a/heartbeat/tests/system/test_base.py b/heartbeat/tests/system/test_base.py index 172960209d3..7819ccb291e 100644 --- a/heartbeat/tests/system/test_base.py +++ b/heartbeat/tests/system/test_base.py @@ -2,7 +2,6 @@ import unittest from heartbeat import BaseTest -from elasticsearch import Elasticsearch from beat.beat import INTEGRATION_TESTS from beat import common_tests from time import sleep @@ -198,13 +197,13 @@ def test_index_management(self): """ Test that the template can be loaded with `setup --index-management` """ - es = Elasticsearch([self.get_elasticsearch_url()]) + es = self.get_elasticsearch_instance() self.render_config_template( monitors=[{ "type": "http", "urls": ["http://localhost:9200"], }], - elasticsearch={"host": self.get_elasticsearch_url()}, + elasticsearch=self.get_elasticsearch_template_config() ) exit_code = self.run_beat(extra_args=["setup", "--index-management"]) diff --git a/libbeat/docker-compose.yml b/libbeat/docker-compose.yml index c96b40e3ea8..73c8f1e2dfe 100644 --- a/libbeat/docker-compose.yml +++ b/libbeat/docker-compose.yml @@ -21,7 +21,9 @@ services: - KIBANA_PASS=testing - ES_HOST=elasticsearch - ES_PORT=9200 - - ES_USER=beats + # ES_USER must be admin in order for the Go Integration tests to + # function because they require indices:data/read/search + - ES_USER=admin - ES_PASS=testing - ES_MONITORING_HOST=elasticsearch_monitoring - ES_MONITORING_PORT=9200 @@ -30,7 +32,7 @@ services: # - ES_KERBEROS_HOST=elasticsearch_kerberos.elastic - ES_PORT_SSL=9200 - ES_SUPERUSER_USER=admin - - ES_SUPERUSER_PASS=changeme + - ES_SUPERUSER_PASS=testing volumes: - ${PWD}/..:/go/src/github.com/elastic/beats/ # Used for docker integration tests: @@ -61,19 +63,24 @@ services: extends: file: ${ES_BEATS}/testing/environments/${TESTING_ENVIRONMENT}.yml service: elasticsearch + healthcheck: + test: ["CMD-SHELL", "curl -u admin:testing -s http://localhost:9200/_cat/health?h=status | grep -q green"] + retries: 300 + interval: 1s elasticsearch_monitoring: extends: file: ${ES_BEATS}/testing/environments/${TESTING_ENVIRONMENT}.yml service: elasticsearch healthcheck: - test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] + test: ["CMD-SHELL", "curl -u admin:testing -s http://localhost:9200/_cat/health?h=status | grep -q green"] + elasticsearchssl: extends: file: ${ES_BEATS}/testing/environments/${TESTING_ENVIRONMENT}.yml service: elasticsearch healthcheck: - test: ["CMD", "curl", "-u", "admin:changeme", "-f", "https://localhost:9200", "--insecure"] + test: ["CMD", "curl", "-u", "admin:testing", "-f", "https://localhost:9200", "--insecure"] retries: 1200 interval: 5s start_period: 60s @@ -94,9 +101,11 @@ services: - "xpack.security.authc.realms.file.file1.order=0" volumes: - ${ES_BEATS}/testing/environments/docker/elasticsearch/pki:/usr/share/elasticsearch/config/pki:ro + - ${ES_BEATS}/testing/environments/docker/elasticsearch/roles.yml:/usr/share/elasticsearch/config/roles.yml + - ${ES_BEATS}/testing/environments/docker/elasticsearch/users:/usr/share/elasticsearch/config/users + - ${ES_BEATS}/testing/environments/docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles expose: - 9200 - command: bash -c "bin/elasticsearch-users useradd admin -r superuser -p changeme | /usr/local/bin/docker-entrypoint.sh eswrapper" # This host name is static because of the certificate. 
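   # (clients verify the server certificate against this exact name, so
   # renaming the service would make the TLS handshake fail)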
logstash: @@ -163,3 +172,6 @@ services: extends: file: ${ES_BEATS}/testing/environments/${TESTING_ENVIRONMENT}.yml service: kibana + healthcheck: + test: ["CMD-SHELL", "curl -u beats:testing -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] + retries: 600 diff --git a/libbeat/kibana/client.go b/libbeat/kibana/client.go index 111db2e3816..d7da444fa8b 100644 --- a/libbeat/kibana/client.go +++ b/libbeat/kibana/client.go @@ -370,7 +370,6 @@ func (client *Client) ImportMultiPartFormFile(url string, params url.Values, fil headers := http.Header{} headers.Add("Content-Type", w.FormDataContentType()) - statusCode, response, err := client.Connection.Request("POST", url, params, headers, buf) if err != nil || statusCode >= 300 { return fmt.Errorf("returned %d to import file: %v. Response: %s", statusCode, err, response) diff --git a/libbeat/licenser/elastic_fetcher_integration_test.go b/libbeat/licenser/elastic_fetcher_integration_test.go index c12f6651bfe..b1a21d937cd 100644 --- a/libbeat/licenser/elastic_fetcher_integration_test.go +++ b/libbeat/licenser/elastic_fetcher_integration_test.go @@ -43,8 +43,8 @@ func getTestClient() *eslegclient.Connection { host := "http://" + cli.GetEnvOr("ES_HOST", elasticsearchHost) + ":" + cli.GetEnvOr("ES_POST", elasticsearchPort) client, err := eslegclient.NewConnection(eslegclient.ConnectionSettings{ URL: host, - Username: "myelastic", // NOTE: I will refactor this in a followup PR - Password: "changeme", + Username: "admin", + Password: "testing", CompressionLevel: 3, Transport: transport, }) diff --git a/libbeat/template/load_integration_test.go b/libbeat/template/load_integration_test.go index 07caf39c3e7..7f80f84d99d 100644 --- a/libbeat/template/load_integration_test.go +++ b/libbeat/template/load_integration_test.go @@ -401,6 +401,8 @@ func getTestingElasticsearch(t eslegtest.TestLogger) *eslegclient.Connection { conn, err := eslegclient.NewConnection(eslegclient.ConnectionSettings{ URL: eslegtest.GetURL(), Transport: httpcommon.DefaultHTTPTransportSettings(), + Username: eslegtest.GetUser(), + Password: eslegtest.GetPass(), }) if err != nil { t.Fatal(err) diff --git a/libbeat/tests/system/base.py b/libbeat/tests/system/base.py index a768d80e31e..c5f72d02008 100644 --- a/libbeat/tests/system/base.py +++ b/libbeat/tests/system/base.py @@ -1,7 +1,6 @@ import os from datetime import datetime from beat.beat import TestCase -from elasticsearch import Elasticsearch, NotFoundError class BaseTest(TestCase): @@ -26,5 +25,5 @@ def es_client(self): if self._es: return self._es - self._es = Elasticsearch([self.get_elasticsearch_url()]) + self._es = self.get_elasticsearch_instance() return self._es diff --git a/libbeat/tests/system/beat/beat.py b/libbeat/tests/system/beat/beat.py index e7a2c60640a..4f151bdd936 100644 --- a/libbeat/tests/system/beat/beat.py +++ b/libbeat/tests/system/beat/beat.py @@ -16,6 +16,8 @@ from .compose import ComposeMixin +from elasticsearch import Elasticsearch + BEAT_REQUIRED_FIELDS = ["@timestamp", "agent.type", "agent.name", "agent.version"] @@ -669,8 +671,7 @@ def output_count(self, pred, output_file=None): def get_elasticsearch_url(self): """ - Returns an elasticsearch.Elasticsearch instance built from the - env variables like the integration tests. 
+ Returns a string with the Elasticsearch URL """ return "http://{host}:{port}".format( host=os.getenv("ES_HOST", "localhost"), @@ -679,14 +680,46 @@ def get_elasticsearch_url(self): def get_elasticsearch_url_ssl(self): """ - Returns an elasticsearch.Elasticsearch instance built from the - env variables like the integration tests. + Returns a string with the Elasticsearch URL """ return "https://{host}:{port}".format( host=os.getenv("ES_HOST_SSL", "localhost"), port=os.getenv("ES_PORT_SSL", "9205"), ) + def get_elasticsearch_template_config(self, security=True, user=None): + """ + Returns a template suitable for a Beats config + """ + template = { + "host": self.get_elasticsearch_url(), + } + + if security: + template["user"] = user or os.getenv("ES_USER", "") + template["pass"] = os.getenv("ES_PASS", "") + + return template + + def get_elasticsearch_instance(self, security=True, ssl=False, url=None, user=None): + """ + Returns an elasticsearch.Elasticsearch instance built from the + env variables like the integration tests. + """ + if url is None: + if ssl: + url = self.get_elasticsearch_url_ssl() + else: + url = self.get_elasticsearch_url() + + if security: + username = user or os.getenv("ES_USER", "") + password = os.getenv("ES_PASS", "") + es_instance = Elasticsearch([url], http_auth=(username, password)) + else: + es_instance = Elasticsearch([url]) + return es_instance + def get_kibana_url(self): """ Returns kibana host URL @@ -696,6 +729,20 @@ def get_kibana_url(self): port=os.getenv("KIBANA_PORT", "5601"), ) + def get_kibana_template_config(self, security=True, user=None): + """ + Returns a Kibana template suitable for a Beat + """ + template = { + "host": self.get_kibana_url() + } + + if security: + template["user"] = user or os.getenv("ES_USER", "") + template["pass"] = os.getenv("ES_PASS", "") + + return template + def assert_fields_are_documented(self, evt): """ Assert that all keys present in evt are documented in fields.yml. 
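
A minimal sketch of how a system test is expected to use the new helpers
above (TestCase plumbing as in libbeat/tests/system; ES_USER/ES_PASS are
exported by the docker-compose files in this patch; the class and test
names are illustrative, not part of the change):

    import unittest

    from beat.beat import TestCase, INTEGRATION_TESTS

    class ExampleTest(TestCase):
        @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
        def test_index_management(self):
            # Render output.elasticsearch.{hosts,username,password} and
            # setup.kibana.* from the environment instead of hard-coding
            # credentials in each test.
            self.render_config_template(
                elasticsearch=self.get_elasticsearch_template_config(),
                kibana=self.get_kibana_template_config(),
            )
            self.run_beat(extra_args=["setup", "--index-management"], exit_code=0)

            # Query the cluster with the same credentials.
            es = self.get_elasticsearch_instance()
            assert es.cluster.health()["status"] in ("yellow", "green")
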
diff --git a/libbeat/tests/system/beat/common_tests.py b/libbeat/tests/system/beat/common_tests.py index a5caff92c9a..bdfa88f093b 100644 --- a/libbeat/tests/system/beat/common_tests.py +++ b/libbeat/tests/system/beat/common_tests.py @@ -116,8 +116,8 @@ def test_dashboards(self): es = Elasticsearch([self.get_elasticsearch_url()]) self.render_config_template( - elasticsearch={"host": self.get_elasticsearch_url()}, - kibana={"host": self.get_kibana_url()}, + elasticsearch=self.get_elasticsearch_template_config(), + kibana=self.get_kibana_template_config(), ) exit_code = self.run_beat(extra_args=["setup", "--dashboards"]) @@ -131,7 +131,7 @@ def is_saved_object_api_available(self): def get_version(self): url = self.get_kibana_url() + "/api/status" - r = requests.get(url) + r = requests.get(url, auth=(os.getenv('ES_USER'), os.getenv('ES_PASS'))) body = r.json() version = body["version"]["number"] diff --git a/libbeat/tests/system/config/libbeat.yml.j2 b/libbeat/tests/system/config/libbeat.yml.j2 index e6d62a5eae6..8fe9174b723 100644 --- a/libbeat/tests/system/config/libbeat.yml.j2 +++ b/libbeat/tests/system/config/libbeat.yml.j2 @@ -60,6 +60,8 @@ queue.mem: {% if kibana -%} setup.kibana.host: "{{ kibana.host }}" +setup.kibana.username: "{{ kibana.user }}" +setup.kibana.password: "{{ kibana.pass }}" {%- endif %} #================================ Outputs ===================================== @@ -71,6 +73,8 @@ setup.kibana.host: "{{ kibana.host }}" output: elasticsearch: hosts: ["{{ elasticsearch.host }}"] + username: {{ elasticsearch.user }} + password: {{ elasticsearch.pass }} {% if elasticsearch.pipeline %} pipeline: {{elasticsearch.pipeline}} {% endif %} diff --git a/libbeat/tests/system/config/mockbeat.yml.j2 b/libbeat/tests/system/config/mockbeat.yml.j2 index 5657105832e..9e1c21f488a 100644 --- a/libbeat/tests/system/config/mockbeat.yml.j2 +++ b/libbeat/tests/system/config/mockbeat.yml.j2 @@ -37,15 +37,36 @@ output: {% endfor -%} {%- endif %} - {% if elasticsearch -%} +{% if elasticsearch -%} +output: elasticsearch: - {% for k, v in elasticsearch.items() -%} - {{ k }}: {{ v }} - {% endfor -%} + hosts: ["{{ elasticsearch.host }}"] + username: {{ elasticsearch.user }} + password: {{ elasticsearch.pass }} + {% if elasticsearch.pipeline %} + pipeline: {{elasticsearch.pipeline}} + {% endif %} + {% if elasticsearch.index %} + index: {{elasticsearch.index}} + {% endif %} + {% if elasticsearch.ilm %} + ilm.enabled: {{elasticsearch.ilm}} + {% endif %} + {% if elasticsearch.timeout %} + timeout: {{elasticsearch.timeout}} + {% endif %} + {% if elasticsearch.ssl_certificate_authorities %} + ssl.certificate_authorities: {{elasticsearch.ssl_certificate_authorities}} + {% endif %} + {% if elasticsearch.ssl_ca_sha256 %} + ssl.ca_sha256: {{ elasticsearch.ssl_ca_sha256 }} + {% endif %} + # older versions have to be allowed because mockbeat is on v9.9.9 allow_older_versions: true {%- endif %} + # Redis as output # Options: # host, port: where Redis is listening on diff --git a/libbeat/tests/system/test_ca_pinning.py b/libbeat/tests/system/test_ca_pinning.py index 4c1480b82a0..484e90b0337 100644 --- a/libbeat/tests/system/test_ca_pinning.py +++ b/libbeat/tests/system/test_ca_pinning.py @@ -33,11 +33,11 @@ def test_sending_events_with_a_good_sha256(self): self.render_config_template( elasticsearch={ - "hosts": self.get_elasticsearch_url_ssl(), - "username": "admin", - "password": "changeme", - "ssl.certificate_authorities": [ca], - "ssl.ca_sha256": "8hZS8gpciuzlu+7Xi0sdv8T7RKRRxG1TWKumUQsDam0=", + "host": 
self.get_elasticsearch_url_ssl(), + "user": "admin", + "pass": "testing", + "ssl_certificate_authorities": [ca], + "ssl_ca_sha256": "8hZS8gpciuzlu+7Xi0sdv8T7RKRRxG1TWKumUQsDam0=", }, ) @@ -65,11 +65,11 @@ def test_sending_events_with_a_bad_sha256(self): self.render_config_template( elasticsearch={ - "hosts": self.get_elasticsearch_url_ssl(), - "username": "admin", - "password": "changeme", - "ssl.certificate_authorities": [ca], - "ssl.ca_sha256": "not-good-sha", + "host": self.get_elasticsearch_url_ssl(), + "user": "beats", + "pass": "testing", + "ssl_certificate_authorities": [ca], + "ssl_ca_sha256": "not-good-sha", }, ) diff --git a/libbeat/tests/system/test_cmd_setup_index_management.py b/libbeat/tests/system/test_cmd_setup_index_management.py index 2abe8828175..6c2512b2d74 100644 --- a/libbeat/tests/system/test_cmd_setup_index_management.py +++ b/libbeat/tests/system/test_cmd_setup_index_management.py @@ -27,6 +27,7 @@ def setUp(self): self.custom_template = self.beat_name + "_foobar" self.es = self.es_client() + self.es = self.get_elasticsearch_instance() self.idxmgmt = IdxMgmt(self.es, self.data_stream) self.idxmgmt.delete(indices=[], policies=[self.policy_name, self.custom_policy], @@ -42,8 +43,11 @@ def tearDown(self): def render_config(self, **kwargs): self.render_config_template( - elasticsearch={"hosts": self.get_elasticsearch_url()}, + # Note that the template is such that we need to pass in 'username' as opposed to 'user' and + # 'password' instead of 'pass'. + elasticsearch=self.get_elasticsearch_template_config(), es_template_name=self.data_stream, + **kwargs ) diff --git a/libbeat/tests/system/test_cmd_test.py b/libbeat/tests/system/test_cmd_test.py index 38f15ef095f..944d7791fb6 100644 --- a/libbeat/tests/system/test_cmd_test.py +++ b/libbeat/tests/system/test_cmd_test.py @@ -52,7 +52,7 @@ def test_output(self): self.render_config_template("mockbeat", os.path.join(self.working_dir, "mockbeat.yml"), - elasticsearch={"hosts": self.get_elasticsearch_url()}) + elasticsearch=self.get_elasticsearch_template_config()) exit_code = self.run_beat( extra_args=["test", "output"], config="mockbeat.yml") @@ -62,6 +62,7 @@ def test_output(self): assert self.log_contains('TLS... WARN secure connection disabled') assert self.log_contains('talk to server... 
OK') + @unittest.skipIf(not INTEGRATION_TESTS, "integration test") def test_wrong_output(self): """ Test test wrong output works @@ -69,7 +70,11 @@ def test_wrong_output(self): self.render_config_template("mockbeat", os.path.join(self.working_dir, "mockbeat.yml"), - elasticsearch={"hosts": '["badhost:9200"]'}) + elasticsearch={ + "host": 'badhost:9200', + "user": 'admin', + "pass": 'testing' + }) exit_code = self.run_beat( extra_args=["test", "output"], config="mockbeat.yml") diff --git a/libbeat/tests/system/test_cmd_version.py b/libbeat/tests/system/test_cmd_version.py index 240b8759668..ace84b99062 100644 --- a/libbeat/tests/system/test_cmd_version.py +++ b/libbeat/tests/system/test_cmd_version.py @@ -1,5 +1,4 @@ from base import BaseTest -from elasticsearch import Elasticsearch, TransportError import logging import os @@ -18,7 +17,7 @@ def setUp(self): self.elasticsearch_url = self.get_elasticsearch_url() print("Using elasticsearch: {}".format(self.elasticsearch_url)) - self.es = Elasticsearch([self.elasticsearch_url]) + self.es = self.get_elasticsearch_instance(url=self.elasticsearch_url, user='beats') logging.getLogger("urllib3").setLevel(logging.WARNING) logging.getLogger("elasticsearch").setLevel(logging.ERROR) diff --git a/libbeat/tests/system/test_dashboard.py b/libbeat/tests/system/test_dashboard.py index e02a644213e..60fa5726518 100644 --- a/libbeat/tests/system/test_dashboard.py +++ b/libbeat/tests/system/test_dashboard.py @@ -33,7 +33,11 @@ def test_load_without_dashboard(self): "-E", "setup.kibana.protocol=http", "-E", "setup.kibana.host=" + self.get_kibana_host(), "-E", "setup.kibana.port=" + self.get_kibana_port(), + "-E", "setup.kibana.username=beats", + "-E", "setup.kibana.password=testing", "-E", "output.elasticsearch.hosts=['" + self.get_host() + "']", + "-E", "output.elasticsearch.username=admin", + "-E", "output.elasticsearch.password=testing", "-E", "output.file.enabled=false"] ) @@ -58,10 +62,13 @@ def test_load_dashboard(self): "-E", "setup.kibana.protocol=http", "-E", "setup.kibana.host=" + self.get_kibana_host(), "-E", "setup.kibana.port=" + self.get_kibana_port(), + "-E", "setup.kibana.username=beats", + "-E", "setup.kibana.password=testing", "-E", "output.elasticsearch.hosts=['" + self.get_host() + "']", + "-E", "output.elasticsearch.username=admin", + "-E", "output.elasticsearch.password=testing", "-E", "output.file.enabled=false"] ) - beat.check_wait(exit_code=0) assert self.log_contains("Kibana dashboards successfully loaded") is True @@ -91,8 +98,12 @@ def test_load_dashboard_into_space(self, create_space=True): "-E", "setup.kibana.protocol=http", "-E", "setup.kibana.host=" + self.get_kibana_host(), "-E", "setup.kibana.port=" + self.get_kibana_port(), + "-E", "setup.kibana.username=beats", + "-E", "setup.kibana.password=testing", "-E", "setup.kibana.space.id=foo-bar", "-E", "output.elasticsearch.hosts=['" + self.get_host() + "']", + "-E", "output.elasticsearch.username=admin", + "-E", "output.elasticsearch.password=testing", "-E", "output.file.enabled=false"] ) @@ -118,7 +129,11 @@ def test_load_only_index_patterns(self): "-E", "setup.kibana.protocol=http", "-E", "setup.kibana.host=" + self.get_kibana_host(), "-E", "setup.kibana.port=" + self.get_kibana_port(), + "-E", "setup.kibana.username=beats", + "-E", "setup.kibana.password=testing", "-E", "output.elasticsearch.hosts=['" + self.get_host() + "']", + "-E", "output.elasticsearch.username=admin", + "-E", "output.elasticsearch.password=testing", "-E", "output.file.enabled=false"] ) @@ -141,6 +156,8 @@ def 
test_export_dashboard_cmd_export_dashboard_by_id(self): "-E", "setup.kibana.protocol=http", "-E", "setup.kibana.host=" + self.get_kibana_host(), "-E", "setup.kibana.port=" + self.get_kibana_port(), + "-E", "setup.kibana.username=beats", + "-E", "setup.kibana.password=testing", "-id", "Metricbeat-system-overview", "-folder", "system-overview"] ) @@ -162,6 +179,8 @@ def test_export_dashboard_cmd_export_dashboard_by_id_unknown_id(self): "-E", "setup.kibana.protocol=http", "-E", "setup.kibana.host=" + self.get_kibana_host(), "-E", "setup.kibana.port=" + self.get_kibana_port(), + "-E", "setup.kibana.username=beats", + "-E", "setup.kibana.password=testing", "-id", "No-such-dashboard", "-folder", "system-overview"] ) @@ -187,7 +206,6 @@ def test_dev_tool_export_dashboard_by_id(self): p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) content, err = p.communicate() - assert p.returncode == 0 self._check_if_dashboard_exported(folder_name) @@ -267,7 +285,7 @@ def create_kibana_space(self): "kbn-xsrf": "1" } - r = requests.post(url, json=data, headers=headers) + r = requests.post(url, json=data, headers=headers, auth=("beats", "testing")) if r.status_code != 200 and r.status_code != 409: self.fail('Bad Kibana status code when creating space: {}'.format(r.status_code)) @@ -275,7 +293,7 @@ def get_version(self): url = "http://" + self.get_kibana_host() + ":" + self.get_kibana_port() + \ "/api/status" - r = requests.get(url) + r = requests.get(url, auth=("beats", "testing")) body = r.json() version = body["version"]["number"] diff --git a/libbeat/tests/system/test_ilm.py b/libbeat/tests/system/test_ilm.py index 461fc0212f0..4b19f170030 100644 --- a/libbeat/tests/system/test_ilm.py +++ b/libbeat/tests/system/test_ilm.py @@ -37,7 +37,7 @@ def tearDown(self): def render_config(self, **kwargs): self.render_config_template( - elasticsearch={"hosts": self.get_elasticsearch_url()}, + elasticsearch=self.get_elasticsearch_template_config(), es_template_name=self.data_stream, **kwargs ) @@ -126,7 +126,7 @@ def tearDown(self): def render_config(self, **kwargs): self.render_config_template( - elasticsearch={"hosts": self.get_elasticsearch_url()}, + elasticsearch=self.get_elasticsearch_template_config(), es_template_name=self.data_stream, **kwargs ) diff --git a/libbeat/tests/system/test_keystore.py b/libbeat/tests/system/test_keystore.py index b0589123c83..66295b5d34e 100644 --- a/libbeat/tests/system/test_keystore.py +++ b/libbeat/tests/system/test_keystore.py @@ -43,12 +43,12 @@ def test_keystore_with_key_not_present(self): key = "elasticsearch_host" self.render_config_template(keystore_path=self.keystore_path, elasticsearch={ - 'hosts': "${%s}:9200" % key + 'host': "${%s}:9200" % key }) exit_code = self.run_beat() assert self.log_contains( - "missing field accessing 'output.elasticsearch.hosts'") + "missing field accessing 'output.elasticsearch.hosts.0'") assert exit_code == 1 def test_keystore_with_nested_key(self): @@ -80,9 +80,10 @@ def test_export_config_with_keystore(self): key = "asecret" secret = "asecretvalue" - self.render_config_template(keystore_path=self.keystore_path, elasticsearch={ - 'hosts': "${%s}" % key - }) + self.render_config_template( + keystore_path=self.keystore_path, + elasticsearch=self.get_elasticsearch_template_config() + ) exit_code = self.run_beat(extra_args=["keystore", "create"]) assert exit_code == 0 @@ -92,4 +93,3 @@ def test_export_config_with_keystore(self): assert exit_code == 0 assert self.log_contains(secret) == False - assert 
self.log_contains("${%s}" % key) diff --git a/libbeat/tests/system/test_monitoring.py b/libbeat/tests/system/test_monitoring.py index 2232b19712f..1fd2bc415d1 100644 --- a/libbeat/tests/system/test_monitoring.py +++ b/libbeat/tests/system/test_monitoring.py @@ -7,7 +7,6 @@ import unittest from base import BaseTest -from elasticsearch import Elasticsearch INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False) @@ -18,8 +17,8 @@ class Test(BaseTest): def setUp(self): super(BaseTest, self).setUp() - self.es = Elasticsearch([self.get_elasticsearch_url()]) - self.es_monitoring = Elasticsearch([self.get_elasticsearch_monitoring_url()]) + self.es = self.get_elasticsearch_instance() + self.es_monitoring = self.get_elasticsearch_instance(url=self.get_elasticsearch_monitoring_url()) @unittest.skipUnless(INTEGRATION_TESTS, "integration test") @pytest.mark.tag('integration') diff --git a/libbeat/tests/system/test_template.py b/libbeat/tests/system/test_template.py index abed68332a8..5e1ab7ca909 100644 --- a/libbeat/tests/system/test_template.py +++ b/libbeat/tests/system/test_template.py @@ -32,7 +32,7 @@ def test_index_not_modified(self): Test that beat starts running if elasticsearch output is set """ self.render_config_template( - elasticsearch={"hosts": "localhost:9200"}, + elasticsearch=self.get_elasticsearch_template_config(), ) proc = self.start_beat() @@ -74,7 +74,7 @@ def test_index_with_pattern_name(self): Test that beat starts running if elasticsearch output with modified index and pattern and name are set """ self.render_config_template( - elasticsearch={"hosts": "localhost:9200"}, + elasticsearch=self.get_elasticsearch_template_config(), es_template_name="test", es_template_pattern="test-*", ) @@ -97,7 +97,7 @@ def test_json_template(self): print(path) self.render_config_template( - elasticsearch={"hosts": self.get_host()}, + elasticsearch=self.get_elasticsearch_template_config(), template_overwrite="true", template_json_enabled="true", template_json_path=path, @@ -136,7 +136,7 @@ def tearDown(self): def render_config(self, **kwargs): self.render_config_template( - elasticsearch={"hosts": self.get_elasticsearch_url()}, + elasticsearch=self.get_elasticsearch_template_config(), **kwargs ) @@ -195,7 +195,7 @@ def tearDown(self): def render_config(self, **kwargs): self.render_config_template( - elasticsearch={"hosts": self.get_elasticsearch_url()}, + elasticsearch=self.get_elasticsearch_template_config(), **kwargs ) diff --git a/metricbeat/docker-compose.yml b/metricbeat/docker-compose.yml index 299bbb4f4b1..96fbc149cfa 100644 --- a/metricbeat/docker-compose.yml +++ b/metricbeat/docker-compose.yml @@ -4,6 +4,8 @@ services: beat: build: ${PWD}/. 
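     # metricbeat_user/testing is provisioned by the file-realm fixtures this
     # patch adds under testing/environments/docker/elasticsearch/.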
environment: + - ES_USER=metricbeat_user + - ES_PASS=testing - BEAT_STRICT_PERMS=false - TEST_ENVIRONMENT=false working_dir: /go/src/github.com/elastic/beats/metricbeat diff --git a/packetbeat/docker-compose.yml b/packetbeat/docker-compose.yml index 8abfad19410..038d3e37450 100644 --- a/packetbeat/docker-compose.yml +++ b/packetbeat/docker-compose.yml @@ -30,8 +30,15 @@ services: extends: file: ../testing/environments/${TESTING_ENVIRONMENT}.yml service: elasticsearch + healthcheck: + test: ["CMD-SHELL", "curl -u admin:testing -s http://localhost:9200/_cat/health?h=status | grep -q green"] + retries: 300 + interval: 1s kibana: extends: file: ../testing/environments/${TESTING_ENVIRONMENT}.yml service: kibana + healthcheck: + test: ["CMD-SHELL", "curl -u beats:testing -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] + retries: 600 diff --git a/testing/environments/Makefile b/testing/environments/Makefile index bef47686095..6387289ac01 100644 --- a/testing/environments/Makefile +++ b/testing/environments/Makefile @@ -9,6 +9,8 @@ start: stop: ${BASE_COMMAND} down -v +status: + ${BASE_COMMAND} ps up: ${BASE_COMMAND} build diff --git a/testing/environments/docker/README.md b/testing/environments/docker/README.md new file mode 100644 index 00000000000..8ecb7bb5241 --- /dev/null +++ b/testing/environments/docker/README.md @@ -0,0 +1,6 @@ +# XPack security + +This directory contains default usernames and passwords with roles configured +according to the Beats documentation. + +The default password for all accounts is `testing`. \ No newline at end of file diff --git a/testing/environments/docker/elasticsearch/roles.yml b/testing/environments/docker/elasticsearch/roles.yml new file mode 100644 index 00000000000..2f324761053 --- /dev/null +++ b/testing/environments/docker/elasticsearch/roles.yml @@ -0,0 +1,31 @@ +--- +beats: + cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm', 'manage_security', 'manage_api_key'] + indices: + - names: ['filebeat-*', 'shrink-filebeat-*'] + privileges: ['all'] +filebeat: + cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm'] + indices: + - names: ['filebeat-*', 'shrink-filebeat-*'] + privileges: ['all'] +heartbeat: + cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm'] + indices: + - names: ['heartbeat-*', 'shrink-heartbeat-*'] + privileges: ['all'] +auditbeat: + cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm'] + indices: + - names: ['auditbeat-*', 'shrink-auditbeat-*'] + privileges: ['all'] +journalbeat: + cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm'] + indices: + - names: ['journalbeat-*', 'shrink-journalbeat-*'] + privileges: ['all'] +metricbeat: + cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm'] + indices: + - names: ['metricbeat-*', 'shrink-metricbeat-*'] + privileges: ['all'] diff --git a/testing/environments/docker/elasticsearch/users b/testing/environments/docker/elasticsearch/users new file mode 100644 index 00000000000..b912ebffd77 --- /dev/null +++ b/testing/environments/docker/elasticsearch/users @@ -0,0 +1,8 @@ +admin:$2a$10$3y5UdMFkcUWtBfDhAJtYieGwZobnb6GNxCBlTt4ymMkEgImZk.vl2 +beats:$2a$10$3y5UdMFkcUWtBfDhAJtYieGwZobnb6GNxCBlTt4ymMkEgImZk.vl2 +filebeat_user:$2a$10$3y5UdMFkcUWtBfDhAJtYieGwZobnb6GNxCBlTt4ymMkEgImZk.vl2 
+heartbeat_user:$2a$10$3y5UdMFkcUWtBfDhAJtYieGwZobnb6GNxCBlTt4ymMkEgImZk.vl2 +kibana_system_user:$2a$10$3y5UdMFkcUWtBfDhAJtYieGwZobnb6GNxCBlTt4ymMkEgImZk.vl2 +metricbeat_user:$2a$10$3y5UdMFkcUWtBfDhAJtYieGwZobnb6GNxCBlTt4ymMkEgImZk.vl2 +auditbeat_user:$2a$10$3y5UdMFkcUWtBfDhAJtYieGwZobnb6GNxCBlTt4ymMkEgImZk.vl2 +journalbeat_user:$2a$10$3y5UdMFkcUWtBfDhAJtYieGwZobnb6GNxCBlTt4ymMkEgImZk.vl2 diff --git a/testing/environments/docker/elasticsearch/users_roles b/testing/environments/docker/elasticsearch/users_roles new file mode 100644 index 00000000000..36dd721ecb5 --- /dev/null +++ b/testing/environments/docker/elasticsearch/users_roles @@ -0,0 +1,11 @@ +beats:beats +beats_system:beats,filebeat_user,heartbeat_user,metricbeat_user,auditbeat_user,journalbeat_user +filebeat:filebeat_user +heartbeat:heartbeat_user +ingest_admin:apm_server_user +kibana_system:kibana_system_user +kibana_admin:apm_server_user,apm_user_ro,beats,filebeat_user,heartbeat_user,metricbeat_user,auditbeat_user,journalbeat_user +metricbeat:metricbeat_user +auditbeat:auditbeat_user +journalbeat:journalbeat_user +superuser:admin diff --git a/testing/environments/docker/logstash/pipeline-xpack/default.conf b/testing/environments/docker/logstash/pipeline-xpack/default.conf new file mode 100644 index 00000000000..01d46fc4c4b --- /dev/null +++ b/testing/environments/docker/logstash/pipeline-xpack/default.conf @@ -0,0 +1,26 @@ +input { + beats { + port => 5044 + ssl => false + } + + beats { + port => 5055 + ssl => true + ssl_certificate => "/etc/pki/tls/certs/logstash.crt" + ssl_key => "/etc/pki/tls/private/logstash.key" + } +} + + +output { + elasticsearch { + hosts => ["${ES_HOST:elasticsearch}:${ES_PORT:9200}"] + index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" + user => admin + password => testing + } + + # Used for easier debugging + #stdout { codec => rubydebug { metadata => true } } +} diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index b55137d4a91..d335efc04fa 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -4,17 +4,19 @@ version: '2.3' services: elasticsearch: image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-f5a18001-SNAPSHOT - healthcheck: - test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] - retries: 300 - interval: 1s + # When extend is used it merges healthcheck.tests, see: + # https://github.com/docker/compose/issues/8962 + # healthcheck: + # test: ["CMD-SHELL", "curl -u admin:testing -s http://localhost:9200/_cat/health?h=status | grep -q green"] + # retries: 300 + # interval: 1s environment: - "ES_JAVA_OPTS=-Xms1g -Xmx1g" - "network.host=" - "transport.host=127.0.0.1" - "http.host=0.0.0.0" - - "xpack.security.enabled=false" - # We want something as unlimited compilation rate, but 'unlimited' is not valid. + - "xpack.security.enabled=true" + # We want something as unlimited compilation rate, but 'unlimited' is not valid. 
- "script.max_compilations_rate=100000/1m" - "action.destructive_requires_name=false" # Disable geoip updates to prevent golden file test failures when the database @@ -25,6 +27,9 @@ services: - "./GeoLite2-ASN.mmdb:/usr/share/elasticsearch/config/ingest-geoip/GeoLite2-ASN.mmdb:ro" - "./GeoLite2-City.mmdb:/usr/share/elasticsearch/config/ingest-geoip/GeoLite2-City.mmdb:ro" - "./GeoLite2-Country.mmdb:/usr/share/elasticsearch/config/ingest-geoip/GeoLite2-Country.mmdb:ro" + - "./docker/elasticsearch/roles.yml:/usr/share/elasticsearch/config/roles.yml" + - "./docker/elasticsearch/users:/usr/share/elasticsearch/config/users" + - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: image: docker.elastic.co/logstash/logstash@sha256:e01cf165142edf8d67485115b938c94deeda66153e9516aa2ce69ee417c5fc33 @@ -33,12 +38,22 @@ services: retries: 600 interval: 1s volumes: - - ./docker/logstash/pipeline:/usr/share/logstash/pipeline:ro + - ./docker/logstash/pipeline-xpack:/usr/share/logstash/pipeline:ro - ./docker/logstash/pki:/etc/pki:ro kibana: image: docker.elastic.co/kibana/kibana:8.1.0-f5a18001-SNAPSHOT - healthcheck: - test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] - retries: 600 - interval: 1s + environment: + - "ELASTICSEARCH_USERNAME=kibana_system_user" + - "ELASTICSEARCH_PASSWORD=testing" + - "XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=gwaXhuYzE6l3r1wh5ZdSkJvtK6uSw11d" + - "XPACK_SECURITY_ENCRYPTIONKEY=wZSVeczkXAmebqNgfcKEzNMmQCBZKkSH" +# - "XPACK_XPACK_MAIN_TELEMETRY_ENABLED=false" + - "XPACK_REPORTING_ENCRYPTIONKEY=xCyqJUFqrUJJKxjZVGfnhrRkyqqaKeAG" + - "LOGGING_ROOT_LEVEL=all" + # When extend is used it merges healthcheck.tests, see: + # https://github.com/docker/compose/issues/8962 + # healthcheck: + # test: ["CMD-SHELL", "curl -u beats:testing -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] + # retries: 600 + diff --git a/x-pack/filebeat/docker-compose.yml b/x-pack/filebeat/docker-compose.yml index 0c0b477a611..c53bb1ca983 100644 --- a/x-pack/filebeat/docker-compose.yml +++ b/x-pack/filebeat/docker-compose.yml @@ -9,6 +9,8 @@ services: - BEAT_STRICT_PERMS=false - ES_HOST=elasticsearch - ES_PORT=9200 + - ES_USER=beats + - ES_PASS=testing working_dir: /go/src/github.com/elastic/beats/x-pack/filebeat volumes: - ${PWD}/../..:/go/src/github.com/elastic/beats/ @@ -26,4 +28,8 @@ services: extends: file: ${ES_BEATS}/testing/environments/${STACK_ENVIRONMENT}.yml service: elasticsearch + healthcheck: + test: ["CMD-SHELL", "curl -u admin:testing -s http://localhost:9200/_cat/health?h=status | grep -q green"] + retries: 300 + interval: 1s diff --git a/x-pack/filebeat/tests/system/config/filebeat_modules.yml.j2 b/x-pack/filebeat/tests/system/config/filebeat_modules.yml.j2 index 6df0f3ba0d9..800dbb0d46d 100644 --- a/x-pack/filebeat/tests/system/config/filebeat_modules.yml.j2 +++ b/x-pack/filebeat/tests/system/config/filebeat_modules.yml.j2 @@ -12,8 +12,10 @@ filebeat.registry: {% endif %} {%endif%} -output.elasticsearch.hosts: ["{{ elasticsearch_url }}"] +output.elasticsearch.hosts: ["{{ elasticsearch.host }}"] output.elasticsearch.index: {{ index_name }} +output.elasticsearch.username: {{ elasticsearch.user }} +output.elasticsearch.password: {{ elasticsearch.pass }} setup.template.name: {{ index_name }} setup.template.pattern: {{ index_name }}* diff --git a/x-pack/functionbeat/docker-compose.yml 
b/x-pack/functionbeat/docker-compose.yml index e49a7cdac29..aa6cc364a7e 100644 --- a/x-pack/functionbeat/docker-compose.yml +++ b/x-pack/functionbeat/docker-compose.yml @@ -22,3 +22,7 @@ services: extends: file: ${ES_BEATS}/testing/environments/${TESTING_ENVIRONMENT}.yml service: elasticsearch + healthcheck: + test: ["CMD-SHELL", "curl -u admin:testing -s http://localhost:9200/_cat/health?h=status | grep -q green"] + retries: 300 + interval: 1s diff --git a/x-pack/libbeat/docker-compose.yml b/x-pack/libbeat/docker-compose.yml index d89e6a30746..b7c84484d58 100644 --- a/x-pack/libbeat/docker-compose.yml +++ b/x-pack/libbeat/docker-compose.yml @@ -29,7 +29,7 @@ services: file: ${ES_BEATS}/testing/environments/${TESTING_ENVIRONMENT}.yml service: elasticsearch healthcheck: - test: ["CMD-SHELL", "curl -u myelastic:changeme -f http://localhost:9200/_cat/health?h=status | grep -q green"] + test: ["CMD-SHELL", "curl -u kibana_system_user:testing -f http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 1200 interval: 5s start_period: 60s @@ -50,8 +50,8 @@ services: file: ${ES_BEATS}/testing/environments/${TESTING_ENVIRONMENT}.yml service: kibana healthcheck: - test: ["CMD-SHELL", "curl -s -u myelastic:changeme -f http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] + test: ["CMD-SHELL", "curl -s -u kibana_system_user:testing -f http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 1200 interval: 5s start_period: 60s - command: /usr/local/bin/kibana-docker --xpack.security.enabled=true --elasticsearch.username=myelastic --elasticsearch.password=changeme + command: /usr/local/bin/kibana-docker From a56368191de99f67e381efe5df12bd84933c6f91 Mon Sep 17 00:00:00 2001 From: Pier-Hugues Pellerin Date: Tue, 25 Jan 2022 19:46:06 -0500 Subject: [PATCH 42/69] Revert changes in the artifact build from #29890 (#30007) --- dev-tools/mage/crossbuild.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/mage/crossbuild.go b/dev-tools/mage/crossbuild.go index ee209bd688c..46e75234a7e 100644 --- a/dev-tools/mage/crossbuild.go +++ b/dev-tools/mage/crossbuild.go @@ -44,7 +44,7 @@ const defaultCrossBuildTarget = "golangCrossBuild" var Platforms = BuildPlatforms.Defaults() // SelectedPackageTypes is the list of package types -var SelectedPackageTypes []PackageType = []PackageType{TarGz} +var SelectedPackageTypes []PackageType = []PackageType{} func init() { // Allow overriding via PLATFORMS. From e0053f7e3544ddaed87d44a1ad32e196a16ea549 Mon Sep 17 00:00:00 2001 From: Anderson Queiroz Date: Wed, 26 Jan 2022 08:28:53 +0100 Subject: [PATCH 43/69] dev builds: keep debug symbols, no inline and no optimisations (#29961) --- dev-tools/mage/build.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/dev-tools/mage/build.go b/dev-tools/mage/build.go index 5c5156adf44..54d1b9f5c18 100644 --- a/dev-tools/mage/build.go +++ b/dev-tools/mage/build.go @@ -50,9 +50,6 @@ func DefaultBuildArgs() BuildArgs { args := BuildArgs{ Name: BeatName, CGO: build.Default.CgoEnabled, - LDFlags: []string{ - "-s", // Strip all debug symbols from binary (does not affect Go stack traces). 
- }, Vars: map[string]string{ elasticBeatsModulePath + "/libbeat/version.buildTime": "{{ date }}", elasticBeatsModulePath + "/libbeat/version.commit": "{{ commit }}", @@ -63,18 +60,26 @@ func DefaultBuildArgs() BuildArgs { args.Vars[elasticBeatsModulePath+"/libbeat/version.qualifier"] = "{{ .Qualifier }}" } - if positionIndependendCodeSupported() { + if positionIndependentCodeSupported() { args.ExtraFlags = append(args.ExtraFlags, "-buildmode", "pie") } + if DevBuild { + // Disable optimizations (-N) and inlining (-l) for debugging. + args.ExtraFlags = append(args.ExtraFlags, `-gcflags`, `"all=-N -l"`) + } else { + // Strip all debug symbols from binary (does not affect Go stack traces). + args.LDFlags = append(args.LDFlags, "-s") + } + return args } -// positionIndependendCodeSupported checks if the target platform support position independen code (or ASLR). +// positionIndependentCodeSupported checks if the target platform support position independent code (or ASLR). // // The list of supported platforms is compiled based on the Go release notes: https://golang.org/doc/devel/release.html // The list has been updated according to the Go version: 1.16 -func positionIndependendCodeSupported() bool { +func positionIndependentCodeSupported() bool { return oneOf(Platform.GOOS, "darwin") || (Platform.GOOS == "linux" && oneOf(Platform.GOARCH, "riscv64", "amd64", "arm", "arm64", "ppc64le", "386")) || (Platform.GOOS == "aix" && Platform.GOARCH == "ppc64") || From b4ca6baba179f07d796c47af7ff6acc713856a26 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 26 Jan 2022 03:13:14 -0500 Subject: [PATCH 44/69] [Automation] Update go release version to 1.17.6 (#29761) Co-authored-by: apmmachine --- .go-version | 2 +- auditbeat/Dockerfile | 2 +- filebeat/Dockerfile | 2 +- heartbeat/Dockerfile | 2 +- libbeat/Dockerfile | 2 +- libbeat/docs/version.asciidoc | 2 +- metricbeat/Dockerfile | 2 +- packetbeat/Dockerfile | 2 +- x-pack/elastic-agent/Dockerfile | 2 +- x-pack/functionbeat/Dockerfile | 2 +- x-pack/libbeat/Dockerfile | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.go-version b/.go-version index ff278344b33..622f042fdce 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.17.5 +1.17.6 diff --git a/auditbeat/Dockerfile b/auditbeat/Dockerfile index 08cff3f06a7..69f62bb1457 100644 --- a/auditbeat/Dockerfile +++ b/auditbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.5 +FROM golang:1.17.6 RUN \ apt-get update \ diff --git a/filebeat/Dockerfile b/filebeat/Dockerfile index 3b17f95e998..f07051ede6e 100644 --- a/filebeat/Dockerfile +++ b/filebeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.5 +FROM golang:1.17.6 RUN \ apt-get update \ diff --git a/heartbeat/Dockerfile b/heartbeat/Dockerfile index f4a1faae369..3a78a045c52 100644 --- a/heartbeat/Dockerfile +++ b/heartbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.5 +FROM golang:1.17.6 RUN \ apt-get update \ diff --git a/libbeat/Dockerfile b/libbeat/Dockerfile index 97a1b634384..c455c4179c0 100644 --- a/libbeat/Dockerfile +++ b/libbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.5 +FROM golang:1.17.6 RUN \ apt-get update \ diff --git a/libbeat/docs/version.asciidoc b/libbeat/docs/version.asciidoc index a09f29c66bf..a9f0a4a1110 100644 --- a/libbeat/docs/version.asciidoc +++ b/libbeat/docs/version.asciidoc @@ -1,6 +1,6 @@ :stack-version: 8.0.0 :doc-branch: master -:go-version: 1.17.5 +:go-version: 1.17.6 :release-state: unreleased :python: 3.7 :docker: 1.12 diff --git 
a/metricbeat/Dockerfile b/metricbeat/Dockerfile index e1c97b72d36..933e2ad33a3 100644 --- a/metricbeat/Dockerfile +++ b/metricbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.5 +FROM golang:1.17.6 RUN \ apt update \ diff --git a/packetbeat/Dockerfile b/packetbeat/Dockerfile index 0a5a4a84128..b6078ad2b58 100644 --- a/packetbeat/Dockerfile +++ b/packetbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.5 +FROM golang:1.17.6 RUN \ apt-get update \ diff --git a/x-pack/elastic-agent/Dockerfile b/x-pack/elastic-agent/Dockerfile index 9c7dce7a435..2e9b5991233 100644 --- a/x-pack/elastic-agent/Dockerfile +++ b/x-pack/elastic-agent/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.17.5 +ARG GO_VERSION=1.17.6 FROM circleci/golang:${GO_VERSION} diff --git a/x-pack/functionbeat/Dockerfile b/x-pack/functionbeat/Dockerfile index 5a75f945955..72dfd1ccbf3 100644 --- a/x-pack/functionbeat/Dockerfile +++ b/x-pack/functionbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.5 +FROM golang:1.17.6 RUN \ apt-get update \ diff --git a/x-pack/libbeat/Dockerfile b/x-pack/libbeat/Dockerfile index 78c387b04d9..e49fe27d2c0 100644 --- a/x-pack/libbeat/Dockerfile +++ b/x-pack/libbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.5 +FROM golang:1.17.6 RUN \ apt-get update \ From 2718e9b93204a86e7253fe1d8d3918e9d3d7921d Mon Sep 17 00:00:00 2001 From: Yash Tewari Date: Wed, 26 Jan 2022 14:54:20 +0530 Subject: [PATCH 45/69] osquerybeat/ext: Make packages non-internal. (#29993) --- x-pack/osquerybeat/ext/osquery-extension/main_darwin.go | 2 +- x-pack/osquerybeat/ext/osquery-extension/main_linux.go | 2 +- .../ext/osquery-extension/{internal => pkg}/hostfs/common.go | 0 .../ext/osquery-extension/{internal => pkg}/hostfs/group.go | 0 .../ext/osquery-extension/{internal => pkg}/hostfs/passwd.go | 0 .../ext/osquery-extension/{internal => pkg}/proc/cmdline.go | 0 .../ext/osquery-extension/{internal => pkg}/proc/io.go | 0 .../ext/osquery-extension/{internal => pkg}/proc/link.go | 0 .../ext/osquery-extension/{internal => pkg}/proc/list.go | 0 .../ext/osquery-extension/{internal => pkg}/proc/stat.go | 0 .../ext/osquery-extension/{internal => pkg}/proc/strconv.go | 0 .../ext/osquery-extension/{internal => pkg}/proc/uptime.go | 0 .../osquery-extension/{internal => pkg}/tables/host_groups.go | 2 +- .../{internal => pkg}/tables/host_processes.go | 4 ++-- .../osquery-extension/{internal => pkg}/tables/host_users.go | 2 +- 15 files changed, 6 insertions(+), 6 deletions(-) rename x-pack/osquerybeat/ext/osquery-extension/{internal => pkg}/hostfs/common.go (100%) rename x-pack/osquerybeat/ext/osquery-extension/{internal => pkg}/hostfs/group.go (100%) rename x-pack/osquerybeat/ext/osquery-extension/{internal => pkg}/hostfs/passwd.go (100%) rename x-pack/osquerybeat/ext/osquery-extension/{internal => pkg}/proc/cmdline.go (100%) rename x-pack/osquerybeat/ext/osquery-extension/{internal => pkg}/proc/io.go (100%) rename x-pack/osquerybeat/ext/osquery-extension/{internal => pkg}/proc/link.go (100%) rename x-pack/osquerybeat/ext/osquery-extension/{internal => pkg}/proc/list.go (100%) rename x-pack/osquerybeat/ext/osquery-extension/{internal => pkg}/proc/stat.go (100%) rename x-pack/osquerybeat/ext/osquery-extension/{internal => pkg}/proc/strconv.go (100%) rename x-pack/osquerybeat/ext/osquery-extension/{internal => pkg}/proc/uptime.go (100%) rename x-pack/osquerybeat/ext/osquery-extension/{internal => pkg}/tables/host_groups.go (97%) rename x-pack/osquerybeat/ext/osquery-extension/{internal => pkg}/tables/host_processes.go (99%) rename 
x-pack/osquerybeat/ext/osquery-extension/{internal => pkg}/tables/host_users.go (97%) diff --git a/x-pack/osquerybeat/ext/osquery-extension/main_darwin.go b/x-pack/osquerybeat/ext/osquery-extension/main_darwin.go index eb9fb591e4c..3896a5c6218 100644 --- a/x-pack/osquerybeat/ext/osquery-extension/main_darwin.go +++ b/x-pack/osquerybeat/ext/osquery-extension/main_darwin.go @@ -11,7 +11,7 @@ import ( "github.com/osquery/osquery-go" "github.com/osquery/osquery-go/plugin/table" - "github.com/elastic/beats/v7/x-pack/osquerybeat/ext/osquery-extension/internal/tables" + "github.com/elastic/beats/v7/x-pack/osquerybeat/ext/osquery-extension/pkg/tables" ) func RegisterTables(server *osquery.ExtensionManagerServer) { diff --git a/x-pack/osquerybeat/ext/osquery-extension/main_linux.go b/x-pack/osquerybeat/ext/osquery-extension/main_linux.go index 953e35974ba..dce243d70d0 100644 --- a/x-pack/osquerybeat/ext/osquery-extension/main_linux.go +++ b/x-pack/osquerybeat/ext/osquery-extension/main_linux.go @@ -11,7 +11,7 @@ import ( "github.com/osquery/osquery-go" "github.com/osquery/osquery-go/plugin/table" - "github.com/elastic/beats/v7/x-pack/osquerybeat/ext/osquery-extension/internal/tables" + "github.com/elastic/beats/v7/x-pack/osquerybeat/ext/osquery-extension/pkg/tables" ) func RegisterTables(server *osquery.ExtensionManagerServer) { diff --git a/x-pack/osquerybeat/ext/osquery-extension/internal/hostfs/common.go b/x-pack/osquerybeat/ext/osquery-extension/pkg/hostfs/common.go similarity index 100% rename from x-pack/osquerybeat/ext/osquery-extension/internal/hostfs/common.go rename to x-pack/osquerybeat/ext/osquery-extension/pkg/hostfs/common.go diff --git a/x-pack/osquerybeat/ext/osquery-extension/internal/hostfs/group.go b/x-pack/osquerybeat/ext/osquery-extension/pkg/hostfs/group.go similarity index 100% rename from x-pack/osquerybeat/ext/osquery-extension/internal/hostfs/group.go rename to x-pack/osquerybeat/ext/osquery-extension/pkg/hostfs/group.go diff --git a/x-pack/osquerybeat/ext/osquery-extension/internal/hostfs/passwd.go b/x-pack/osquerybeat/ext/osquery-extension/pkg/hostfs/passwd.go similarity index 100% rename from x-pack/osquerybeat/ext/osquery-extension/internal/hostfs/passwd.go rename to x-pack/osquerybeat/ext/osquery-extension/pkg/hostfs/passwd.go diff --git a/x-pack/osquerybeat/ext/osquery-extension/internal/proc/cmdline.go b/x-pack/osquerybeat/ext/osquery-extension/pkg/proc/cmdline.go similarity index 100% rename from x-pack/osquerybeat/ext/osquery-extension/internal/proc/cmdline.go rename to x-pack/osquerybeat/ext/osquery-extension/pkg/proc/cmdline.go diff --git a/x-pack/osquerybeat/ext/osquery-extension/internal/proc/io.go b/x-pack/osquerybeat/ext/osquery-extension/pkg/proc/io.go similarity index 100% rename from x-pack/osquerybeat/ext/osquery-extension/internal/proc/io.go rename to x-pack/osquerybeat/ext/osquery-extension/pkg/proc/io.go diff --git a/x-pack/osquerybeat/ext/osquery-extension/internal/proc/link.go b/x-pack/osquerybeat/ext/osquery-extension/pkg/proc/link.go similarity index 100% rename from x-pack/osquerybeat/ext/osquery-extension/internal/proc/link.go rename to x-pack/osquerybeat/ext/osquery-extension/pkg/proc/link.go diff --git a/x-pack/osquerybeat/ext/osquery-extension/internal/proc/list.go b/x-pack/osquerybeat/ext/osquery-extension/pkg/proc/list.go similarity index 100% rename from x-pack/osquerybeat/ext/osquery-extension/internal/proc/list.go rename to 
x-pack/osquerybeat/ext/osquery-extension/pkg/proc/list.go diff --git a/x-pack/osquerybeat/ext/osquery-extension/internal/proc/stat.go b/x-pack/osquerybeat/ext/osquery-extension/pkg/proc/stat.go similarity index 100% rename from x-pack/osquerybeat/ext/osquery-extension/internal/proc/stat.go rename to x-pack/osquerybeat/ext/osquery-extension/pkg/proc/stat.go diff --git a/x-pack/osquerybeat/ext/osquery-extension/internal/proc/strconv.go b/x-pack/osquerybeat/ext/osquery-extension/pkg/proc/strconv.go similarity index 100% rename from x-pack/osquerybeat/ext/osquery-extension/internal/proc/strconv.go rename to x-pack/osquerybeat/ext/osquery-extension/pkg/proc/strconv.go diff --git a/x-pack/osquerybeat/ext/osquery-extension/internal/proc/uptime.go b/x-pack/osquerybeat/ext/osquery-extension/pkg/proc/uptime.go similarity index 100% rename from x-pack/osquerybeat/ext/osquery-extension/internal/proc/uptime.go rename to x-pack/osquerybeat/ext/osquery-extension/pkg/proc/uptime.go diff --git a/x-pack/osquerybeat/ext/osquery-extension/internal/tables/host_groups.go b/x-pack/osquerybeat/ext/osquery-extension/pkg/tables/host_groups.go similarity index 97% rename from x-pack/osquerybeat/ext/osquery-extension/internal/tables/host_groups.go rename to x-pack/osquerybeat/ext/osquery-extension/pkg/tables/host_groups.go index 1197f5753ef..47e7026eca5 100644 --- a/x-pack/osquerybeat/ext/osquery-extension/internal/tables/host_groups.go +++ b/x-pack/osquerybeat/ext/osquery-extension/pkg/tables/host_groups.go @@ -9,7 +9,7 @@ import ( "github.com/osquery/osquery-go/plugin/table" - "github.com/elastic/beats/v7/x-pack/osquerybeat/ext/osquery-extension/internal/hostfs" + "github.com/elastic/beats/v7/x-pack/osquerybeat/ext/osquery-extension/pkg/hostfs" ) const ( diff --git a/x-pack/osquerybeat/ext/osquery-extension/internal/tables/host_processes.go b/x-pack/osquerybeat/ext/osquery-extension/pkg/tables/host_processes.go similarity index 99% rename from x-pack/osquerybeat/ext/osquery-extension/internal/tables/host_processes.go rename to x-pack/osquerybeat/ext/osquery-extension/pkg/tables/host_processes.go index baf3f08f9e5..77c515c8296 100644 --- a/x-pack/osquerybeat/ext/osquery-extension/internal/tables/host_processes.go +++ b/x-pack/osquerybeat/ext/osquery-extension/pkg/tables/host_processes.go @@ -13,8 +13,8 @@ import ( "github.com/osquery/osquery-go/plugin/table" - "github.com/elastic/beats/v7/x-pack/osquerybeat/ext/osquery-extension/internal/hostfs" - "github.com/elastic/beats/v7/x-pack/osquerybeat/ext/osquery-extension/internal/proc" + "github.com/elastic/beats/v7/x-pack/osquerybeat/ext/osquery-extension/pkg/hostfs" + "github.com/elastic/beats/v7/x-pack/osquerybeat/ext/osquery-extension/pkg/proc" ) const ( diff --git a/x-pack/osquerybeat/ext/osquery-extension/internal/tables/host_users.go b/x-pack/osquerybeat/ext/osquery-extension/pkg/tables/host_users.go similarity index 97% rename from x-pack/osquerybeat/ext/osquery-extension/internal/tables/host_users.go rename to x-pack/osquerybeat/ext/osquery-extension/pkg/tables/host_users.go index 52a3b084745..4a60c5135b8 100644 --- a/x-pack/osquerybeat/ext/osquery-extension/internal/tables/host_users.go +++ b/x-pack/osquerybeat/ext/osquery-extension/pkg/tables/host_users.go @@ -9,7 +9,7 @@ import ( "github.com/osquery/osquery-go/plugin/table" - "github.com/elastic/beats/v7/x-pack/osquerybeat/ext/osquery-extension/internal/hostfs" + 
"github.com/elastic/beats/v7/x-pack/osquerybeat/ext/osquery-extension/pkg/hostfs" ) const ( From b4cdb49f9479c0654c2371206b3ea2846278845d Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Wed, 26 Jan 2022 09:35:52 +0000 Subject: [PATCH 46/69] ci: fail fast when installing the windows tools (#29999) --- .ci/scripts/install-tools.bat | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/.ci/scripts/install-tools.bat b/.ci/scripts/install-tools.bat index c2098796d97..8cb837553dc 100644 --- a/.ci/scripts/install-tools.bat +++ b/.ci/scripts/install-tools.bat @@ -16,21 +16,15 @@ SET PREVIOUS_USERPROFILE=%USERPROFILE% SET USERPROFILE=%OLD_USERPROFILE% IF NOT EXIST C:\Python38\python.exe ( REM Install python 3.8 - choco install python -y -r --no-progress --version 3.8.5 - IF NOT ERRORLEVEL 0 ( - exit /b 1 - ) + choco install python -y -r --no-progress --version 3.8.5 || exit /b 1 ) python --version where python -where /q gcc -IF ERRORLEVEL 1 ( +WHERE /q gcc +IF %ERRORLEVEL% NEQ 0 ( REM Install mingw 5.3.0 - choco install mingw -y -r --no-progress --version 5.3.0 - IF NOT ERRORLEVEL 0 ( - exit /b 1 - ) + choco install mingw -y -r --no-progress --version 5.3.0 || exit /b 1 ) gcc --version where gcc From 45cff00533f891a61c9fb95ea9e76b0c8a374585 Mon Sep 17 00:00:00 2001 From: endorama <526307+endorama@users.noreply.github.com> Date: Wed, 26 Jan 2022 11:24:48 +0100 Subject: [PATCH 47/69] [Metricbeat] support credentials_json in gcp module (#29584) --- CHANGELOG-developer.next.asciidoc | 1 + .../metricbeat/module/gcp/billing/billing.go | 22 ++++++++++++++++--- .../module/gcp/metrics/metricset.go | 13 ++++++++++- 3 files changed, 32 insertions(+), 4 deletions(-) diff --git a/CHANGELOG-developer.next.asciidoc b/CHANGELOG-developer.next.asciidoc index dcb2a5665f4..92566fdf5f9 100644 --- a/CHANGELOG-developer.next.asciidoc +++ b/CHANGELOG-developer.next.asciidoc @@ -129,6 +129,7 @@ The list below covers the major changes between 7.0.0-rc2 and master only. - Introduce `libbeat/beat.Beat.OutputConfigReloader` {pull}28048[28048] - Update Go version to 1.17.1. 
{pull}27543[27543] - Whitelist `GCP_*` environment variables in dev tools {pull}28364[28364] +- Add support for `credentials_json` in `gcp` module, all metricsets {pull}29584[29584] ==== Deprecated diff --git a/x-pack/metricbeat/module/gcp/billing/billing.go b/x-pack/metricbeat/module/gcp/billing/billing.go index cecc6b4a0a4..fe075938623 100644 --- a/x-pack/metricbeat/module/gcp/billing/billing.go +++ b/x-pack/metricbeat/module/gcp/billing/billing.go @@ -8,6 +8,7 @@ import ( "context" "crypto/sha256" "encoding/hex" + "errors" "fmt" "strings" "time" @@ -49,7 +50,8 @@ type MetricSet struct { type config struct { Period time.Duration `config:"period" validate:"required"` ProjectID string `config:"project_id" validate:"required"` - CredentialsFilePath string `config:"credentials_file_path" validate:"required"` + CredentialsFilePath string `config:"credentials_file_path"` + CredentialsJSON string `config:"credentials_json"` DatasetID string `config:"dataset_id" validate:"required"` TablePattern string `config:"table_pattern"` CostType string `config:"cost_type"` @@ -57,6 +59,10 @@ type config struct { // Validate checks for deprecated config options func (c config) Validate() error { + if c.CredentialsFilePath == "" && c.CredentialsJSON == "" { + return errors.New("no credentials_file_path or credentials_json specified") + } + if c.CostType != "" { // cost_type can only be regular, tax, adjustment, or rounding error costTypes := []string{"regular", "tax", "adjustment", "rounding error"} @@ -106,7 +112,17 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) (err erro // find current month month := getCurrentMonth() - opt := []option.ClientOption{option.WithCredentialsFile(m.config.CredentialsFilePath)} + var opt []option.ClientOption + if m.config.CredentialsFilePath != "" && m.config.CredentialsJSON != "" { + return errors.New("both credentials_file_path and credentials_json specified, you must use only one of them") + } else if m.config.CredentialsFilePath != "" { + opt = []option.ClientOption{option.WithCredentialsFile(m.config.CredentialsFilePath)} + } else if m.config.CredentialsJSON != "" { + opt = []option.ClientOption{option.WithCredentialsJSON([]byte(m.config.CredentialsJSON))} + } else { + return errors.New("no credentials_file_path or credentials_json specified") + } + client, err := bigquery.NewClient(ctx, m.config.ProjectID, opt...) if err != nil { return fmt.Errorf("gerror creating bigquery client: %w", err) @@ -286,7 +302,7 @@ func getCurrentDate() string { } func generateEventID(currentDate string, rowItems []bigquery.Value) string { - // create eventID using hash of current_date + invoice_month + project_id + cost_type + // create eventID using hash of current_date + invoice.month + project.id + project.name // This will prevent more than one billing metric getting collected in the same day. 
 	eventID := currentDate + rowItems[0].(string) + rowItems[1].(string) + rowItems[2].(string)
 	h := sha256.New()
diff --git a/x-pack/metricbeat/module/gcp/metrics/metricset.go b/x-pack/metricbeat/module/gcp/metrics/metricset.go
index e121d6a3a02..f24f7701e83 100644
--- a/x-pack/metricbeat/module/gcp/metrics/metricset.go
+++ b/x-pack/metricbeat/module/gcp/metrics/metricset.go
@@ -104,6 +104,7 @@ type config struct {
 	ProjectID           string `config:"project_id" validate:"required"`
 	ExcludeLabels       bool   `config:"exclude_labels"`
 	CredentialsFilePath string `config:"credentials_file_path"`
+	CredentialsJSON     string `config:"credentials_json"`
 
 	opt    []option.ClientOption
 	period *duration.Duration
@@ -129,7 +130,17 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
 	}
 
 	m.MetricsConfig = metricsConfigs.Metrics
-	m.config.opt = []option.ClientOption{option.WithCredentialsFile(m.config.CredentialsFilePath)}
+
+	if m.config.CredentialsFilePath != "" && m.config.CredentialsJSON != "" {
+		return m, errors.New("both credentials_file_path and credentials_json specified, you must use only one of them")
+	} else if m.config.CredentialsFilePath != "" {
+		m.config.opt = []option.ClientOption{option.WithCredentialsFile(m.config.CredentialsFilePath)}
+	} else if m.config.CredentialsJSON != "" {
+		m.config.opt = []option.ClientOption{option.WithCredentialsJSON([]byte(m.config.CredentialsJSON))}
+	} else {
+		return m, errors.New("no credentials_file_path or credentials_json specified")
+	}
+
 	m.config.period = &duration.Duration{
 		Seconds: int64(m.Module().Config().Period.Seconds()),
 	}

From c2f2486d2c7a39b430b3ba4dc6ffdba4199991eb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?=
Date: Wed, 26 Jan 2022 16:58:26 +0100
Subject: [PATCH 48/69] Update prev-minor env to 8.0 on master (8.1) (#30027)

---
 testing/environments/prev-minor.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/testing/environments/prev-minor.yml b/testing/environments/prev-minor.yml
index a4506188f6b..5daa3b37a3f 100644
--- a/testing/environments/prev-minor.yml
+++ b/testing/environments/prev-minor.yml
@@ -3,7 +3,7 @@ version: '2.3'
 
 services:
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:7.15.0
+    image: docker.elastic.co/elasticsearch/elasticsearch:8.0.0-rc1
     healthcheck:
       test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"]
       retries: 300
@@ -21,7 +21,7 @@ services:
       - "action.destructive_requires_name=false"
 
   logstash:
-    image: docker.elastic.co/logstash/logstash:7.15.0
+    image: docker.elastic.co/logstash/logstash:8.0.0-rc1
     healthcheck:
       test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"]
       retries: 600
@@ -31,7 +31,7 @@ services:
       - ./docker/logstash/pki:/etc/pki:ro
 
   kibana:
-    image: docker.elastic.co/kibana/kibana:7.15.0
+    image: docker.elastic.co/kibana/kibana:8.0.0-rc1
     healthcheck:
       test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"]
       retries: 600

From a473d9467517b662cf21fa963119114dfc6e8b29 Mon Sep 17 00:00:00 2001
From: Denis Rechkunov
Date: Wed, 26 Jan 2022 20:16:27 +0100
Subject: [PATCH 49/69] Document how timestamps set in Kafka messages affect retention (#30025)

Some customers find the default behavior when using Kafka outputs unexpected. There are 2 possible options:
1. Message timestamp to be assigned on event creation (in beats), when the actual event was emitted and then propagated to Kafka
2.
Message timestamp to be assigned when Kafka actually receives the message

Since beats always set the message timestamp for Kafka version 0.10.0.0+ it's worth documenting how to switch between the 2 options in Kafka configuration using the `log.message.timestamp.type` parameter.
---
 libbeat/outputs/kafka/docs/kafka.asciidoc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libbeat/outputs/kafka/docs/kafka.asciidoc b/libbeat/outputs/kafka/docs/kafka.asciidoc
index 28892f57aa0..a9b517d9490 100644
--- a/libbeat/outputs/kafka/docs/kafka.asciidoc
+++ b/libbeat/outputs/kafka/docs/kafka.asciidoc
@@ -11,6 +11,8 @@ To use this output, edit the {beatname_uc} configuration file to disable the {es
 output by commenting it out, and enable the Kafka output by uncommenting the
 Kafka section.
 
+NOTE: For Kafka version 0.10.0.0+ the message creation timestamp is set by beats and equals to the initial timestamp of the event. This affects the retention policy in Kafka: for example, if a beat event was created 2 weeks ago, the retention policy is set to 7 days and the message from beats arrives to Kafka today, it's going to be immediately discarded since the timestamp value is before the last 7 days. It's possible to change this behavior by setting timestamps on message arrival instead, so the message is not discarded but kept for 7 more days. To do that, please set `log.message.timestamp.type` to `LogAppendTime` (default `CreateTime`) in the Kafka configuration.
+
 Example configuration:
 
 [source,yaml]
@@ -62,8 +64,6 @@ The cluster metadata contain the actual Kafka brokers events are published to.
 Kafka version {beatname_lc} is assumed to run against. Defaults to 1.0.0.
 
-Event timestamps will be added, if version 0.10.0.0+ is enabled.
-
 Valid values are all kafka releases in between `0.8.2.0` and `2.0.0`.
 
 See <> for information on supported versions.

From d84d342cdc5d59a6f5ebfce32b725955d3884fd6 Mon Sep 17 00:00:00 2001
From: Pier-Hugues Pellerin
Date: Wed, 26 Jan 2022 16:32:10 -0500
Subject: [PATCH 50/69] Correctly fix how selected packages are defined (#30039)

I will skip the run of the test suite.

- It will take 3 hours to run.
- It will not test the present code path; I will do a follow-up on that.
- Teams are blocked on this in the release manager.
- This code is now exactly the same as 7.17 https://github.com/elastic/beats/blob/7.17/dev-tools/mage/crossbuild.go#L47

---
 dev-tools/mage/crossbuild.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev-tools/mage/crossbuild.go b/dev-tools/mage/crossbuild.go
index 46e75234a7e..305a2588dd2 100644
--- a/dev-tools/mage/crossbuild.go
+++ b/dev-tools/mage/crossbuild.go
@@ -44,7 +44,7 @@ const defaultCrossBuildTarget = "golangCrossBuild"
 var Platforms = BuildPlatforms.Defaults()
 
 // SelectedPackageTypes is the list of package types
-var SelectedPackageTypes []PackageType = []PackageType{TarGz}
+var SelectedPackageTypes []PackageType
 
 func init() {
 	// Allow overriding via PLATFORMS.

From 47b8d02ab6d7a8ed4ed8d08194326b76971b285c Mon Sep 17 00:00:00 2001
From: Andrew Kroh
Date: Wed, 26 Jan 2022 20:01:10 -0500
Subject: [PATCH 51/69] [Filebeat] Fix panic in decode_cef when recovering from invalid data (#30038)

* Fix panic in decode_cef when recovering from invalid data

When recovering from an invalid extension value, the escape sequence state was not cleared. This caused the parser to attempt to unescape the next extension, which resulted in invalid data or a panic.
Fixes #30010 * Encapsulate non-ragel state Document and encapsulate the non-ragel state variables. ``` $ benchcmp before.txt after.txt benchmark old ns/op new ns/op delta BenchmarkEventUnpack-12 1991 1544 -22.45% benchmark old allocs new allocs delta BenchmarkEventUnpack-12 13 13 +0.00% benchmark old bytes new bytes delta BenchmarkEventUnpack-12 642 642 +0.00% ``` --- CHANGELOG.next.asciidoc | 1 + .../filebeat/processors/decode_cef/cef/cef.go | 23 +- .../filebeat/processors/decode_cef/cef/cef.rl | 72 +++-- .../processors/decode_cef/cef/cef_test.go | 12 + .../processors/decode_cef/cef/parser.go | 284 +++++++++--------- 5 files changed, 219 insertions(+), 173 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 92e3291317e..4bc902fb3b5 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -113,6 +113,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - ibmmq: Fixed `@timestamp` not being populated with correct values. {pull}29773[29773] - Fix using log_group_name_prefix in aws-cloudwatch input. {pull}29695[29695] - aws-s3: Improve gzip detection to avoid false negatives. {issue}29968[29968] +- decode_cef: Fix panic when recovering from invalid CEF extensions that contain escape characters. {issue}30010[30010] *Heartbeat* diff --git a/x-pack/filebeat/processors/decode_cef/cef/cef.go b/x-pack/filebeat/processors/decode_cef/cef/cef.go index 73808171ace..70480335f0a 100644 --- a/x-pack/filebeat/processors/decode_cef/cef/cef.go +++ b/x-pack/filebeat/processors/decode_cef/cef/cef.go @@ -152,28 +152,31 @@ func (e *Event) Unpack(data string, opts ...Option) error { return multierr.Combine(errs...) } +type escapePosition struct { + start, end int +} + // replaceEscapes replaces the escaped characters contained in v with their // unescaped value. -func replaceEscapes(v string, startOffset int, escapes []int) string { +func replaceEscapes(v string, startOffset int, escapes []escapePosition) string { if len(escapes) == 0 { return v } // Adjust escape offsets relative to the start offset of v. for i := 0; i < len(escapes); i++ { - escapes[i] = escapes[i] - startOffset + escapes[i].start = escapes[i].start - startOffset + escapes[i].end = escapes[i].end - startOffset } var buf strings.Builder - var end int + var prevEnd int // Iterate over escapes and replace them. - for i := 0; i < len(escapes); i += 2 { - start := escapes[i] - buf.WriteString(v[end:start]) + for _, escape := range escapes { + buf.WriteString(v[prevEnd:escape.start]) - end = escapes[i+1] - value := v[start:end] + value := v[escape.start:escape.end] switch value { case `\n`: @@ -186,8 +189,10 @@ func replaceEscapes(v string, startOffset int, escapes []int) string { buf.WriteString(value[1:]) } } + + prevEnd = escape.end } - buf.WriteString(v[end:]) + buf.WriteString(v[prevEnd:]) return buf.String() } diff --git a/x-pack/filebeat/processors/decode_cef/cef/cef.rl b/x-pack/filebeat/processors/decode_cef/cef/cef.rl index 09b7fd6e962..256d5ee8672 100644 --- a/x-pack/filebeat/processors/decode_cef/cef/cef.rl +++ b/x-pack/filebeat/processors/decode_cef/cef/cef.rl @@ -15,17 +15,31 @@ import ( variable pe pe; }%% +type cefState struct { + key string // Extension key. + valueStart int // Start index of extension value. + valueEnd int // End index of extension value. + escapes []escapePosition // Array of escapes indices within the current value. 
+} + +func (s *cefState) reset() { + s.key = "" + s.valueStart = 0 + s.valueEnd = 0 + s.escapes = s.escapes[:0] +} + +func (s *cefState) pushEscape(start, end int) { + s.escapes = append(s.escapes, escapePosition{start, end}) +} + // unpack unpacks a CEF message. func (e *Event) unpack(data string) error { cs, p, pe, eof := 0, 0, len(data), len(data) mark, mark_slash := 0, 0 - var escapes []int - - // Extension key. - var extKey string - // Extension value start and end indices. - extValueStart, extValueEnd := 0, 0 + // state related to CEF values. + var state cefState // recoveredErrs are problems with the message that the parser was able to // recover from (though the parsing might not be "correct"). @@ -42,62 +56,62 @@ func (e *Event) unpack(data string) error { mark_slash = p } action mark_escape { - escapes = append(escapes, mark_slash, p) + state.pushEscape(mark_slash, p) } action version { e.Version, _ = strconv.Atoi(data[mark:p]) } action device_vendor { - e.DeviceVendor = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.DeviceVendor = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() } action device_product { - e.DeviceProduct = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.DeviceProduct = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() } action device_version { - e.DeviceVersion = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.DeviceVersion = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() } action device_event_class_id { - e.DeviceEventClassID = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.DeviceEventClassID = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() } action name { - e.Name = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.Name = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() } action severity { e.Severity = data[mark:p] } action extension_key { // A new extension key marks the end of the last extension value. - if len(extKey) > 0 && extValueStart <= mark - 1 { - e.pushExtension(extKey, replaceEscapes(data[extValueStart:mark-1], extValueStart, escapes)) - extKey, extValueStart, extValueEnd, escapes = "", 0, 0, escapes[:0] + if len(state.key) > 0 && state.valueStart <= mark - 1 { + e.pushExtension(state.key, replaceEscapes(data[state.valueStart:mark-1], state.valueStart, state.escapes)) + state.reset() } - extKey = data[mark:p] + state.key = data[mark:p] } action extension_value_start { - extValueStart = p; - extValueEnd = p + state.valueStart = p; + state.valueEnd = p } action extension_value_mark { - extValueEnd = p+1 + state.valueEnd = p+1 } action extension_eof { // Reaching the EOF marks the end of the final extension value. 
- if len(extKey) > 0 && extValueStart <= extValueEnd { - e.pushExtension(extKey, replaceEscapes(data[extValueStart:extValueEnd], extValueStart, escapes)) - extKey, extValueStart, extValueEnd, escapes = "", 0, 0, escapes[:0] + if len(state.key) > 0 && state.valueStart <= state.valueEnd { + e.pushExtension(state.key, replaceEscapes(data[state.valueStart:state.valueEnd], state.valueStart, state.escapes)) + state.reset() } } action extension_err { - recoveredErrs = append(recoveredErrs, fmt.Errorf("malformed value for %s at pos %d", extKey, p+1)) + recoveredErrs = append(recoveredErrs, fmt.Errorf("malformed value for %s at pos %d", state.key, p+1)) fhold; fnext gobble_extension; } action recover_next_extension { - extKey, extValueStart, extValueEnd = "", 0, 0 + state.reset() // Resume processing at p, the start of the next extension key. p = mark; fnext extensions; diff --git a/x-pack/filebeat/processors/decode_cef/cef/cef_test.go b/x-pack/filebeat/processors/decode_cef/cef/cef_test.go index 7ab286e1f2c..51f3c937ebf 100644 --- a/x-pack/filebeat/processors/decode_cef/cef/cef_test.go +++ b/x-pack/filebeat/processors/decode_cef/cef/cef_test.go @@ -389,6 +389,18 @@ func TestEventUnpack(t *testing.T) { "msg": StringField("Newlines in messages\nare allowed.\r\nAnd so are carriage feeds\\newlines\\=."), }, e.Extensions) }) + + t.Run("error recovery with escape", func(t *testing.T) { + // Ensure no panic or regression of https://github.com/elastic/beats/issues/30010. + // key1 contains an escape, but then an invalid non-escaped =. + // This triggers the error recovery to try to read the next key. + var e Event + err := e.Unpack(`CEF:0|||||||key1=\\hi= key2=a`) + assert.Error(t, err) + assert.Equal(t, map[string]*Field{ + "key2": UndocumentedField("a"), + }, e.Extensions) + }) } func TestEventUnpackWithFullExtensionNames(t *testing.T) { diff --git a/x-pack/filebeat/processors/decode_cef/cef/parser.go b/x-pack/filebeat/processors/decode_cef/cef/parser.go index b9e19b03e7c..1968ea3ce42 100644 --- a/x-pack/filebeat/processors/decode_cef/cef/parser.go +++ b/x-pack/filebeat/processors/decode_cef/cef/parser.go @@ -33,17 +33,31 @@ const cef_en_main_cef_extensions int = 29 //line cef.rl:16 +type cefState struct { + key string // Extension key. + valueStart int // Start index of extension value. + valueEnd int // End index of extension value. + escapes []escapePosition // Array of escapes indices within the current value. +} + +func (s *cefState) reset() { + s.key = "" + s.valueStart = 0 + s.valueEnd = 0 + s.escapes = s.escapes[:0] +} + +func (s *cefState) pushEscape(start, end int) { + s.escapes = append(s.escapes, escapePosition{start, end}) +} + // unpack unpacks a CEF message. func (e *Event) unpack(data string) error { cs, p, pe, eof := 0, 0, len(data), len(data) mark, mark_slash := 0, 0 - var escapes []int - // Extension key. - var extKey string - - // Extension value start and end indices. - extValueStart, extValueEnd := 0, 0 + // state related to CEF values. + var state cefState // recoveredErrs are problems with the message that the parser was able to // recover from (though the parsing might not be "correct"). 
@@ -51,12 +65,12 @@ func (e *Event) unpack(data string) error { e.init(data) -//line parser.go:56 +//line parser.go:70 { cs = cef_start } -//line parser.go:61 +//line parser.go:75 { if (p) == (pe) { goto _test_eof @@ -961,299 +975,299 @@ func (e *Event) unpack(data string) error { goto f26 f0: -//line cef.rl:38 +//line cef.rl:52 mark = p goto _again f4: -//line cef.rl:41 +//line cef.rl:55 mark_slash = p goto _again f6: -//line cef.rl:44 +//line cef.rl:58 - escapes = append(escapes, mark_slash, p) + state.pushEscape(mark_slash, p) goto _again f1: -//line cef.rl:47 +//line cef.rl:61 e.Version, _ = strconv.Atoi(data[mark:p]) goto _again f5: -//line cef.rl:50 +//line cef.rl:64 - e.DeviceVendor = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.DeviceVendor = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() goto _again f10: -//line cef.rl:54 +//line cef.rl:68 - e.DeviceProduct = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.DeviceProduct = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() goto _again f13: -//line cef.rl:58 +//line cef.rl:72 - e.DeviceVersion = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.DeviceVersion = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() goto _again f16: -//line cef.rl:62 +//line cef.rl:76 - e.DeviceEventClassID = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.DeviceEventClassID = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() goto _again f19: -//line cef.rl:66 +//line cef.rl:80 - e.Name = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.Name = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() goto _again f22: -//line cef.rl:70 +//line cef.rl:84 e.Severity = data[mark:p] goto _again f23: -//line cef.rl:73 +//line cef.rl:87 // A new extension key marks the end of the last extension value. - if len(extKey) > 0 && extValueStart <= mark-1 { - e.pushExtension(extKey, replaceEscapes(data[extValueStart:mark-1], extValueStart, escapes)) - extKey, extValueStart, extValueEnd, escapes = "", 0, 0, escapes[:0] + if len(state.key) > 0 && state.valueStart <= mark-1 { + e.pushExtension(state.key, replaceEscapes(data[state.valueStart:mark-1], state.valueStart, state.escapes)) + state.reset() } - extKey = data[mark:p] + state.key = data[mark:p] goto _again f29: -//line cef.rl:81 +//line cef.rl:95 - extValueStart = p - extValueEnd = p + state.valueStart = p + state.valueEnd = p goto _again f25: -//line cef.rl:85 +//line cef.rl:99 - extValueEnd = p + 1 + state.valueEnd = p + 1 goto _again f24: -//line cef.rl:95 +//line cef.rl:109 - recoveredErrs = append(recoveredErrs, fmt.Errorf("malformed value for %s at pos %d", extKey, p+1)) + recoveredErrs = append(recoveredErrs, fmt.Errorf("malformed value for %s at pos %d", state.key, p+1)) (p)-- cs = 33 goto _again f26: -//line cef.rl:99 +//line cef.rl:113 - extKey, extValueStart, extValueEnd = "", 0, 0 + state.reset() // Resume processing at p, the start of the next extension key. 
p = mark cs = 29 goto _again f2: -//line cef.rl:38 +//line cef.rl:52 mark = p -//line cef.rl:41 +//line cef.rl:55 mark_slash = p goto _again f3: -//line cef.rl:38 +//line cef.rl:52 mark = p -//line cef.rl:50 +//line cef.rl:64 - e.DeviceVendor = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.DeviceVendor = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() goto _again f9: -//line cef.rl:38 +//line cef.rl:52 mark = p -//line cef.rl:54 +//line cef.rl:68 - e.DeviceProduct = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.DeviceProduct = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() goto _again f12: -//line cef.rl:38 +//line cef.rl:52 mark = p -//line cef.rl:58 +//line cef.rl:72 - e.DeviceVersion = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.DeviceVersion = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() goto _again f15: -//line cef.rl:38 +//line cef.rl:52 mark = p -//line cef.rl:62 +//line cef.rl:76 - e.DeviceEventClassID = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.DeviceEventClassID = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() goto _again f18: -//line cef.rl:38 +//line cef.rl:52 mark = p -//line cef.rl:66 +//line cef.rl:80 - e.Name = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.Name = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() goto _again f21: -//line cef.rl:38 +//line cef.rl:52 mark = p -//line cef.rl:70 +//line cef.rl:84 e.Severity = data[mark:p] goto _again f33: -//line cef.rl:38 +//line cef.rl:52 mark = p -//line cef.rl:85 +//line cef.rl:99 - extValueEnd = p + 1 + state.valueEnd = p + 1 goto _again f7: -//line cef.rl:44 +//line cef.rl:58 - escapes = append(escapes, mark_slash, p) + state.pushEscape(mark_slash, p) -//line cef.rl:41 +//line cef.rl:55 mark_slash = p goto _again f8: -//line cef.rl:44 +//line cef.rl:58 - escapes = append(escapes, mark_slash, p) + state.pushEscape(mark_slash, p) -//line cef.rl:50 +//line cef.rl:64 - e.DeviceVendor = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.DeviceVendor = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() goto _again f11: -//line cef.rl:44 +//line cef.rl:58 - escapes = append(escapes, mark_slash, p) + state.pushEscape(mark_slash, p) -//line cef.rl:54 +//line cef.rl:68 - e.DeviceProduct = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.DeviceProduct = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() goto _again f14: -//line cef.rl:44 +//line cef.rl:58 - escapes = append(escapes, mark_slash, p) + state.pushEscape(mark_slash, p) -//line cef.rl:58 +//line cef.rl:72 - e.DeviceVersion = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.DeviceVersion = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() goto _again f17: -//line cef.rl:44 +//line cef.rl:58 - escapes = append(escapes, mark_slash, p) + state.pushEscape(mark_slash, p) -//line cef.rl:62 +//line cef.rl:76 - e.DeviceEventClassID = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.DeviceEventClassID = replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() goto _again f20: -//line cef.rl:44 +//line cef.rl:58 - escapes = append(escapes, mark_slash, p) + state.pushEscape(mark_slash, p) -//line cef.rl:66 +//line cef.rl:80 - e.Name = replaceEscapes(data[mark:p], mark, escapes) - escapes = escapes[:0] + e.Name = 
replaceEscapes(data[mark:p], mark, state.escapes) + state.reset() goto _again f35: -//line cef.rl:44 +//line cef.rl:58 - escapes = append(escapes, mark_slash, p) + state.pushEscape(mark_slash, p) -//line cef.rl:85 +//line cef.rl:99 - extValueEnd = p + 1 + state.valueEnd = p + 1 goto _again f30: -//line cef.rl:81 +//line cef.rl:95 - extValueStart = p - extValueEnd = p + state.valueStart = p + state.valueEnd = p -//line cef.rl:41 +//line cef.rl:55 mark_slash = p goto _again f28: -//line cef.rl:81 +//line cef.rl:95 - extValueStart = p - extValueEnd = p + state.valueStart = p + state.valueEnd = p -//line cef.rl:85 +//line cef.rl:99 - extValueEnd = p + 1 + state.valueEnd = p + 1 goto _again f32: -//line cef.rl:85 +//line cef.rl:99 - extValueEnd = p + 1 + state.valueEnd = p + 1 -//line cef.rl:38 +//line cef.rl:52 mark = p @@ -1272,49 +1286,49 @@ func (e *Event) unpack(data string) error { if (p) == eof { switch _cef_eof_actions[cs] { case 32: -//line cef.rl:88 +//line cef.rl:102 // Reaching the EOF marks the end of the final extension value. - if len(extKey) > 0 && extValueStart <= extValueEnd { - e.pushExtension(extKey, replaceEscapes(data[extValueStart:extValueEnd], extValueStart, escapes)) - extKey, extValueStart, extValueEnd, escapes = "", 0, 0, escapes[:0] + if len(state.key) > 0 && state.valueStart <= state.valueEnd { + e.pushExtension(state.key, replaceEscapes(data[state.valueStart:state.valueEnd], state.valueStart, state.escapes)) + state.reset() } case 25: -//line cef.rl:95 +//line cef.rl:109 - recoveredErrs = append(recoveredErrs, fmt.Errorf("malformed value for %s at pos %d", extKey, p+1)) + recoveredErrs = append(recoveredErrs, fmt.Errorf("malformed value for %s at pos %d", state.key, p+1)) (p)-- cs = 33 case 35: -//line cef.rl:44 +//line cef.rl:58 - escapes = append(escapes, mark_slash, p) + state.pushEscape(mark_slash, p) -//line cef.rl:88 +//line cef.rl:102 // Reaching the EOF marks the end of the final extension value. - if len(extKey) > 0 && extValueStart <= extValueEnd { - e.pushExtension(extKey, replaceEscapes(data[extValueStart:extValueEnd], extValueStart, escapes)) - extKey, extValueStart, extValueEnd, escapes = "", 0, 0, escapes[:0] + if len(state.key) > 0 && state.valueStart <= state.valueEnd { + e.pushExtension(state.key, replaceEscapes(data[state.valueStart:state.valueEnd], state.valueStart, state.escapes)) + state.reset() } case 28: -//line cef.rl:81 +//line cef.rl:95 - extValueStart = p - extValueEnd = p + state.valueStart = p + state.valueEnd = p -//line cef.rl:88 +//line cef.rl:102 // Reaching the EOF marks the end of the final extension value. - if len(extKey) > 0 && extValueStart <= extValueEnd { - e.pushExtension(extKey, replaceEscapes(data[extValueStart:extValueEnd], extValueStart, escapes)) - extKey, extValueStart, extValueEnd, escapes = "", 0, 0, escapes[:0] + if len(state.key) > 0 && state.valueStart <= state.valueEnd { + e.pushExtension(state.key, replaceEscapes(data[state.valueStart:state.valueEnd], state.valueStart, state.escapes)) + state.reset() } -//line parser.go:1116 +//line parser.go:1130 } } @@ -1323,7 +1337,7 @@ func (e *Event) unpack(data string) error { } } -//line cef.rl:161 +//line cef.rl:175 // Check if state machine completed. 
 	if cs < cef_first_final {

From 72a43be9ed23efc3dc2b371e2cadf5a7c575e429 Mon Sep 17 00:00:00 2001
From: Dan Kortschak <90160302+efd6@users.noreply.github.com>
Date: Thu, 27 Jan 2022 15:47:50 +1030
Subject: [PATCH 52/69] x-pack/packetbeat: install Npcap at start-up when required (#29112)

This adds automated installation of an OEM Npcap, provided by embedding it in the packetbeat executable. The installation is configurable to allow users to retain their own version of Npcap if they have one and to specify the location of the install. To simplify support, this configurability will not be included in the next release, but is committed here to allow it to be reverted back out, making it easier to add back if the decision is made to do that later.
---
 CHANGELOG.next.asciidoc | 2 +
 NOTICE.txt | 75 +++---
 dev-tools/mage/config.go | 1 +
 dev-tools/mage/crossbuild.go | 7 +-
 dev-tools/mage/settings.go | 1 +
 dev-tools/notice/NOTICE.txt.tmpl | 4 +-
 dev-tools/packaging/package_test.go | 78 +++++-
 go.mod | 2 +-
 packetbeat/README.md | 14 ++
 .../_meta/config/beat.reference.yml.tmpl | 2 +
 packetbeat/_meta/config/beat.yml.tmpl | 2 +
 .../_meta/config/windows_npcap.yml.tmpl | 36 +++
 packetbeat/beater/install_npcap.go | 104 ++++++++
 packetbeat/beater/packetbeat.go | 6 +
 packetbeat/npcap/npcap.go | 166 +++++++++++++
 packetbeat/npcap/npcap_other.go | 25 ++
 packetbeat/npcap/npcap_test.go | 93 +++++++
 packetbeat/npcap/npcap_windows.go | 33 +++
 packetbeat/npcap/testdata/mock_installer.go | 44 ++++
 packetbeat/npcap/testdata/mock_uninstaller.go | 34 +++
 packetbeat/scripts/mage/package.go | 29 +++
 x-pack/packetbeat/LICENSE-Npcap.txt | 228 +++++++++++++++++
 x-pack/packetbeat/cmd/root.go | 3 +
 x-pack/packetbeat/magefile.go | 40 ++-
 x-pack/packetbeat/npcap/installer/.gitignore | 5 +
 x-pack/packetbeat/npcap/installer/LICENSE | 229 ++++++++++++++++++
 x-pack/packetbeat/npcap/installer/README | 19 ++
 .../packetbeat/npcap/installer/npcap-0.00.exe | 1 +
 x-pack/packetbeat/npcap/npcap_other.go | 9 +
 x-pack/packetbeat/npcap/npcap_windows.go | 54 +++++
 30 files changed, 1299 insertions(+), 47 deletions(-)
 create mode 100644 packetbeat/_meta/config/windows_npcap.yml.tmpl
 create mode 100644 packetbeat/beater/install_npcap.go
 create mode 100644 packetbeat/npcap/npcap.go
 create mode 100644 packetbeat/npcap/npcap_other.go
 create mode 100644 packetbeat/npcap/npcap_test.go
 create mode 100644 packetbeat/npcap/npcap_windows.go
 create mode 100644 packetbeat/npcap/testdata/mock_installer.go
 create mode 100644 packetbeat/npcap/testdata/mock_uninstaller.go
 create mode 100644 x-pack/packetbeat/LICENSE-Npcap.txt
 create mode 100644 x-pack/packetbeat/npcap/installer/.gitignore
 create mode 100644 x-pack/packetbeat/npcap/installer/LICENSE
 create mode 100644 x-pack/packetbeat/npcap/installer/README
 create mode 100644 x-pack/packetbeat/npcap/installer/npcap-0.00.exe
 create mode 100644 x-pack/packetbeat/npcap/npcap_other.go
 create mode 100644 x-pack/packetbeat/npcap/npcap_windows.go

diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index 4bc902fb3b5..d1feb50c21a 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -196,6 +196,8 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d
 
 *Packetbeat*
 
+- Add automated OEM Npcap installation handling.
{pull}29112[29112] + *Functionbeat* diff --git a/NOTICE.txt b/NOTICE.txt index 7a332a0e662..75163cd38c4 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -16728,6 +16728,43 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- +Dependency : golang.org/x/mod +Version: v0.5.1 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/mod@v0.5.1/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + -------------------------------------------------------------------------------- Dependency : golang.org/x/net Version: v0.0.0-20211020060615-d418f374d309 @@ -34519,43 +34556,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : golang.org/x/mod -Version: v0.5.1 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/golang.org/x/mod@v0.5.1/LICENSE: - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------------- Dependency : golang.org/x/term Version: v0.0.0-20210615171337-6886f2dfbf5b @@ -37624,4 +37624,3 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/dev-tools/mage/config.go b/dev-tools/mage/config.go index da54123e916..b80bd9a197b 100644 --- a/dev-tools/mage/config.go +++ b/dev-tools/mage/config.go @@ -149,6 +149,7 @@ func makeConfigTemplate(destination string, mode os.FileMode, confParams ConfigF params := map[string]interface{}{ "GOOS": EnvOr("DEV_OS", "linux"), "GOARCH": EnvOr("DEV_ARCH", "amd64"), + "BeatLicense": BeatLicense, "Reference": false, "Docker": false, "ExcludeConsole": false, diff --git a/dev-tools/mage/crossbuild.go b/dev-tools/mage/crossbuild.go index 305a2588dd2..7a08a035bc4 100644 --- a/dev-tools/mage/crossbuild.go +++ b/dev-tools/mage/crossbuild.go @@ -183,7 +183,7 @@ func CrossBuild(options ...CrossBuildOption) error { builder := GolangCrossBuilder{buildPlatform.Name, params.Target, params.InDir, params.ImageSelector} if params.Serial { if err := builder.Build(); err != nil { - return errors.Wrapf(err, "failed cross-building target=%v for platform=%v %v", params.ImageSelector, + return errors.Wrapf(err, "failed cross-building target=%s for platform=%s", params.Target, buildPlatform.Name) } } else { @@ -321,8 +321,11 @@ func (b GolangCrossBuilder) Build() error { "-v", repoInfo.RootDir+":"+mountPoint, "-w", workDir, image, + + // Arguments for docker crossbuild entrypoint. For details see + // https://github.com/elastic/golang-crossbuild/blob/main/go1.17/base/rootfs/entrypoint.go. "--build-cmd", buildCmd+" "+b.Target, - "-p", b.Platform, + "--platforms", b.Platform, ) return dockerRun(args...) 
diff --git a/dev-tools/mage/settings.go b/dev-tools/mage/settings.go index b721b730c6a..fbcc916fbe9 100644 --- a/dev-tools/mage/settings.go +++ b/dev-tools/mage/settings.go @@ -209,6 +209,7 @@ BeatUser = {{.BeatUser}} VersionQualifier = {{.Qualifier}} PLATFORMS = {{.PLATFORMS}} PACKAGES = {{.PACKAGES}} +CI = {{.CI}} ## Functions diff --git a/dev-tools/notice/NOTICE.txt.tmpl b/dev-tools/notice/NOTICE.txt.tmpl index b32b4e808ca..5477a22975e 100644 --- a/dev-tools/notice/NOTICE.txt.tmpl +++ b/dev-tools/notice/NOTICE.txt.tmpl @@ -26,5 +26,5 @@ Third party libraries used by the Elastic Beats project: {{ "=" | line }} Indirect dependencies -{{ template "depInfo" .Indirect }} -{{ end }} +{{ template "depInfo" .Indirect -}} +{{- end}} diff --git a/dev-tools/packaging/package_test.go b/dev-tools/packaging/package_test.go index 249bd0bb6db..9759568de6a 100644 --- a/dev-tools/packaging/package_test.go +++ b/dev-tools/packaging/package_test.go @@ -23,6 +23,7 @@ package dev_tools import ( "archive/tar" "archive/zip" + "bufio" "bytes" "compress/gzip" "encoding/json" @@ -169,7 +170,7 @@ func checkTar(t *testing.T, file string) { } func checkZip(t *testing.T, file string) { - p, err := readZip(file) + p, err := readZip(t, file, checkNpcapNotices) if err != nil { t.Error(err) return @@ -183,6 +184,62 @@ func checkZip(t *testing.T, file string) { checkLicensesPresent(t, "", p) } +const ( + npcapSettings = "Windows Npcap installation settings" + npcapGrant = `Insecure.Com LLC \(“The Nmap Project”\) has granted Elasticsearch` + npcapLicense = `Dependency : Npcap \(https://nmap.org/npcap/\)` + libpcapLicense = `Dependency : Libpcap \(http://www.tcpdump.org/\)` + winpcapLicense = `Dependency : Winpcap \(https://www.winpcap.org/\)` + radiotapLicense = `Dependency : ieee80211_radiotap.h Header File` +) + +var ( + // These reflect the order that the licenses and notices appear in the relevant files. + npcapConfigPattern = regexp.MustCompile( + "(?s)" + npcapSettings + + ".*" + npcapGrant, + ) + npcapLicensePattern = regexp.MustCompile( + "(?s)" + npcapLicense + + ".*" + libpcapLicense + + ".*" + winpcapLicense + + ".*" + radiotapLicense, + ) +) + +func checkNpcapNotices(pkg, file string, contents io.Reader) error { + if !strings.Contains(pkg, "packetbeat") { + return nil + } + + wantNotices := strings.Contains(pkg, "windows") && !strings.Contains(pkg, "oss") + + // If the packetbeat README.md is made to be generated + // conditionally then it should also be checked here. 
+ pkg = filepath.Base(pkg) + file, err := filepath.Rel(pkg[:len(pkg)-len(filepath.Ext(pkg))], file) + if err != nil { + return err + } + switch file { + case "packetbeat.yml", "packetbeat.reference.yml": + if npcapConfigPattern.MatchReader(bufio.NewReader(contents)) != wantNotices { + if wantNotices { + return fmt.Errorf("Npcap config section not found in config file %s in %s", file, pkg) + } + return fmt.Errorf("unexpected Npcap config section found in config file %s in %s", file, pkg) + } + case "NOTICE.txt": + if npcapLicensePattern.MatchReader(bufio.NewReader(contents)) != wantNotices { + if wantNotices { + return fmt.Errorf("Npcap license section not found in %s file in %s", file, pkg) + } + return fmt.Errorf("unexpected Npcap license section found in %s file in %s", file, pkg) + } + } + return nil +} + func checkDocker(t *testing.T, file string) { p, info, err := readDocker(file) if err != nil { @@ -623,7 +680,11 @@ func readTarContents(tarName string, data io.Reader) (*packageFile, error) { return p, nil } -func readZip(zipFile string) (*packageFile, error) { +// inspector is a file contents inspector. It vets the contents of the file +// within a package for a requirement and returns an error if it is not met. +type inspector func(pkg, file string, contents io.Reader) error + +func readZip(t *testing.T, zipFile string, inspectors ...inspector) (*packageFile, error) { r, err := zip.OpenReader(zipFile) if err != nil { return nil, err @@ -636,6 +697,18 @@ func readZip(zipFile string) (*packageFile, error) { File: f.Name, Mode: f.Mode(), } + for _, inspect := range inspectors { + r, err := f.Open() + if err != nil { + t.Errorf("failed to open %s in %s: %v", f.Name, zipFile, err) + break + } + err = inspect(zipFile, f.Name, r) + if err != nil { + t.Error(err) + } + r.Close() + } } return p, nil @@ -740,7 +813,6 @@ func readDockerManifest(r io.Reader) (*dockerManifest, error) { err = json.Unmarshal(data, &manifests) if err != nil { return nil, err - } if len(manifests) != 1 { diff --git a/go.mod b/go.mod index 2601349a467..1603cbf798f 100644 --- a/go.mod +++ b/go.mod @@ -164,6 +164,7 @@ require ( go.uber.org/zap v1.14.1 golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 + golang.org/x/mod v0.5.1 golang.org/x/net v0.0.0-20211020060615-d418f374d309 golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c @@ -268,7 +269,6 @@ require ( github.com/xdg/stringprep v1.0.3 // indirect go.elastic.co/fastjson v1.1.0 // indirect go.opencensus.io v0.23.0 // indirect - golang.org/x/mod v0.5.1 // indirect golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/packetbeat/README.md b/packetbeat/README.md index fcda826edbf..0ce898ac499 100644 --- a/packetbeat/README.md +++ b/packetbeat/README.md @@ -39,3 +39,17 @@ If you are sure you found a bug or have a feature request, open an issue on We love contributions from our community! Please read the [CONTRIBUTING.md](../CONTRIBUTING.md) file. + +## LICENSE NOTICE for Windows users of Packetbeat + +The default distribution of Packetbeat for Windows comes bundled with the Npcap +library. This is not available in the OSS-only distribution of Packetbeat. 
+
+**Restrictions on Distribution**
+
+Insecure.Com LLC (“The Nmap Project”) has granted Elasticsearch BV and its
+affiliates the right to include Npcap with this distribution of Packetbeat.
+You may not distribute this version of Packetbeat or any other package from
+Elastic that includes Npcap. If you wish to distribute Npcap, or any package
+that includes Npcap, you should reach out to The Nmap Project to obtain a
+distribution license. See https://nmap.org/npcap/ for more details.
diff --git a/packetbeat/_meta/config/beat.reference.yml.tmpl b/packetbeat/_meta/config/beat.reference.yml.tmpl
index 5d8d8fa9c7a..856a9475fe9 100644
--- a/packetbeat/_meta/config/beat.reference.yml.tmpl
+++ b/packetbeat/_meta/config/beat.reference.yml.tmpl
@@ -57,6 +57,8 @@ packetbeat.interfaces.internal_networks:
 # can stay enabled even after beat is shut down.
 #packetbeat.interfaces.auto_promisc_mode: true
 
+{{- template "windows_npcap.yml.tmpl" .}}
+
 {{header "Flows"}}
 packetbeat.flows:
diff --git a/packetbeat/_meta/config/beat.yml.tmpl b/packetbeat/_meta/config/beat.yml.tmpl
index 2a69df42517..95410045593 100644
--- a/packetbeat/_meta/config/beat.yml.tmpl
+++ b/packetbeat/_meta/config/beat.yml.tmpl
@@ -23,6 +23,8 @@ packetbeat.interfaces.device: {{ call .device .GOOS }}
 packetbeat.interfaces.internal_networks:
   - private
 
+{{- template "windows_npcap.yml.tmpl" .}}
+
 {{header "Flows"}}
 # Set `enabled: false` or comment out all options to disable flows reporting.
diff --git a/packetbeat/_meta/config/windows_npcap.yml.tmpl b/packetbeat/_meta/config/windows_npcap.yml.tmpl
new file mode 100644
index 00000000000..a6c13db4a53
--- /dev/null
+++ b/packetbeat/_meta/config/windows_npcap.yml.tmpl
@@ -0,0 +1,36 @@
+{{if and (eq .BeatLicense "Elastic License") (eq .GOOS "windows")}}
+
+{{header "Windows Npcap installation settings"}}
+
+# Windows Npcap installation options. These options specify how the Npcap packet
+# capture library for Windows should be obtained and installed.
+# Npcap installation is only available in the default distribution of Packetbeat
+# for Windows and is not available in the OSS-only distribution of Packetbeat.
+#
+# LICENSE NOTICE
+#
+# Restrictions on Distribution
+#
+# Insecure.Com LLC (“The Nmap Project”) has granted Elasticsearch BV and its
+# affiliates the right to include Npcap with this distribution of Packetbeat.
+# You may not distribute this version of Packetbeat or any other package from
+# Elastic that includes Npcap. If you wish to distribute Npcap, or any package
+# that includes Npcap, you should reach out to The Nmap Project to obtain a
+# distribution license. See https://nmap.org/npcap/ for more details.
+#
+#npcap:
+#  # install_destination allows configuration of the location where Npcap will
+#  # place the Npcap library and associated files. See https://nmap.org/npcap/guide/npcap-users-guide.html#npcap-installation-uninstall-options.
+#  install_destination: ""
+#  install_timeout: 120s
+#  # ignore_missing_registry specifies that failure to query the registry server
+#  # will be ignored with a logged warning.
+#  ignore_missing_registry: false
+#  # By default Npcap will be installed only when a newer version of Npcap is available.
+#  # force_reinstall forces a new installation of Npcap in all cases.
+#  force_reinstall: false
+#  # If a specific local version of Npcap is required, installation by Packetbeat
+#  # can be blocked by setting never_install to true. No action is taken if this
+#  # option is set to true.
+#  never_install: false
+{{- end -}}
diff --git a/packetbeat/beater/install_npcap.go b/packetbeat/beater/install_npcap.go
new file mode 100644
index 00000000000..ef9edd5617b
--- /dev/null
+++ b/packetbeat/beater/install_npcap.go
@@ -0,0 +1,104 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package beater
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"time"
+
+	"github.com/google/gopacket/pcap"
+
+	"github.com/elastic/beats/v7/libbeat/beat"
+	"github.com/elastic/beats/v7/libbeat/logp"
+	"github.com/elastic/beats/v7/packetbeat/npcap"
+)
+
+type npcapConfig struct {
+	NeverInstall       bool          `config:"npcap.never_install"`
+	ForceReinstall     bool          `config:"npcap.force_reinstall"`
+	InstallTimeout     time.Duration `config:"npcap.install_timeout"`
+	InstallDestination string        `config:"npcap.install_destination"`
+}
+
+func (c *npcapConfig) Init() {
+	// Set defaults.
+	c.InstallTimeout = 120 * time.Second
+}
+
+func installNpcap(b *beat.Beat) error {
+	if !b.Info.ElasticLicensed {
+		return nil
+	}
+	if runtime.GOOS != "windows" {
+		return nil
+	}
+
+	defer func() {
+		log := logp.NewLogger("npcap")
+		npcapVersion := pcap.Version()
+		if npcapVersion == "" {
+			log.Warn("no version available for npcap")
+		} else {
+			log.Infof("npcap version: %s", npcapVersion)
+		}
+	}()
+
+	var cfg npcapConfig
+	err := b.BeatConfig.Unpack(&cfg)
+	if err != nil {
+		return fmt.Errorf("failed to unpack npcap config: %w", err)
+	}
+	if cfg.NeverInstall {
+		return nil
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), cfg.InstallTimeout)
+	defer cancel()
+
+	log := logp.NewLogger("npcap_install")
+
+	if npcap.Installer == nil {
+		return nil
+	}
+	if !cfg.ForceReinstall && !npcap.Upgradeable() {
+		npcap.Installer = nil
+		return nil
+	}
+	tmp, err := os.MkdirTemp("", "")
+	if err != nil {
+		return fmt.Errorf("could not create installation temporary directory: %w", err)
+	}
+	defer func() {
+		// The init sequence duplicates the embedded binary.
+		// Get rid of the part we can. The remainder is in
+		// the packetbeat text section as a string.
+		npcap.Installer = nil
+		// Remove the installer from the file system.
+		os.RemoveAll(tmp)
+	}()
+	installerPath := filepath.Join(tmp, "npcap.exe")
+	err = os.WriteFile(installerPath, npcap.Installer, 0o700)
+	if err != nil {
+		return fmt.Errorf("could not create installation temporary file: %w", err)
+	}
+	return npcap.Install(ctx, log, installerPath, cfg.InstallDestination, false)
+}
diff --git a/packetbeat/beater/packetbeat.go b/packetbeat/beater/packetbeat.go
index d72a98d4a5f..3a1f7d6c2d9 100644
--- a/packetbeat/beater/packetbeat.go
+++ b/packetbeat/beater/packetbeat.go
@@ -111,6 +111,12 @@ func (pb *packetbeat) Run(b *beat.Beat) error {
 		}
 	}()
 
+	// Install Npcap if needed.
+	err := installNpcap(b)
+	if err != nil {
+		return err
+	}
+
 	if !b.Manager.Enabled() {
 		return pb.runStatic(b, pb.factory)
 	}
diff --git a/packetbeat/npcap/npcap.go b/packetbeat/npcap/npcap.go
new file mode 100644
index 00000000000..c81d1ce731d
--- /dev/null
+++ b/packetbeat/npcap/npcap.go
@@ -0,0 +1,166 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Package npcap handles fetching and installing Npcap for Windows.
+//
+// The npcap package interacts with a registry and download server that
+// provides a current_version end point that serves a JSON message that
+// corresponds to this Go type:
+//
+//  struct {
+//      Version string // The semverish version of the Npcap installer.
+//      URL     string // The location of the Npcap installer.
+//      Hash    string // The sha256 hash of the Npcap installer.
+//  }
+//
+// The URL field will point to the location of an Npcap installer.
+package npcap
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"os/exec"
+	"runtime"
+	"strings"
+
+	"github.com/google/gopacket/pcap"
+	"golang.org/x/mod/semver"
+
+	"github.com/elastic/beats/v7/libbeat/logp"
+)
+
+var (
+	// Installer holds the embedded installer when run with x-pack.
+	Installer []byte
+
+	// EmbeddedInstallerVersion holds the version of the embedded installer.
+	EmbeddedInstallerVersion string
+)
+
+// Install runs the Npcap installer at the provided path. The install
+// destination is specified by dst and installation using WinPcap
+// API-compatible mode is specified by compat. If dst is the empty string
+// the default install location is used.
+//
+// See https://nmap.org/npcap/guide/npcap-users-guide.html#npcap-installation-uninstall-options
+// for details.
+func Install(ctx context.Context, log *logp.Logger, path, dst string, compat bool) error {
+	if runtime.GOOS != "windows" {
+		return errors.New("npcap: called Install on non-Windows platform")
+	}
+	return install(ctx, log, path, dst, compat)
+}
+
+func install(ctx context.Context, log *logp.Logger, path, dst string, compat bool) error {
+	args := []string{"/S", "/winpcap_mode=no"}
+	if compat {
+		args[1] = "/winpcap_mode=yes"
+	}
+	if dst != "" {
+		// The destination switch must be last as it uses unquoted spaces.
+		// See https://nmap.org/npcap/guide/npcap-users-guide.html#npcap-installation-uninstall-options.
+		args = append(args, "/D="+dst)
+	}
+	cmd := exec.CommandContext(ctx, path, args...)
+	var outBuf, errBuf bytes.Buffer
+	cmd.Stdout = &outBuf
+	cmd.Stderr = &errBuf
+
+	err := cmd.Start()
+	if err != nil {
+		return fmt.Errorf("npcap: failed to start Npcap installer: %w", err)
+	}
+
+	err = cmd.Wait()
+	if outBuf.Len() != 0 {
+		log.Info(&outBuf)
+	}
+	if err != nil {
+		log.Error(&errBuf)
+		return fmt.Errorf("npcap: failed to install Npcap: %w", err)
+	}
+
+	return reloadWinPCAP()
+}
+
+func Upgradeable() bool {
+	// This is only set when a real installer is placed in
+	// x-pack/packetbeat/npcap/installer.
+	if EmbeddedInstallerVersion == "" {
+		return false
+	}
+
+	// pcap.Version() returns a string in the form:
+	//
+	//  Npcap version 1.55, based on libpcap version 1.10.2-PRE-GIT
+	//
+	// if an Npcap version is installed. See https://nmap.org/npcap/guide/npcap-devguide.html#npcap-detect
+	installed := pcap.Version()
+	if !strings.HasPrefix(installed, "Npcap version") {
+		return true
+	}
+	installed = strings.TrimPrefix(installed, "Npcap version ")
+	idx := strings.Index(installed, ",")
+	if idx < 0 {
+		return true
+	}
+	installed = installed[:idx]
+	return semver.Compare("v"+installed, "v"+EmbeddedInstallerVersion) < 0
+}
+
+// Uninstall uninstalls the Npcap tools. The path to the uninstaller can
+// be provided, otherwise the default install location is used.
+//
+// See https://nmap.org/npcap/guide/npcap-users-guide.html#npcap-installation-uninstall-options
+// for details.
+func Uninstall(ctx context.Context, log *logp.Logger, path string) error {
+	if runtime.GOOS != "windows" {
+		return errors.New("npcap: called Uninstall on non-Windows platform")
+	}
+	if pcap.Version() == "" {
+		return nil
+	}
+	return uninstall(ctx, log, path)
+}
+
+func uninstall(ctx context.Context, log *logp.Logger, path string) error {
+	const uninstaller = `C:\Program Files\Npcap\Uninstall.exe`
+	if path == "" {
+		path = uninstaller
+	}
+	cmd := exec.CommandContext(ctx, path, `/S`)
+	var outBuf, errBuf bytes.Buffer
+	cmd.Stdout = &outBuf
+	cmd.Stderr = &errBuf
+
+	err := cmd.Start()
+	if err != nil {
+		return fmt.Errorf("npcap: failed to start Npcap uninstaller: %w", err)
+	}
+
+	err = cmd.Wait()
+	if outBuf.Len() != 0 {
+		log.Info(&outBuf)
+	}
+	if err != nil {
+		log.Error(&errBuf)
+		return fmt.Errorf("npcap: failed to uninstall Npcap: %w", err)
+	}
+	return nil
+}
diff --git a/packetbeat/npcap/npcap_other.go b/packetbeat/npcap/npcap_other.go
new file mode 100644
index 00000000000..c813644d471
--- /dev/null
+++ b/packetbeat/npcap/npcap_other.go
@@ -0,0 +1,25 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +//go:build !windows +// +build !windows + +package npcap + +func loadWinPCAP() error { return nil } + +func reloadWinPCAP() error { return nil } diff --git a/packetbeat/npcap/npcap_test.go b/packetbeat/npcap/npcap_test.go new file mode 100644 index 00000000000..e0f1a3e4eaf --- /dev/null +++ b/packetbeat/npcap/npcap_test.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package npcap + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "runtime" + "testing" + + "github.com/elastic/beats/v7/libbeat/logp" +) + +func TestNpcap(t *testing.T) { + // Ugh. + var lcfg logp.Config + logp.ToObserverOutput()(&lcfg) + logp.Configure(lcfg) + obs := logp.ObserverLogs() + + // Working space. + dir, err := os.MkdirTemp("", "packetbeat-npcap-*") + if err != nil { + t.Fatalf("failed to create working directory: %v", err) + } + defer os.RemoveAll(dir) + path := filepath.Join(dir, "installer") + if runtime.GOOS == "windows" { + path += ".exe" + } + + t.Run("Install", func(t *testing.T) { + build := exec.Command("go", "build", "-o", path, filepath.FromSlash("testdata/mock_installer.go")) + b, err := build.CombinedOutput() + if err != nil { + t.Fatalf("failed to build mock installer: %v\n%s", err, b) + } + log := logp.NewLogger("npcap_test_install") + for _, compat := range []bool{false, true} { + for _, dst := range []string{ + "", // Default. + `C:\some\other\location`, + } { + err = install(context.Background(), log, path, dst, compat) + messages := obs.TakeAll() + if err != nil { + if dst == "" { + dst = "default location" + } + t.Errorf("unexpected error running installer to %s with compat=%t: %v", dst, compat, err) + for _, e := range messages { + t.Log(e.Message) + } + } + } + } + }) + + t.Run("Uninstall", func(t *testing.T) { + path = filepath.Join(filepath.Dir(path), "Uninstall.exe") + build := exec.Command("go", "build", "-o", path, filepath.FromSlash("testdata/mock_uninstaller.go")) + b, err := build.CombinedOutput() + if err != nil { + t.Fatalf("failed to build mock uninstaller: %v\n%s", err, b) + } + log := logp.NewLogger("npcap_test_uninstall") + err = uninstall(context.Background(), log, path) + messages := obs.TakeAll() + if err != nil { + t.Errorf("unexpected error running uninstaller: %v", err) + for _, e := range messages { + t.Log(e.Message) + } + } + }) +} diff --git a/packetbeat/npcap/npcap_windows.go b/packetbeat/npcap/npcap_windows.go new file mode 100644 index 00000000000..44d0053820f --- /dev/null +++ b/packetbeat/npcap/npcap_windows.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build windows
+// +build windows
+
+package npcap
+
+import "github.com/google/gopacket/pcap"
+
+func loadWinPCAP() error { return pcap.LoadWinPCAP() }
+
+func reloadWinPCAP() error {
+	err := pcap.UnloadWinPCAP()
+	if err != nil {
+		return err
+	}
+	return pcap.LoadWinPCAP()
+}
diff --git a/packetbeat/npcap/testdata/mock_installer.go b/packetbeat/npcap/testdata/mock_installer.go
new file mode 100644
index 00000000000..cb82464d8b0
--- /dev/null
+++ b/packetbeat/npcap/testdata/mock_installer.go
@@ -0,0 +1,44 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package main
+
+import (
+	"log"
+	"os"
+	"strings"
+)
+
+func main() {
+	log.SetFlags(0)
+	log.SetPrefix("installer message: ")
+	switch len(os.Args) {
+	case 3, 4:
+		// OK
+	default:
+		log.Fatalf("unexpected number of arguments: want 3 or 4 but got:%q", os.Args)
+	}
+	if os.Args[1] != "/S" {
+		log.Fatalf(`unexpected first argument: want:"/S" got:%q`, os.Args[1])
+	}
+	if os.Args[2] != "/winpcap_mode=yes" && os.Args[2] != "/winpcap_mode=no" {
+		log.Fatalf(`unexpected second argument: want:"/winpcap_mode={yes,no}" got:%q`, os.Args[2])
+	}
+	if len(os.Args) > 3 && !strings.HasPrefix(os.Args[len(os.Args)-1], "/D=") {
+		log.Fatalf(`unexpected final argument: want:"/D=" got:%#q`, os.Args[3])
+	}
+}
diff --git a/packetbeat/npcap/testdata/mock_uninstaller.go b/packetbeat/npcap/testdata/mock_uninstaller.go
new file mode 100644
index 00000000000..f86472cb990
--- /dev/null
+++ b/packetbeat/npcap/testdata/mock_uninstaller.go
@@ -0,0 +1,34 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package main
+
+import (
+	"log"
+	"os"
+)
+
+func main() {
+	log.SetFlags(0)
+	log.SetPrefix("uninstaller message: ")
+	if len(os.Args) != 2 {
+		log.Fatalf("unexpected number of arguments: want 2 but got:%q", os.Args)
+	}
+	if os.Args[1] != "/S" {
+		log.Fatalf(`unexpected first argument: want:"/S" got:%q`, os.Args[1])
+	}
+}
diff --git a/packetbeat/scripts/mage/package.go b/packetbeat/scripts/mage/package.go
index 55f26493cf5..031ca09c894 100644
--- a/packetbeat/scripts/mage/package.go
+++ b/packetbeat/scripts/mage/package.go
@@ -18,6 +18,9 @@
 package mage
 
 import (
+	"os"
+	"path/filepath"
+
 	"github.com/pkg/errors"
 
 	devtools "github.com/elastic/beats/v7/dev-tools/mage"
@@ -48,6 +51,31 @@ func CustomizePackaging() {
 			return devtools.Config(devtools.ReferenceConfigType, c, spec.MustExpand("{{.PackageDir}}"))
 		},
 	}
+		npcapNoticeTxt = devtools.PackageFile{
+			Mode:   0o644,
+			Source: "{{.PackageDir}}/NOTICE.txt",
+			Dep: func(spec devtools.PackageSpec) error {
+				repo, err := devtools.GetProjectRepoInfo()
+				if err != nil {
+					return err
+				}
+
+				notice, err := os.ReadFile(filepath.Join(repo.RootDir, "NOTICE.txt"))
+				if err != nil {
+					return err
+				}
+
+				if spec.OS == "windows" && spec.License == "Elastic License" {
+					license, err := os.ReadFile(devtools.XPackBeatDir("npcap/installer/LICENSE"))
+					if err != nil {
+						return err
+					}
+					notice = append(notice, license...)
+				}
+
+				return os.WriteFile(devtools.CreateDir(spec.MustExpand("{{.PackageDir}}/NOTICE.txt")), notice, 0o644)
+			},
+		}
 	)
 
 	for _, args := range devtools.Packages {
@@ -56,6 +84,7 @@
 		case devtools.TarGz, devtools.Zip:
 			args.Spec.ReplaceFile("{{.BeatName}}.yml", configYml)
 			args.Spec.ReplaceFile("{{.BeatName}}.reference.yml", referenceConfigYml)
+			args.Spec.ReplaceFile("NOTICE.txt", npcapNoticeTxt)
 		case devtools.Deb, devtools.RPM, devtools.DMG:
 			args.Spec.ReplaceFile("/etc/{{.BeatName}}/{{.BeatName}}.yml", configYml)
 			args.Spec.ReplaceFile("/etc/{{.BeatName}}/{{.BeatName}}.reference.yml", referenceConfigYml)
diff --git a/x-pack/packetbeat/LICENSE-Npcap.txt b/x-pack/packetbeat/LICENSE-Npcap.txt
new file mode 100644
index 00000000000..850c567c317
--- /dev/null
+++ b/x-pack/packetbeat/LICENSE-Npcap.txt
@@ -0,0 +1,228 @@
+
+NPCAP COPYRIGHT / END USER LICENSE AGREEMENT
+
+Npcap is a Windows packet sniffing driver and library and is copyright
+(c) 2013-2021 by Insecure.Com LLC ("The Nmap Project"). All rights
+reserved.
+
+Even though Npcap source code is publicly available for review, it is
+not open source software and may not be redistributed without special
+permission from the Nmap Project. The standard version is also
+limited to installation on five systems. We fund the Npcap project by
+selling two types of commercial licenses to a special Npcap OEM
+edition:
+
+1) Npcap OEM Redistribution License allows companies to redistribute
+Npcap with their products.
+
+2) Npcap OEM Internal Use License allows companies to use Npcap OEM
+internally in excess of the free/demo version's normal 5-system
+limitation.
+
+Both of these licenses include updates and support as well as a
+warranty.
Npcap OEM also includes a silent installer for unattended +installation. Further details about Npcap OEM are available from +https://nmap.org/npcap/oem/, and you are also welcome to contact us at +sales@nmap.com to ask any questions or set up a license for your +organization. + +Free and open source software producers are also welcome to contact us +for redistribution requests. However, we normally recommend that such +authors instead ask your users to download and install Npcap themselves. + +If the Nmap Project (directly or through one of our commercial +licensing customers) has granted you additional rights to Npcap or +Npcap OEM, those additional rights take precedence where they conflict +with the terms of this license agreement. + +Since the Npcap source code is available for download and review, +users sometimes contribute code patches to fix bugs or add new +features. By sending these changes to the Nmap Project (including +through direct email or our mailing lists or submitting pull requests +through our source code repository), it is understood unless you +specify otherwise that you are offering the Nmap Project the +unlimited, non-exclusive right to reuse, modify, and relicence your +code contribution so that we may (but are not obligated to) +incorporate it into Npcap. If you wish to specify special license +conditions or restrictions on your contributions, just say so when you +send them. + +This copy of Npcap (the "Software") and accompanying documentation is +licensed and not sold. This Software is protected by copyright laws +and treaties, as well as laws and treaties related to other forms of +intellectual property. The Nmap Project owns intellectual property +rights in the Software. The Licensee's ("you" or "your") license to +download, use, copy, or change the Software is subject to these rights +and to all the terms and conditions of this End User License Agreement +("Agreement"). + +ACCEPTANCE + +By accepting this agreement or by downloading, installing, using, or +copying the Software, or by clicking "I Agree", you agree to be bound +by the terms of this EULA. If you do not agree to the terms of this +EULA, do not install, use, or copy the Software. + +LICENSE GRANT + +This Agreement entitles you to install and use five (5) copies of the +Software. In addition, you may make archival copies of the Software +which may only be used for the reinstallation of the Software. This +Agreement does not permit the installation or use of more than 5 +copies of the Software, or the installation of the Software on more +than five computer at any given time, on a system that allows shared +used of applications by more than five users, or on any configuration +or system of computers that allows more than five users. A user may +only have one instance of this Agreement active at once. For example, +downloading the software multiple times, downloading multiple versions +of the software, and/or executing the software installer multiple +times do not grant any additional rights such as using the software on +more machines. + +The terms "computer" and "machine" in this license include any +computing device, including software computing instances such as +virtual machines and Docker containers. 
+ +Copies of Npcap do not count toward the five copy, five computer, or +five user limitations imposed by this section if they are installed +and used solely in conjunction with any of the following software: + +o The Nmap Security Scanner, as distributed from https://nmap.org + +o The Wireshark network protocol analyzer, as distributed from + https://www.wireshark.org/ + +o Microsoft Defender for Identity, as distributed from + https://www.microsoft.com/en-us/microsoft-365/security/identity-defender + +Users wishing to redistribute Npcap or exceed the usage limits imposed +by this free license or benefit from commercial support and features +such as a silent installer should contact sales@nmap.com to obtain an +appropriate commercial license agreement. More details on our OEM +edition is also available from https://nmap.org/npcap/oem/. + +DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY + +This program is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +RESTRICTIONS ON TRANSFER + +Without first obtaining the express written consent of the Nmap +Project, you may not assign your rights and obligations under this +Agreement, or redistribute, encumber, sell, rent, lease, sublicense, +or otherwise transfer your rights to the Software Product. + +RESTRICTIONS ON USE + +You may not use, copy, or install the Software Product on more than +five computers, or permit the use, copying, or installation of the +Software Product by more than five users or on more than five +computers. + +RESTRICTIONS ON COPYING + +You may not copy any part of the Software except to the extent that +licensed use inherently demands the creation of a temporary copy +stored in computer memory and not permanently affixed on storage +medium. You may make archival copies as well. + +DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY + +UNLESS OTHERWISE EXPLICITLY AGREED TO IN WRITING BY THE NMAP PROJECT, +THE NMAP PROJECT MAKES NO OTHER WARRANTIES, EXPRESS OR IMPLIED, IN +FACT OR IN LAW, INCLUDING, BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES +OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE OTHER THAN AS +SET FORTH IN THIS AGREEMENT OR IN THE LIMITED WARRANTY DOCUMENTS +PROVIDED WITH THE SOFTWARE. + +The Nmap Project makes no warranty that the Software will meet your +requirements or operate under your specific conditions of use. The +Nmap Project makes no warranty that operation of the Software Product +will be secure, error free, or free from interruption. YOU MUST +DETERMINE WHETHER THE SOFTWARE SUFFICIENTLY MEETS YOUR REQUIREMENTS +FOR SECURITY AND UNINTERRUPTABILITY. YOU BEAR SOLE RESPONSIBILITY AND +ALL LIABILITY FOR ANY LOSS INCURRED DUE TO FAILURE OF THE SOFTWARE TO +MEET YOUR REQUIREMENTS. THE NMAP PROJECT WILL NOT, UNDER ANY +CIRCUMSTANCES, BE RESPONSIBLE OR LIABLE FOR THE LOSS OF DATA ON ANY +COMPUTER OR INFORMATION STORAGE DEVICE. 
+ +UNDER NO CIRCUMSTANCES SHALL THE NMAP PROJECT, ITS DIRECTORS, +OFFICERS, EMPLOYEES OR AGENTS BE LIABLE TO YOU OR ANY OTHER PARTY FOR +INDIRECT, CONSEQUENTIAL, SPECIAL, INCIDENTAL, PUNITIVE, OR EXEMPLARY +DAMAGES OF ANY KIND (INCLUDING LOST REVENUES OR PROFITS OR LOSS OF +BUSINESS) RESULTING FROM THIS AGREEMENT, OR FROM THE FURNISHING, +PERFORMANCE, INSTALLATION, OR USE OF THE SOFTWARE, WHETHER DUE TO A +BREACH OF CONTRACT, BREACH OF WARRANTY, OR THE NEGLIGENCE OF THE NMAP +PROJECT OR ANY OTHER PARTY, EVEN IF THE NMAP PROJECT IS ADVISED +BEFOREHAND OF THE POSSIBILITY OF SUCH DAMAGES. TO THE EXTENT THAT THE +APPLICABLE JURISDICTION LIMITS THE NMAP PROJECT'S ABILITY TO DISCLAIM +ANY IMPLIED WARRANTIES, THIS DISCLAIMER SHALL BE EFFECTIVE TO THE +MAXIMUM EXTENT PERMITTED. + +LIMITATIONS OF REMEDIES AND DAMAGES + +Your remedy for a breach of this Agreement or of any warranty included +in this Agreement is the correction or replacement of the Software or +a refund of the purchase price of the Software, exclusive of any costs +for shipping and handling. Selection of whether to correct or replace +or refund shall be solely at the discretion of the Nmap Project. The +Nmap Project reserves the right to substitute a functionally +equivalent copy of the Software Product as a replacement. + +Any claim must be made within the applicable warranty period. All +warranties cover only defects arising under normal use and do not +include malfunctions or failure resulting from misuse, abuse, neglect, +alteration, problems with electrical power, acts of nature, unusual +temperatures or humidity, improper installation, or damage determined +by the Nmap Project to have been caused by you. All limited warranties +on the Software Product are granted only to you and are +non-transferable. + +You agree to indemnify and hold the Nmap Project harmless from all +claims, judgments, liabilities, expenses, or costs arising from your +breach of this Agreement and/or acts or omissions. + +GOVERNING LAW, JURISDICTION AND COSTS + +This Agreement is governed by the laws the United States of America +and Washington State, without regard to Washington's conflict or +choice of law provisions. + +SEVERABILITY + +If any provision of this Agreement shall be held to be invalid or +unenforceable, the remainder of this Agreement shall remain in full +force and effect. To the extent any express or implied restrictions +are not permitted by applicable laws, these express or implied +restrictions shall remain in force and effect to the maximum extent +permitted by such applicable laws. + +THIRD PARTY SOFTWARE ATTRIBUTION + +Npcap uses several 3rd party open source software libraries: + +* The libpcap portable packet capturing library from https://tcpdump.org +* The Winpcap packet capturing library. It has been abandoned, but is + currently still available from https://www.winpcap.org/. +* The ieee80211_radiotap.h header file from David Young + +All of these are open source with BSD-style licenses that allow for +unlimited use and royalty-free redistribution within other software +(including commercial/proprietary software). Some include a warranty +disclaimer (relating to the original authors) and require a small +amount of acknowledgment text be added somewhere in the documentation +of any software which includes them (including indirect inclusion +through Npcap). 
+ +The required acknowledgement text as well as full license text and +source details for these libraries is available from: +https://npcap.org/src/docs/Npcap-Third-Party-Open-Source.pdf . + +Since Insecure.Com LLC is not the author of this 3rd party code, we +can not waive or modify it’s software copyright or license. Npcap +users and redistributors must comply with the relevant Npcap license +(either the free/demo license or a commercial Npcap OEM license they +may have purchased) as well as the minimal requirements of this 3rd +party open source software. diff --git a/x-pack/packetbeat/cmd/root.go b/x-pack/packetbeat/cmd/root.go index c7c15b058be..407d24570df 100644 --- a/x-pack/packetbeat/cmd/root.go +++ b/x-pack/packetbeat/cmd/root.go @@ -9,6 +9,9 @@ import ( packetbeatCmd "github.com/elastic/beats/v7/packetbeat/cmd" _ "github.com/elastic/beats/v7/x-pack/libbeat/include" + + // This registers the Npcap installer on Windows. + _ "github.com/elastic/beats/v7/x-pack/packetbeat/npcap" ) // Name of this beat. diff --git a/x-pack/packetbeat/magefile.go b/x-pack/packetbeat/magefile.go index f5e930c5156..d8a82058c24 100644 --- a/x-pack/packetbeat/magefile.go +++ b/x-pack/packetbeat/magefile.go @@ -10,9 +10,11 @@ package main import ( "fmt" "os" + "strings" "time" "github.com/magefile/mage/mg" + "github.com/magefile/mage/sh" devtools "github.com/elastic/beats/v7/dev-tools/mage" packetbeat "github.com/elastic/beats/v7/packetbeat/scripts/mage" @@ -27,6 +29,11 @@ import ( _ "github.com/elastic/beats/v7/dev-tools/mage/target/test" ) +// NpcapVersion specifies the version of the OEM Npcap installer to bundle with +// the packetbeat executable. It is used to specify which npcap builder crossbuild +// image to use. +const NpcapVersion = "1.60" + func init() { common.RegisterCheckDeps(Update) @@ -57,12 +64,43 @@ func Build() error { // GolangCrossBuild build the Beat binary inside of the golang-builder. // Do not use directly, use crossBuild instead. func GolangCrossBuild() error { + if devtools.Platform.GOOS == "windows" && (devtools.Platform.GOARCH == "amd64" || devtools.Platform.GOARCH == "386") { + const installer = "npcap-" + NpcapVersion + "-oem.exe" + err := sh.Copy("./npcap/installer/"+installer, "/installer/"+installer) + if err != nil { + return fmt.Errorf("failed to copy Npcap installer into source tree: %w", err) + } + } return packetbeat.GolangCrossBuild() } // CrossBuild cross-builds the beat for all target platforms. +// +// On Windows platforms, if CrossBuild is invoked with the environment variables +// CI or NPCAP_LOCAL set to "true", a private cross-build image is selected that +// provides the OEM Npcap installer for the build. This behaviour requires access +// to the private image. func CrossBuild() error { - return packetbeat.CrossBuild() + return devtools.CrossBuild( + // Run all builds serially to try to address failures that might be caused + // by concurrent builds. See https://github.com/elastic/beats/issues/24304. + devtools.Serially(), + + devtools.ImageSelector(func(platform string) (string, error) { + image, err := devtools.CrossBuildImage(platform) + if err != nil { + return "", err + } + if os.Getenv("CI") != "true" && os.Getenv("NPCAP_LOCAL") != "true" { + return image, nil + } + if platform == "windows/amd64" || platform == "windows/386" { + image = strings.ReplaceAll(image, "beats-dev", "observability-ci") // Temporarily work around naming of npcap image. 
+ image = strings.ReplaceAll(image, "main", "npcap-"+NpcapVersion+"-debian9") + } + return image, nil + }), + ) } // BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon). diff --git a/x-pack/packetbeat/npcap/installer/.gitignore b/x-pack/packetbeat/npcap/installer/.gitignore new file mode 100644 index 00000000000..43c0420c171 --- /dev/null +++ b/x-pack/packetbeat/npcap/installer/.gitignore @@ -0,0 +1,5 @@ +# Ignore everything but the README and the place-holder. +* +!.gitignore +!README +!npcap-0.00.exe diff --git a/x-pack/packetbeat/npcap/installer/LICENSE b/x-pack/packetbeat/npcap/installer/LICENSE new file mode 100644 index 00000000000..94134072930 --- /dev/null +++ b/x-pack/packetbeat/npcap/installer/LICENSE @@ -0,0 +1,229 @@ +-------------------------------------------------------------------------------- +Dependency : Npcap (https://nmap.org/npcap/) +Version: 1.60 +Licence type: Commercial +-------------------------------------------------------------------------------- + +Npcap is Copyright (c) 2013-2021 Insecure.Com LLC. All rights reserved. +See https://npcap.org for details. + +Portions of Npcap are Copyright (c) 1999 - 2005 NetGroup, Politecnico di +Torino (Italy). +Portions of Npcap are Copyright (c) 2005 - 2010 CACE Technologies, +Davis (California). +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the Politecnico di Torino, CACE Technologies + nor the names of its contributors may be used to endorse or + promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED +AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +This product includes software developed by the University of California, +Lawrence Berkeley Laboratory and its contributors. + +Portions of Npcap are Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, +1996, 1997 The Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the University nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. + +Portions of Npcap are Copyright (c) 2003, 2004 David Young. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright notice, + thislist of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. The name of David Young may not be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY DAVID YOUNG ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL DAVID YOUNG BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : Libpcap (http://www.tcpdump.org/) +Version: 1.10 +Licence type: BSD +-------------------------------------------------------------------------------- + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. The names of the authors may not be used to endorse or promote + products derived from this software without specific prior + written permission. 
+ +THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + + +-------------------------------------------------------------------------------- +Dependency : Winpcap (https://www.winpcap.org/) +Version: 4.1.3 +Licence type: BSD +-------------------------------------------------------------------------------- + +Copyright (c) 1999 - 2005 NetGroup, Politecnico di Torino (Italy). +Copyright (c) 2005 - 2010 CACE Technologies, Davis (California). +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the Politecnico di Torino, CACE Technologies + nor the names of its contributors may be used to endorse or + promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +This product includes software developed by the University of California, +Lawrence Berkeley Laboratory and its contributors. +Portions Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997 The +Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the University nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : ieee80211_radiotap.h Header File +Version: Unversioned +Licence type: BSD +-------------------------------------------------------------------------------- + +Copyright (c) 2003, 2004 David Young. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. The name of David Young may not be used to endorse or promote + products derived from this software without specific prior + written permission. + +THIS SOFTWARE IS PROVIDED BY DAVID YOUNG ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL DAVID YOUNG BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/x-pack/packetbeat/npcap/installer/README b/x-pack/packetbeat/npcap/installer/README new file mode 100644 index 00000000000..404d6617dd1 --- /dev/null +++ b/x-pack/packetbeat/npcap/installer/README @@ -0,0 +1,19 @@ +This directory is pinned to allow placement of the Npcap OEM installer +during x-pack Packetbeat builds. + +Only one additional exe file may be placed here beyond the place-holder +and this should be the most recent available Npcap installer. As part of the +distribution a place-holder file is put here to allow compilation. + +LICENSE NOTICE + +Restrictions on Distribution + +Insecure.Com LLC (“The Nmap Project”) has granted Elasticsearch BV and its +affiliates the right to include Npcap with this distribution of Packetbeat. +You may not distribute this version of Packetbeat or any other package from +Elastic that includes Npcap. If you wish to distribute Npcap, or any package +that includes Npcap, you should reach out to The Nmap Project to obtain a +distribution license. See https://nmap.org/npcap/ for more details. + +See the LICENSE file in this directory. 
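An aside on the file-name convention this README relies on: the embedded-installer init in the following diff derives the bundled Npcap version from the installer name. A standalone sketch of that parsing, for illustration only and assuming the documented "npcap-<version>[-oem].exe" pattern:

```go
package main

import (
	"fmt"
	"strings"
)

// versionFromName mirrors the trimming performed by the x-pack npcap
// package's init below: "npcap-1.60-oem.exe" -> "1.60".
func versionFromName(name string) string {
	v := strings.TrimPrefix(name, "npcap-")
	v = strings.TrimSuffix(v, ".exe")
	return strings.TrimSuffix(v, "-oem")
}

func main() {
	fmt.Println(versionFromName("npcap-1.60-oem.exe")) // prints 1.60
}
```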
diff --git a/x-pack/packetbeat/npcap/installer/npcap-0.00.exe b/x-pack/packetbeat/npcap/installer/npcap-0.00.exe new file mode 100644 index 00000000000..43d4762b1ed --- /dev/null +++ b/x-pack/packetbeat/npcap/installer/npcap-0.00.exe @@ -0,0 +1 @@ +This is not the installer you are looking for. \ No newline at end of file diff --git a/x-pack/packetbeat/npcap/npcap_other.go b/x-pack/packetbeat/npcap/npcap_other.go new file mode 100644 index 00000000000..647eab35bff --- /dev/null +++ b/x-pack/packetbeat/npcap/npcap_other.go @@ -0,0 +1,9 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build !windows +// +build !windows + +// Package npcap provides an embedded Npcap OEM installer on Windows systems. +package npcap diff --git a/x-pack/packetbeat/npcap/npcap_windows.go b/x-pack/packetbeat/npcap/npcap_windows.go new file mode 100644 index 00000000000..0eab2f5c67c --- /dev/null +++ b/x-pack/packetbeat/npcap/npcap_windows.go @@ -0,0 +1,54 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows +// +build windows + +// Package npcap provides an embedded Npcap OEM installer. The embedded installer +// must be placed in the installer directory and have a name that matches the pattern +// "npcap-([0-9]\.[0-9]+)(?:|-oem)\.exe" where the capture is the installer version. +package npcap + +import ( + "embed" + "fmt" + "path" + "strings" + + "github.com/elastic/beats/v7/packetbeat/npcap" +) + +//go:embed installer/*.exe +var fs embed.FS + +func init() { + list, err := fs.ReadDir("installer") + if err != nil { + panic(fmt.Sprintf("failed to set up npcap installer: %v", err)) + } + var installer string + for _, f := range list { + name := f.Name() + if name != "npcap-0.00.exe" { + installer = name + break + } + } + if installer == "" { + return + } + if len(list) > 2 { + panic(fmt.Sprintf("unexpected number of installers found: want only one but got %d", len(list)-1)) + } + + version := strings.TrimPrefix(installer, "npcap-") + version = strings.TrimSuffix(version, ".exe") + version = strings.TrimSuffix(version, "-oem") + npcap.EmbeddedInstallerVersion = version + + npcap.Installer, err = fs.ReadFile(path.Join("installer", installer)) + if err != nil { + panic(fmt.Sprintf("failed to set up npcap installer: %v", err)) + } +} From 2b99db94f653bf2a579d58361d4d49b9c3c42889 Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Thu, 27 Jan 2022 10:39:26 +0100 Subject: [PATCH 53/69] Metricbeat enterprise search module: add xpack.enabled support (#29871) --- CHANGELOG.next.asciidoc | 1 + metricbeat/docs/modules/enterprisesearch.asciidoc | 15 +++++++++++++-- metricbeat/helper/elastic/elastic.go | 5 +++++ x-pack/metricbeat/metricbeat.reference.yml | 4 ++-- .../enterprisesearch/_meta/config-xpack.yml | 8 ++++++++ .../module/enterprisesearch/_meta/config.yml | 4 ++-- .../module/enterprisesearch/_meta/docs.asciidoc | 11 +++++++++++ .../module/enterprisesearch/docker-compose.yml | 2 +- .../module/enterprisesearch/health/data.go | 10 +++++++++- .../module/enterprisesearch/health/health.go | 15 +++++++++++++-- .../module/enterprisesearch/stats/data.go | 10 +++++++++- 
.../module/enterprisesearch/stats/stats.go | 15 +++++++++++++-- .../modules.d/enterprisesearch-xpack.yml.disabled | 11 +++++++++++ .../modules.d/enterprisesearch.yml.disabled | 4 ++-- 14 files changed, 100 insertions(+), 15 deletions(-) create mode 100644 x-pack/metricbeat/module/enterprisesearch/_meta/config-xpack.yml create mode 100644 x-pack/metricbeat/modules.d/enterprisesearch-xpack.yml.disabled
diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index d1feb50c21a..81271742fd1 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc
@@ -193,6 +193,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add k8s metadata in state_cronjob metricset. {pull}29572[29572] - Add `elasticsearch.cluster.id` field to Beat and Kibana modules. {pull}29577[29577] - Add `elasticsearch.cluster.id` field to Logstash module. {pull}29625[29625] +- Add `xpack.enabled` support for Enterprise Search module. {pull}29871[29871] *Packetbeat*
diff --git a/metricbeat/docs/modules/enterprisesearch.asciidoc b/metricbeat/docs/modules/enterprisesearch.asciidoc index 1f34a3158f1..a7c0948012a 100644 --- a/metricbeat/docs/modules/enterprisesearch.asciidoc +++ b/metricbeat/docs/modules/enterprisesearch.asciidoc
@@ -20,6 +20,17 @@ The module has been tested with Enterprise Search versions 7.16.0 and higher. Ve === Usage The Enterprise Search module requires a set of credentials (a username and a password) for an Elasticsearch user that has a `monitor` https://www.elastic.co/guide/en/elasticsearch/reference/current/security-privileges.html#privileges-list-cluster[cluster privilege]. +[float] +=== Usage for {stack} Monitoring + +The Enterprise Search module can be used to collect metrics shown in our {stack-monitor-app} +UI in {kib}. To enable this usage, set `xpack.enabled: true` in configuration. + +NOTE: When this module is used for {stack} Monitoring, it sends metrics to the +monitoring index instead of the default index typically used by {metricbeat}. +For more details about the monitoring index, see +{ref}/config-monitoring-indices.html[Configuring indices for monitoring]. + [float] === Example configuration @@ -35,8 +46,8 @@ metricbeat.modules: enabled: true period: 10s hosts: ["http://localhost:3002"] - username: elastic - password: changeme + #username: "user" + #password: "secret" ---- This module supports TLS connections when using `ssl` config field, as described in <>.
diff --git a/metricbeat/helper/elastic/elastic.go b/metricbeat/helper/elastic/elastic.go index 837ff615064..c51179747e1 100644 --- a/metricbeat/helper/elastic/elastic.go +++ b/metricbeat/helper/elastic/elastic.go
@@ -44,6 +44,9 @@ const ( // Beats product Beats + + // Enterprise Search product + EnterpriseSearch ) func (p Product) xPackMonitoringIndexString() string { @@ -52,6 +55,7 @@ func (p Product) xPackMonitoringIndexString() string { "kibana", "logstash", "beats", + "ent-search", } if int(p) < 0 || int(p) > len(indexProductNames) { @@ -67,6 +71,7 @@ func (p Product) String() string { "kibana", "logstash", "beats", + "enterprisesearch", } if int(p) < 0 || int(p) > len(productNames) {
diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index e46f549be40..4971d923273 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml
@@ -523,8 +523,8 @@ metricbeat.modules: enabled: true period: 10s hosts: ["http://localhost:3002"] - username: elastic - password: changeme + #username: "user" + #password: "secret" #------------------------------ Envoyproxy Module ------------------------------ - module: envoyproxy
diff --git a/x-pack/metricbeat/module/enterprisesearch/_meta/config-xpack.yml b/x-pack/metricbeat/module/enterprisesearch/_meta/config-xpack.yml new file mode 100644 index 00000000000..d80e6d349b6 --- /dev/null +++ b/x-pack/metricbeat/module/enterprisesearch/_meta/config-xpack.yml
@@ -0,0 +1,8 @@ +- module: enterprisesearch + xpack.enabled: true + metricsets: ["health", "stats"] + enabled: true + period: 10s + hosts: ["http://localhost:3002"] + #username: "user" + #password: "secret"
diff --git a/x-pack/metricbeat/module/enterprisesearch/_meta/config.yml b/x-pack/metricbeat/module/enterprisesearch/_meta/config.yml index 7c2efca0a61..e90fa79f9ff 100644 --- a/x-pack/metricbeat/module/enterprisesearch/_meta/config.yml +++ b/x-pack/metricbeat/module/enterprisesearch/_meta/config.yml
@@ -3,5 +3,5 @@ enabled: true period: 10s hosts: ["http://localhost:3002"] - username: elastic - password: changeme + #username: "user" + #password: "secret"
diff --git a/x-pack/metricbeat/module/enterprisesearch/_meta/docs.asciidoc b/x-pack/metricbeat/module/enterprisesearch/_meta/docs.asciidoc index 3251d02c09a..e417e57b682 100644 --- a/x-pack/metricbeat/module/enterprisesearch/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/enterprisesearch/_meta/docs.asciidoc
@@ -7,3 +7,14 @@ The module has been tested with Enterprise Search versions 7.16.0 and higher. Ve [float] === Usage The Enterprise Search module requires a set of credentials (a username and a password) for an Elasticsearch user that has a `monitor` https://www.elastic.co/guide/en/elasticsearch/reference/current/security-privileges.html#privileges-list-cluster[cluster privilege]. + +[float] +=== Usage for {stack} Monitoring + +The Enterprise Search module can be used to collect metrics shown in our {stack-monitor-app} +UI in {kib}. To enable this usage, set `xpack.enabled: true` in configuration. + +NOTE: When this module is used for {stack} Monitoring, it sends metrics to the +monitoring index instead of the default index typically used by {metricbeat}. +For more details about the monitoring index, see +{ref}/config-monitoring-indices.html[Configuring indices for monitoring].
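To make the index routing in this patch easier to follow, here is a self-contained Go sketch of how the new `EnterpriseSearch` product constant feeds into the monitoring index name used when `xpack.enabled: true` is set. Only the enum layout and the "ent-search" string come from the hunks above; the leading "es" entry, the .monitoring-<product>-<version>-mb index layout, and the hard-coded format version are assumptions for illustration, not code quoted from this patch.

package main

import "fmt"

// Product mirrors the enum in metricbeat/helper/elastic/elastic.go;
// EnterpriseSearch is the member added by this patch.
type Product int

const (
	Elasticsearch Product = iota
	Kibana
	Logstash
	Beats
	EnterpriseSearch
)

// xPackMonitoringIndexString mirrors the lookup table extended above with
// "ent-search". The leading "es" entry is an assumption; it sits outside
// the visible hunk context.
func (p Product) xPackMonitoringIndexString() string {
	names := []string{"es", "kibana", "logstash", "beats", "ent-search"}
	if int(p) < 0 || int(p) >= len(names) {
		panic(fmt.Sprintf("unknown product %d", int(p)))
	}
	return names[p]
}

// makeXPackMonitoringIndexName sketches what the helper plausibly returns;
// the exact format string and version component are assumptions.
func makeXPackMonitoringIndexName(p Product) string {
	const formatVersion = 7 // assumed monitoring index format version
	return fmt.Sprintf(".monitoring-%s-%d-mb", p.xPackMonitoringIndexString(), formatVersion)
}

func main() {
	// With xpack.enabled: true, events are routed to an index like this
	// instead of the default metricbeat-* indices.
	fmt.Println(makeXPackMonitoringIndexName(EnterpriseSearch)) // .monitoring-ent-search-7-mb
}

The health and stats metricsets later in this patch apply exactly this override by setting event.Index = elastic.MakeXPackMonitoringIndexName(elastic.EnterpriseSearch) whenever the unpacked `xpack.enabled` flag is true.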
diff --git a/x-pack/metricbeat/module/enterprisesearch/docker-compose.yml b/x-pack/metricbeat/module/enterprisesearch/docker-compose.yml index 3e9dbfdf9bf..09d7addb71c 100644 --- a/x-pack/metricbeat/module/enterprisesearch/docker-compose.yml +++ b/x-pack/metricbeat/module/enterprisesearch/docker-compose.yml @@ -24,7 +24,7 @@ services: - 3002:3002 elasticsearch: - image: docker.elastic.co/integrations-ci/beats-elasticsearch:${ELASTICSEARCH_VERSION:-7.15.0}-1 + image: docker.elastic.co/integrations-ci/beats-elasticsearch:${ELASTICSEARCH_VERSION:-8.0.0-SNAPSHOT}-1 build: args: ELASTICSEARCH_VERSION: ${ELASTICSEARCH_VERSION:-7.15.0} diff --git a/x-pack/metricbeat/module/enterprisesearch/health/data.go b/x-pack/metricbeat/module/enterprisesearch/health/data.go index 517b0d1fbbc..32fa3849788 100644 --- a/x-pack/metricbeat/module/enterprisesearch/health/data.go +++ b/x-pack/metricbeat/module/enterprisesearch/health/data.go @@ -13,6 +13,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" + "github.com/elastic/beats/v7/metricbeat/helper/elastic" "github.com/elastic/beats/v7/metricbeat/mb" ) @@ -73,7 +74,7 @@ var ( } ) -func eventMapping(report mb.ReporterV2, input []byte) error { +func eventMapping(report mb.ReporterV2, input []byte, isXpack bool) error { var data map[string]interface{} err := json.Unmarshal(input, &data) if err != nil { @@ -115,6 +116,13 @@ func eventMapping(report mb.ReporterV2, input []byte) error { // Set the process info we have collected data["process"] = process + // xpack.enabled in config using standalone metricbeat writes to `.monitoring` instead of `metricbeat-*` + // When using Agent, the index name is overwritten anyways. 
+ if isXpack { + index := elastic.MakeXPackMonitoringIndexName(elastic.EnterpriseSearch) + event.Index = index + } + event.MetricSetFields, err = schema.Apply(data) if err != nil { errs = append(errs, errors.Wrap(err, "failure to apply health schema")) diff --git a/x-pack/metricbeat/module/enterprisesearch/health/health.go b/x-pack/metricbeat/module/enterprisesearch/health/health.go index e649748c2b7..d263314bf95 100644 --- a/x-pack/metricbeat/module/enterprisesearch/health/health.go +++ b/x-pack/metricbeat/module/enterprisesearch/health/health.go @@ -38,7 +38,8 @@ func init() { type MetricSet struct { mb.BaseMetricSet - http *helper.HTTP + http *helper.HTTP + XPackEnabled bool } func New(base mb.BaseMetricSet) (mb.MetricSet, error) { @@ -48,9 +49,19 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { if err != nil { return nil, err } + config := struct { + XPackEnabled bool `config:"xpack.enabled"` + }{ + XPackEnabled: false, + } + if err := base.Module().UnpackConfig(&config); err != nil { + return nil, err + } + return &MetricSet{ base, http, + config.XPackEnabled, }, nil } @@ -63,7 +74,7 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { return errors.Wrap(err, "error in fetch") } - err = eventMapping(report, content) + err = eventMapping(report, content, m.XPackEnabled) if err != nil { return errors.Wrap(err, "error converting event") } diff --git a/x-pack/metricbeat/module/enterprisesearch/stats/data.go b/x-pack/metricbeat/module/enterprisesearch/stats/data.go index 049145dda19..d11767b316e 100644 --- a/x-pack/metricbeat/module/enterprisesearch/stats/data.go +++ b/x-pack/metricbeat/module/enterprisesearch/stats/data.go @@ -13,6 +13,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" + "github.com/elastic/beats/v7/metricbeat/helper/elastic" "github.com/elastic/beats/v7/metricbeat/mb" ) @@ -155,7 +156,7 @@ var ( } ) -func eventMapping(report mb.ReporterV2, input []byte) error { +func eventMapping(report mb.ReporterV2, input []byte, isXpack bool) error { var data map[string]interface{} err := json.Unmarshal(input, &data) if err != nil { @@ -185,6 +186,13 @@ func eventMapping(report mb.ReporterV2, input []byte) error { errs = append(errs, errors.New("queues is not a map")) } + // xpack.enabled in config using standalone metricbeat writes to `.monitoring` instead of `metricbeat-*` + // When using Agent, the index name is overwritten anyways. 
+ if isXpack { + index := elastic.MakeXPackMonitoringIndexName(elastic.EnterpriseSearch) + event.Index = index + } + event.MetricSetFields, err = schema.Apply(data) if err != nil { errs = append(errs, errors.Wrap(err, "failure to apply stats schema")) diff --git a/x-pack/metricbeat/module/enterprisesearch/stats/stats.go b/x-pack/metricbeat/module/enterprisesearch/stats/stats.go index 8a1f2036235..c59b3481af4 100644 --- a/x-pack/metricbeat/module/enterprisesearch/stats/stats.go +++ b/x-pack/metricbeat/module/enterprisesearch/stats/stats.go @@ -38,7 +38,8 @@ func init() { type MetricSet struct { mb.BaseMetricSet - http *helper.HTTP + http *helper.HTTP + XPackEnabled bool } func New(base mb.BaseMetricSet) (mb.MetricSet, error) { @@ -48,9 +49,19 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { if err != nil { return nil, err } + + config := struct { + XPackEnabled bool `config:"xpack.enabled"` + }{ + XPackEnabled: false, + } + if err := base.Module().UnpackConfig(&config); err != nil { + return nil, err + } return &MetricSet{ base, http, + config.XPackEnabled, }, nil } @@ -63,7 +74,7 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { return errors.Wrap(err, "error in fetch") } - err = eventMapping(report, content) + err = eventMapping(report, content, m.XPackEnabled) if err != nil { return errors.Wrap(err, "error converting event") } diff --git a/x-pack/metricbeat/modules.d/enterprisesearch-xpack.yml.disabled b/x-pack/metricbeat/modules.d/enterprisesearch-xpack.yml.disabled new file mode 100644 index 00000000000..e42dde843c2 --- /dev/null +++ b/x-pack/metricbeat/modules.d/enterprisesearch-xpack.yml.disabled @@ -0,0 +1,11 @@ +# Module: enterprisesearch +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-enterprisesearch.html + +- module: enterprisesearch + xpack.enabled: true + metricsets: ["health", "stats"] + enabled: true + period: 10s + hosts: ["http://localhost:3002"] + #username: "user" + #password: "secret" diff --git a/x-pack/metricbeat/modules.d/enterprisesearch.yml.disabled b/x-pack/metricbeat/modules.d/enterprisesearch.yml.disabled index 78c9ca01448..241791cc203 100644 --- a/x-pack/metricbeat/modules.d/enterprisesearch.yml.disabled +++ b/x-pack/metricbeat/modules.d/enterprisesearch.yml.disabled @@ -6,5 +6,5 @@ enabled: true period: 10s hosts: ["http://localhost:3002"] - username: elastic - password: changeme + #username: "user" + #password: "secret" From 2ebb8b3e200713e4295d8ae4c26cf8c028cbaea7 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Thu, 27 Jan 2022 11:10:41 +0000 Subject: [PATCH 54/69] probot: update stale dates (#29997) --- .github/stale.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/stale.yml b/.github/stale.yml index c0fadc07678..160c31c5744 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -14,7 +14,7 @@ onlyLabels: [] exemptLabels: [] # Set to true to ignore issues in a project (defaults to false) -exemptProjects: true +exemptProjects: false # Set to true to ignore issues in a milestone (defaults to false) exemptMilestones: true @@ -47,7 +47,7 @@ limitPerRun: 30 # Optionally, specify configuration settings that are specific to just 'issues' or 'pulls': pulls: - daysUntilStale: 30 + daysUntilStale: 90 daysUntilClose: 30 markComment: > Hi! 
@@ -79,4 +79,4 @@ pulls: # issues: # exemptLabels: -# - confirmed \ No newline at end of file +# - confirmed From dc1e3473c885474f0a142bd2257fe39195a18b1d Mon Sep 17 00:00:00 2001 From: gpop63 <94497545+gpop63@users.noreply.github.com> Date: Thu, 27 Jan 2022 05:02:45 -0800 Subject: [PATCH 55/69] [Metricbeat] gcp: add firestore metricset (#29918) * Add firestore metricset * Add firestore metricset reference * Add firestore metricset metrics mapping * Add firestore filter logic * Update gcp fields & module * Update gcp module & metricbeat docs * Add changelog entry * Add changelog entry in developer.next --- CHANGELOG-developer.next.asciidoc | 1 + CHANGELOG.next.asciidoc | 1 + metricbeat/docs/fields.asciidoc | 33 +++++++++++ metricbeat/docs/modules/gcp.asciidoc | 5 ++ .../docs/modules/gcp/firestore.asciidoc | 24 ++++++++ metricbeat/docs/modules_list.asciidoc | 3 +- x-pack/metricbeat/metricbeat.reference.yml | 1 + x-pack/metricbeat/module/gcp/_meta/config.yml | 1 + x-pack/metricbeat/module/gcp/constants.go | 1 + x-pack/metricbeat/module/gcp/fields.go | 2 +- .../module/gcp/firestore/_meta/data.json | 39 ++++++++++++ .../gcp/firestore/_meta/data_document.json | 39 ++++++++++++ .../module/gcp/firestore/_meta/docs.asciidoc | 13 ++++ .../module/gcp/firestore/_meta/fields.yml | 14 +++++ .../firestore/firestore_integration_test.go | 59 +++++++++++++++++++ .../module/gcp/firestore/firestore_test.go | 21 +++++++ .../module/gcp/firestore/manifest.yml | 11 ++++ .../module/gcp/metrics/metrics_requester.go | 2 +- .../module/gcp/metrics/response_parser.go | 5 ++ x-pack/metricbeat/module/gcp/module.yml | 1 + x-pack/metricbeat/modules.d/gcp.yml.disabled | 1 + 21 files changed, 274 insertions(+), 3 deletions(-) create mode 100644 metricbeat/docs/modules/gcp/firestore.asciidoc create mode 100644 x-pack/metricbeat/module/gcp/firestore/_meta/data.json create mode 100644 x-pack/metricbeat/module/gcp/firestore/_meta/data_document.json create mode 100644 x-pack/metricbeat/module/gcp/firestore/_meta/docs.asciidoc create mode 100644 x-pack/metricbeat/module/gcp/firestore/_meta/fields.yml create mode 100644 x-pack/metricbeat/module/gcp/firestore/firestore_integration_test.go create mode 100644 x-pack/metricbeat/module/gcp/firestore/firestore_test.go create mode 100644 x-pack/metricbeat/module/gcp/firestore/manifest.yml diff --git a/CHANGELOG-developer.next.asciidoc b/CHANGELOG-developer.next.asciidoc index 92566fdf5f9..2757def7829 100644 --- a/CHANGELOG-developer.next.asciidoc +++ b/CHANGELOG-developer.next.asciidoc @@ -130,6 +130,7 @@ The list below covers the major changes between 7.0.0-rc2 and master only. - Update Go version to 1.17.1. {pull}27543[27543] - Whitelist `GCP_*` environment variables in dev tools {pull}28364[28364] - Add support for `credentials_json` in `gcp` module, all metricsets {pull}29584[29584] +- Add gcp firestore metricset. {pull}29918[29918] ==== Deprecated diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 81271742fd1..60ceff9a1ff 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -194,6 +194,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add `elasticsearch.cluster.id` field to Beat and Kibana modules. {pull}29577[29577] - Add `elasticsearch.cluster.id` field to Logstash module. {pull}29625[29625] - Add `xpack.enabled` support for Enterprise Search module. {pull}29871[29871] +- Add gcp firestore metricset. 
{pull}29918[29918] *Packetbeat* diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index d1b6ece6ffc..f525089ad93 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -34847,6 +34847,39 @@ type: long -- +[float] +=== firestore + +Google Cloud Firestore metrics + + +*`gcp.firestore.document.delete.count`*:: ++ +-- +The number of successful document deletes. + +type: long + +-- + +*`gcp.firestore.document.read.count`*:: ++ +-- +The number of successful document reads from queries or lookups. + +type: long + +-- + +*`gcp.firestore.document.write.count`*:: ++ +-- +The number of successful document writes. + +type: long + +-- + [float] === gke diff --git a/metricbeat/docs/modules/gcp.asciidoc b/metricbeat/docs/modules/gcp.asciidoc index 164a7829c87..86b6239ae2a 100644 --- a/metricbeat/docs/modules/gcp.asciidoc +++ b/metricbeat/docs/modules/gcp.asciidoc @@ -275,6 +275,7 @@ metricbeat.modules: metricsets: - pubsub - loadbalancing + - firestore zone: "us-central1-a" project_id: "your project id" credentials_file_path: "your JSON credentials file path" @@ -339,6 +340,8 @@ The following metricsets are available: * <> +* <> + * <> * <> @@ -353,6 +356,8 @@ include::gcp/billing.asciidoc[] include::gcp/compute.asciidoc[] +include::gcp/firestore.asciidoc[] + include::gcp/gke.asciidoc[] include::gcp/loadbalancing.asciidoc[] diff --git a/metricbeat/docs/modules/gcp/firestore.asciidoc b/metricbeat/docs/modules/gcp/firestore.asciidoc new file mode 100644 index 00000000000..2e36b795b15 --- /dev/null +++ b/metricbeat/docs/modules/gcp/firestore.asciidoc @@ -0,0 +1,24 @@ +//// +This file is generated! See scripts/mage/docs_collector.go +//// + +[[metricbeat-metricset-gcp-firestore]] +[role="xpack"] +=== Google Cloud Platform firestore metricset + +beta[] + +include::../../../../x-pack/metricbeat/module/gcp/firestore/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../../x-pack/metricbeat/module/gcp/firestore/_meta/data.json[] +---- diff --git a/metricbeat/docs/modules_list.asciidoc b/metricbeat/docs/modules_list.asciidoc index 30177e13624..e4f6d15aea9 100644 --- a/metricbeat/docs/modules_list.asciidoc +++ b/metricbeat/docs/modules_list.asciidoc @@ -120,8 +120,9 @@ This file is generated! 
See scripts/mage/docs_collector.go |<> |<> |<> beta[] |image:./images/icon-yes.png[Prebuilt dashboards are available] | -.7+| .7+| |<> beta[] +.8+| .8+| |<> beta[] |<> beta[] +|<> beta[] |<> beta[] |<> beta[] |<> beta[] diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index 4971d923273..cb59390723f 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -552,6 +552,7 @@ metricbeat.modules: metricsets: - pubsub - loadbalancing + - firestore zone: "us-central1-a" project_id: "your project id" credentials_file_path: "your JSON credentials file path" diff --git a/x-pack/metricbeat/module/gcp/_meta/config.yml b/x-pack/metricbeat/module/gcp/_meta/config.yml index 00295e795d4..632e9e23538 100644 --- a/x-pack/metricbeat/module/gcp/_meta/config.yml +++ b/x-pack/metricbeat/module/gcp/_meta/config.yml @@ -11,6 +11,7 @@ metricsets: - pubsub - loadbalancing + - firestore zone: "us-central1-a" project_id: "your project id" credentials_file_path: "your JSON credentials file path" diff --git a/x-pack/metricbeat/module/gcp/constants.go b/x-pack/metricbeat/module/gcp/constants.go index 3776263b7b6..c8bd1812ff8 100644 --- a/x-pack/metricbeat/module/gcp/constants.go +++ b/x-pack/metricbeat/module/gcp/constants.go @@ -22,6 +22,7 @@ const ( ServiceLoadBalancing = "loadbalancing" ServicePubsub = "pubsub" ServiceStorage = "storage" + ServiceFirestore = "firestore" ) //Paths within the GCP monitoring.TimeSeries response, if converted to JSON, where you can find each ECS field required for the output event diff --git a/x-pack/metricbeat/module/gcp/fields.go b/x-pack/metricbeat/module/gcp/fields.go index a5b2d2a836a..d9d6047873f 100644 --- a/x-pack/metricbeat/module/gcp/fields.go +++ b/x-pack/metricbeat/module/gcp/fields.go @@ -19,5 +19,5 @@ func init() { // AssetGcp returns asset data. // This is the base64 encoded zlib format compressed contents of module/gcp. 
func AssetGcp() string { - return "eJzcXVtz27byf/en2OlLkv84yr89Z85D5kxnUqftyZy49YyTvLIguJJQgQALgHaUT38GN14kSqJ4kZMmT7bE3d/esLvAgn4JG9y+hhUtrgAMMxxfw7NfpVxxhBsuywzuODFLqfJnVwAKORKNryFFQ64AMtRUscIwKV7Dj1cAAL/e3EEus5LjFcCSIc/0a/fBSxAkx8jK/jPbwv6sZBl/0/x+8xlOUuS6+nV8VKZ/IjWNX3fgif88LsGMVEysIEejGNX7lHchNGGUGtXi/1ofHYRi//lfJv4bG9w+SpV1Es7RkIwYMhdxK+ostPVWG8xnIa1Qy1JRnIx4JPxdpRD//7vTftWim8kydd7d8WmSk6JgYhW++l2L+BHvvA3uaNbEgEJTKoEZLJXMoRWMb+7ewV8lqu1iT6yUcc7E6hC/Fpmf/HejazSe2Y3wtmKasQqdwRLRUKm9Rq72Tddl9RbWG6mN+64GJigvMwSFq5ITdQ2GfL4Gkv1ZapOjMNdARAZKliKzakelpFp04GHiQTKKSS6FWQ/BFFWmsJDKgKPTxahQ0nkDy4ZwufNPw7u3IJdg1hjNGvmmyKVYaTCyi7mRhvAOvksuiTnM9YN9rOJEclkKs+9gVOZFabCXg934787oYEum8JFwvsiULArMFunWoO6Q3errsOjvBJW5ldo9DoEYpFun/MikB/+kIHSDRifUae+B8LLL93uiCcTOwMOENkRQXNCiXCjUqB4wS6hUqA+C2VvJduD8VuYpKuuJjg5EsiCFg7O2cRr8NPI/Ba3UZIWJYTkuNNIBoD5aArCUCgjnARgToJFKkele7BfFTubox/mDtYAi1P4UxSacS0oMZnBz99Ev30wDLZVCYfjWIis1RoX1UVLG9GahkAz16Bvrfxae92hLyScSS7gX40QW49y4gmBJegTvfgdZoCL2C0eN5FA8KmZwGvktKYMCjOynAMd6Yg04mv1VkGMu1XaRWt+SYqFInmj2BQdCsV7r8kJY2C0qz8E6p/XKT7cL+LBmOqzW1oGl4FsgD4RxknIfbZ9uQ3niU5BVqH0Yf4AlyRnfdmfdwyKVGrOBIt16+HWUWVpPJ41+JEXCxEB/tfbZs4yLGSYCqlWJ2vggZkaDfBRgeYIuCMUnkVaWZkpxY5A6EWuJjXwKeQWaR6k2CyZWCrWeahmmyB5iTW/BBDbnIAlVwcKtTMMRxeJiFCacUDka7U8PqAaCmFgvZ8MpiyP1zHHmdYkVCpiwhsGaaEgRBahSCCZWR13WA0jcMj8Ixs+cFHYNtWRAM0Ex4ngkGrQhymB23aizFnBP8oJjBviAagv/+v/6kzdLgwq0/ZyJ1TVkxBAbqEIaeGCaxSgtCxuY3/9QP7rXc6w2B/uNZ3+sNvgHUCkMYcLrLW+20Y8ImipSVI30zR3cG0I3mWLWuLaXDk+7Xnh/c8iCtE/9+t+fn83SIzvmqFxlaivZZHR1fFPmJSeGPaArSB09W33WtbLLlaGfqBD0s+xJGTjLmRnZdFjYHqmjFgvtmtFofKVhnH1xddiEnYDF7RH37QJCJnOKAkqEjQ/8TBEz+B6IDsZrf2Cfd1ymC8Af/nmGBhX+ZVPzZI1lbe1AucM5Z15rTok6j7vU4p7TOO66DKQIK4XEuGRFxI7jNL0mMHwCv8FijTkqwhNtpLL9t/fgYbXDe0kJh4omBJoh9pjwNcXQVWIfa9TbtGgD1Rnwur5qWrDen8ZCDX3EGNuHxq87MUwKcKaoz5sCfGN5ovfiGfRY2FJmSUpuBhbmdZYo3JafJaWvIVVygwIy2xbaVLEt8Bpy8qdU7iwgZ6L7CGAP4LjIvo2dug/kId44swEulcCCU/+tc1hQ6YjFNfjLNGupQteTjQ4s22noHce1nWegj9kTeO/Rbvqkp9oGtm5mvFPuy9fqrM+ygZCZP0kIO/8k5ThZNdyg2aiMQ/w4zuOxzrMa7CLvvxhcKJQrXVy0z26YbnCLXSF32zsjfc0f9YqO/mucl+3XoQ2PmLgc9ZuGTY+bGjuzv9fJUuHQU5dfFGJDzZ6gxcm7hZoStt8GHHhatOMflwHuEQ88UHCID3nJ1J4xQ3flgTa35QaiDTXK+MBrLG6htpsR4wUS0sgCtU+/1cHustV/U7tjwqkuRkIAVYd00y2444vpGT00ni3F07HE23Hg2VKNU+wotjp9a8DdO3UagV+jMHNhN4oInTNjpodfsCzxGxfDxx1y8hnu/Ajd7/cjfdXiGTGqYOHU2gsdBxRKUtQ6jiyMBZlkBHMpnuwYydme4wPyMBAMHtCokjcKNXHnHSDv4RzQjhcy21stxq9np9aJQmYj46yJ264S02PuWB+mgf0geZlPUS/WiN2EVmgpqiGSMDBiWV4qgTfEG+Hrv3XJ1YzV4yKdwDVLpeaJd1RnKdq1sgn+WIV2fBdxZ2s8DMRV9g4YwoDRRUYbuCRZSjgRtO/U/ntJMvgpPjLjbPXamEIvUkI3KLJk3DZ5OwE25n1IdeSr/XzGfz58uHt17/QCXjHWlBICjk7n7EY6rN7ZSdURWxh5TrcVEPtxF9g+AHUhhR7alx3XpScdlFlhfS4VUELX+MLqEj8bVIJwh//5/Yu+AlzKByhnKIy2UM/T8MymPxfMpcx8KGaCHrsg8n8soheMG+I7CNMhqzztw83dq49v72LO38Ea/LTGHLPCksvHBfwilSXgftLAzDMNzsL1jQ1blBYFZ9SlJdBGIcndTGg/4ZNxw4NtJbQmCKdTwwlBxg2qHjdjQNNcOnpJMq/pgsQz226w7N3omZg/6t69/6nDl54vR9jiRT9xZrZFt2Cng6RCeZkgacKcV+uXDoGGZJ03D2mRaM2TQsnP2wXlUrvbZ0Kgq/UPH1j1bWcatOJ4r0IwqHIm3P0r11za+Ly/fw8exkmcoyJxd6e01tqn24aPljrs+PQBNM5LDyOq7fjp9jxEAh8vYEfqOrdhRpQFigkg3vi+sxEOsjS23cysttqwlSxXa7f0HMD68mAv5l63sN+6dbdoQy7YHnqHQlGmukx7Eb8r0/synbHB1IIUei2Na4y4XI3ayrHPgmZfsB6y07btt8WyG/lwV8NIzbQHIL9xnqTbROGKycF3uwYB3Btx81YJSI6hp1Is2Sopi4xMsu9P49UYT7j0lxeBrolYob72FvcdUnW10fHwt/JRl/y4ukWZJ1Ebo5eWMXZvAmmYfWZIwy0teYbaRMgJWQ27fvRmhfC83qV/ET3Uk4/whyh0H2BDrReHer6iy7TivSB0EwWZMKYq3yB0I+Qjx2zlQ+lN/XO1F9GKtQw5c1uTlvVJ9LOssaVooa5keU4WmwVZQGBaffAi2KMJ7CTwrcGEysHbTi2l+xHd+gL2NeRIdKm8p4TTH7erzMKGuZHxK/BXKQ2B5vb3KeyXXYndnjMSum6BGLE+N0XJkGQJR2NQzRkFRZlyptde8ZYneJ5gZMFoS5aewHOZJTZ0LTHOBM6J/nEtNULk5G5Nets7wLcyY8vtG7p5G78wQVwfEi+pTD2h
oPsSxAW3GVIT2GjcRm4/6J3K7wnYFgqNxmCG6iV4gQ+E9pL5zIaJXgOKrJBM2LRWGnf4tEXTyiO95ChFxWs6OS6RGUJxEXO99aBahLmKoS6xdsuNSYUYXYdOK9C4GipI67xjflt1O+EhHxzhgh3yzG22A7JNbK+i5DxpFL6zZJWGID3ziTtWIJDhkgkWpwi6HtXY2M14VW9ntKR8dTqHhtcA9VbX9PmLnGoHemYuB3FOa1oGfwczTm9Cp5lxttPrWXDpNRBjMC+6ccFHwdkGnQD62g8Q2WfcybgClhcccxTGdxaZRD8KkxJD1+51qVWaX8C99C1KdWNS8G39+g4psPXAwp1NNJkpa3h/aOJe82i9wxZIK/aAovWsuzZIigKJgrzkhhUc/VW3sxSdcGJQUDa4KHrLtFEsLaODO2mi+BVxt9bnjCoZF/whHtIuI6Zt9rsLuhGlz07NM+tm67ngx6VNjThHDrBkjwbqaVzCzNn/uqOmSllxrTYyorDNQRhKGdHy+gNS23ZdpDy5j+zu7Pr99ylWOtQ4g8tGJu3sB4/MrEFI8dL68ralVZYN8+22OHN6xI5Ql3KDf1OZ4Y+DnOFc5V1yU6kdX/v7NOO2lk5J9mQuvy+oPymdQMhq8nBGocIA4hC0sVueNeEf2oAan+ndTvT4Y4lxZxGNUwi37e/OJjoLWw/3WzkT9mjjcmMNOnQMZr/sdicL1ZLraLuq29F/cRDMt7BDF05HBnny17mHNUair62xGCOLRpHNvF/Tjotvppht6GeOPRGvlTO3awKmrz0de5hPn4fPCYyqlPAvTLg6AKM1wnYfXq4w3wwbKdjIWydvkRtSW9a9j5a4fbaGKa0m7Sc5mrXMHO+YSJ3hwbYGXZYmpVl/WRDKk5RozJLwV2oIpah1spgCcVXghj9bY53OmVeEv4kTrvCtFBEGM/C8QUuOfAtZ6e5Zhm++uXnfWcTUYtRrzEB9+78cYTV9876xYnW9UO4wkqBGXSBlS0YTiywvzZi1eUercaYjJ1lTQZHjQU1NdBV5B83OJeTdK7zT+OoEt5E7YXe+yXsayPGFMN46Q8fvd+4ie2IaClSQlnSDpgU2vjGGcqJ16w6seyF+aCGkoOgoZGTr/zySu2Ubv6ew8LPWxISRoXCbFdzdggfC44CzLP2r0zLSOYXdenmPSyVJvOz67XjedG8giqmPcF7ZMbzf4Ssz5f8CAAD//6e5oMw=" + return "eJzcXVtvG7mSfs+vKMxLkoWj7Jld7EOwOEDGOTMbbDxjwEleeyh2SeKITXZ4saP8+gPe+iK1rFZf5GSSJ1vqqq+urCqS7Vewxd0bWNPyGYBhhuMbeP6blGuOcM2lzeGWE7OSqnj+DEAhR6LxDSzRkGcAOWqqWGmYFG/gn88AAH67voVC5pbjM4AVQ57rN/6DVyBIgYmV+2d2pftZSZt+0/x+8xlOlsh19ev0qFz+hdQ0ft2BJ/0LuAQzUjGxhgKNYlQfUt6H0IRhNarFf7Q+OgrF/Qu/zMI3trh7kCrvJFygITkxZC7iTtRZaOudNljMQlqhllZRnIx4IvxTpZDw/6fTftWim0u79N7d8WlWkLJkYh2/+lOL+CPeeRPd0WyIAYXGKoE5rJQsoBWMb2/fwxeLarc4EGvJOGdifYxfi8wv4bvJNRrP7Ed4WzHNWIXOYEloqNRBI88OTddl9RbWa6mN/64GJii3OYLCteVEXYEhX6+A5H9ZbQoU5gqIyEFJK3KndlRKqkUHHibuJaOYFVKYzRBMSWUKS6kMeDpdjEolvTewfAiX2/A0vH8HcgVmg8msie8SuRRrDUZ2MTfSEN7Bd8UlMce5fnSPVZxIIa0whw5GZVFag70c7Dp8d0YHWzGFD4TzRa5kWWK+WO4M6g7Znb6Oi/5eUFk4qf3jEInBcueVn5j04J+VhG7R6Ix67d0Tbrt8vyeaSOwMPExoQwTFBS3tQqFGdY95RqVCfRTMQSbbg/O7LZaonCd6OpDIghQezsbFafTTxP8UNKvJGjPDClxopANAfXIEYCUVEM4jMCZAI5Ui173YL8q9laMf54/OAopQ91MSm3AuKTGYw/Xtp5C+mQZqlUJh+M4hsxqTwvooKWd6u1BIhnr0tfM/By94tKMUFhJHuBfjTJbj3LiC4EgGBO//AFmiIu4LjxrJo3hQzOA08jtSBgUY2U8BnvXEGvA0+6ugwEKq3WLpfEuKhSJFptk3HAjFea1fF2Jid6gCB+eczis/3yzg44bpmK2dA0vBd0DuCeNkyUO0fb6J5UlYgpxC3cP4M6xIwfiue9U9LpLVmA8U6SbAr6PM0Xo6afQDKTMmBvqrs8+BZXzMMBFRrS1qE4KYGQ3yQYDjCbokFJ9EWmnNlOKmIPUi1hIb+RTyCjQPUm0XTKwVaj1VGqbI7lNN78BENucgiVXBwmem4YhScTEKE06oHI3up3tUA0FMrJez4djykXrmceZ1iRULmJjDYEM0LBEFKCsEE+tHXTYAyHyaHwTjX5yULoc6MqCZoJhwPBAN2hBlML9q1FkLuCNFyTEHvEe1g//5z/qTtyuDCrT7nIn1FeTEEBeoQhq4Z5qlKLWlC8x//Fw/etBzuIJXG6n6dR2/pm/P0HfUmHJJrWs9FzlyNNjhdUd13pELRe0AllLUemV5xQICC714HIivFueE4RjokCa+WFQMNUgFXMqtLU+BC7XcnOg8hw7nWW+Pus3zP9db/BOoFIYwEYKuaM5gHhA0VaSspjDXt3BnCN3mirnM8Pb2fXraD1IOJ4vOw91Tv/3/v57PMmDxzFH5tsa1Qdno1uraFpYTw+7RdzOenmtd6kbLF1qxGa0Q9EsLJ2XgrGBmZMfqYAeknlrq0mpGo/FZwzj75ov4CdtIhzsg7ttCxjLIKwooES654leKmMM/gOhovPYH7nnPZbrs/fN/n6FBhV9cXTfZVKK2dqTc4ZwzL1SnRJ3HXWpxz5k67LsMLBHWConxlQ4Re47T9JrI8An8BssNFqgIz9zKTtYY4nBg4flBUsKhogmRZow9JkJBOjRLHGJNepsWbaQ6A17flE8LNvjTWKixCR1j+zg16F4YJgU4U9QXTQF+sHWid/KMeixdKbMilpuBXV29SpR+XuxI6StYKrlFAbl8EH6p2JV4BQX5Syq/kVQw0b1/dABwXGTfpDFPCOQh3jizAS61gEWn/luvYVGlI5Jr9JdpcqlrkokaH1iu09B7jrshfpsoDAyewHsfHcWc9FRWYKOZCU55KF9rLHOWDYTMwzZU3DYiS46TVcMNmo3KOMaP5zwe6zzZYB95/2RwoVCudHHRPrthusEtdoXczwZH+lo4JyA6+q9xXnZYhzY8YuJyNEycmx43NXbmfq+zlcKhW3a/KmzOvAJBh5N3CzUl7DBDHrjVuOcflwEeEA/cjfKIj3nJ1J4xQ3cVgDbHcgPRxhplfOA1klus7WbEeIEFaWSB2qff6mB32eq/qd0x4VQXIzGAqh3e6RLu+GJ6Rg9NG5NpazULdhy4MVnjFHuKrbZuG3APtixH4NcozFzYjSJ
CF8yY6eGXLM/C4GL4WZmCfIXbcP7yj7uRvurwjDjn0t76ih0HlEpS1DqddxkLMssJFlI82TaStz3He+TxNDkEQKNK3iTUxJ13hHyAc0A7Xsr8IFuMz2en8kQp85Fx1sTtssT0mDvywzSw7yW3xRT1Yo3YH++LLUV1AimeNnIsL7WAN8Qb4eu/d8nVjNXHRTqBa5ZKLRDvqM6W6HJlE/xjFdrjU8S90Xg8TVnZO2KIp9Muci6GS5IvCSeC9r3y8UGSHH5Jj8x4MH9jTKkXS0K3KPJs3Ji8vQA2DouRass3Hkv5v48fb1/feb1AUIwzpYSIo9M5u5EOq3f2luqELZ6XX+4qIO7jLrB9AOpSCj20L3tcl4F0VGaF9YVUQAnd4EunS/xqUAnCPf4Xdy/7CnApH6CcoTDaQT1PwzOb/lwwlzLzsZiJeuyCyP9rkbxg3AnQozA9ssrTPl7fvv707jat+XtYo5/WmNOqsOLyYQG/SuUI+J80MPNcg7dwfd3HFaVlyRn1yxJoo5AU/kBxP+GzcSdP20poHT+dTg0nBBl3yvlxM0Y0zdTRS5J5TRclntl2g2XvRs/E/FH3/sMvHb70YjXCFi/7iTOzLboFOx0kFcrLBEkT5rxav3QINCTrvLZKy0xrnpVKft0tKJfaX10UAn2tf3zDqm8706CVjvcqBIOqYMJf3vPNpYvPu7sPEGCcxDkqEvcnpbXWPt80fNTqOPHpA2iclx5HVNvx8815iAQ+XMCO1Hduw4woSxQTQLwOfWcjHKQ1rt3MnbbasJW0641PPUewvjrai/l3dRy2bt0t2pDb2cdewFHapbbLXsRv7fLOLmdsMLUgpd5I4xsjLtejRjnuWdDsG9aH7LRr+12x7I98+HuFpGbaA1AYnGfLXaZwzeTgi4GDAB4ccQtWiUgeQ0+lWLF1ZsucTDL3p+leVSBsw81XoBsi1qivgsVDh1Tdi/U8wisdUFv+uLqFLbKkjdGpZYzdm0AaZp8Z0nBLS56jNglyRtbD7q69XSO8qKf0L5OHBvIJ/hCFHgJsqPXiUM9XtF1WvBeEbpMgE8ZU5RuEboV84JivQyi9rX+uZhGtWMuRMz+adKxPop8lx1rRQl3J8oIstguygMi0+uBltEcT2EngO4MZlYPHTi2lhyO69e39KyiQaKuCp8TdHz9VZnFgbmT6Cnyx0hBojr9PYb9sJvYzZyR00wIxIj83RcmR5BlHY1DNGQWlXXKmN0HxjicEnmBkyWhLlp7AC5lnLnQdMc4Ezon+YSM1QuLkr9wG23vANzJnq91bun2XvjBBXB8TL6tMPaGghxKkhNsMqQlsNG6Q2w96p/J7AnaFQqMxmKF6iV4QAqGdMp+7MNEbQJGXkgm3rFnjN592aFrrSC85rKh4TSfHJVaGWFyktd55UC3CXMVQl1j75cakQoyuQ6cVaFwNFaX13jG/rbqd8JgPjnDBDnnmNtsR2Sa2V2k5zxqF7yyrSkOQnuuJ31YgkOOKCZZOEXQ9qrExzXhdjzNaUr4+vYbGd0j1Vtf06xc51Q70XLk8xDmt6Rj8Hcw4vQm9ZsbZTm9mwaU3QIzBouzGBZ8EZ1v0AuircIDIPeN3xhWwouRYoDChs8glhqMwS2Loxr9rt1rmF3AnQ4tS3ZgUfFe/vkMKbD2w8HsTTWbKGT5smvh3hDrvcAXSmt2jaD3rrw2SskSioLDcsJJjuOp2lqIzTgwKygYXRe+YNootbXJwL00SvyLuc33BqJIp4Q/xkHYZMW2z313QjSh99mqeWYet54Ift2xqxDnWAEf20UA9jUuYOftfv9VUKSvlaiMTCtccxEMpI1resEHq2q6LlCd3id2ty99/n2KlQ40zuGxi0l794IGZDQgpXjlf3rW0yvJhvt0WZ06P2BPqUm7wv1Tm+M9BznCu8i45VGrH1+GcZtxo6ZRkT+byh4KGndIJhKxOHs4oVDyAOARt6pZnXfCPDaDGr/R+Ej1+W2LcXkRjF8KP/f3eRGdhG+D+KHvCAW1KN86gQ4/BHJbdfmehSrmetq+6Pf2XR8H8CBO6uDsyyJO/zxnWGIm+t8ZijCwaRT7zvKYdFz9MMdvQzxwzkaCVM8c1EdP3vhwHmE+/Dp8TGFUpEV6Y8OwIjNYRtrv4coX5zrCRko28dfIOuSG1Zf37aImfszVM6TTpPinQbGTueaeF1BseXGvQZWlizebbglCeLYnGPIt/4oj4l+9miykQVwVu/JtHzum8eUX8g0rxCt9aEWEwh8AbtOTId5Bbf88yfvPt9YfOIqYWo84xA/Ud/uyI0/T1h0bG6nqh3HEkUY26RMpWjGYOWWHNmNy8p9V0pqMgeVNBieNRTU10FXkPzd4l5P0rvNP46gS3kTthd74GfhrI6YUwwTpDj9/v3UUOxDSUqGBp6RZNC2x6YwzlROvWHVj/1xRiCyEFRU8hJ7vwt7X8Ldv0PYVlOGtNTDwyFG+zgr9bcE94OuAsbXh1Wk46T2G3Xt7jl5IsXXb9cTxvujcQpaWPcF7ZMb7f4Tsz5b8DAAD//5UOSUI=" } diff --git a/x-pack/metricbeat/module/gcp/firestore/_meta/data.json b/x-pack/metricbeat/module/gcp/firestore/_meta/data.json new file mode 100644 index 00000000000..3eeb092ce76 --- /dev/null +++ b/x-pack/metricbeat/module/gcp/firestore/_meta/data.json @@ -0,0 +1,39 @@ +{ + "@timestamp": "2016-05-23T08:05:34.853Z", + "cloud": { + "account": { + "id": "elastic-apm" + }, + "provider": "gcp" + }, + "event": { + "dataset": "gcp.firestore", + "duration": 115000, + "module": "gcp" + }, + "gcp": { + "labels": { + "metrics": { + "storage_class": "MULTI_REGIONAL" + }, + "resource": { + "bucket_name": "artifacts.elastic-apm.appspot.com", + "location": "us" + } + }, + "firestore": { + "document": { + "delete_count": { + "value": 15 + } + } + } + }, + "metricset": { + "name": "firestore", + "period": 10000 + }, + "service": { + "type": "gcp" + } +} \ No newline at end of file diff --git a/x-pack/metricbeat/module/gcp/firestore/_meta/data_document.json 
b/x-pack/metricbeat/module/gcp/firestore/_meta/data_document.json new file mode 100644 index 00000000000..3eeb092ce76 --- /dev/null +++ b/x-pack/metricbeat/module/gcp/firestore/_meta/data_document.json @@ -0,0 +1,39 @@ +{ + "@timestamp": "2016-05-23T08:05:34.853Z", + "cloud": { + "account": { + "id": "elastic-apm" + }, + "provider": "gcp" + }, + "event": { + "dataset": "gcp.firestore", + "duration": 115000, + "module": "gcp" + }, + "gcp": { + "labels": { + "metrics": { + "storage_class": "MULTI_REGIONAL" + }, + "resource": { + "bucket_name": "artifacts.elastic-apm.appspot.com", + "location": "us" + } + }, + "firestore": { + "document": { + "delete_count": { + "value": 15 + } + } + } + }, + "metricset": { + "name": "firestore", + "period": 10000 + }, + "service": { + "type": "gcp" + } +} \ No newline at end of file diff --git a/x-pack/metricbeat/module/gcp/firestore/_meta/docs.asciidoc b/x-pack/metricbeat/module/gcp/firestore/_meta/docs.asciidoc new file mode 100644 index 00000000000..8496de82389 --- /dev/null +++ b/x-pack/metricbeat/module/gcp/firestore/_meta/docs.asciidoc @@ -0,0 +1,13 @@ +Firestore metricset fetches metrics from https://cloud.google.com/firestore/[Firestore] in Google Cloud Platform. + +The `firestore` metricset contains all metrics exported from the https://cloud.google.com/monitoring/api/metrics_gcp#gcp-firestore[GCP Firestore Monitoring API]. The field names have been left untouched for people already familiar with them. + +You can specify a single region to fetch metrics like `us-central1`. Be aware that GCP Storage does not use zones so `us-central1-a` will return nothing. If no region is specified, it will return metrics from all buckets. + +[float] +=== Metrics +Here is a list of metrics collected by `firestore` metricset: + +- `firestore.document.delete_count`: The number of successful document deletes. +- `firestore.document.read_count`: The number of successful document reads from queries or lookups. +- `firestore.document.write_count`: The number of successful document writes. diff --git a/x-pack/metricbeat/module/gcp/firestore/_meta/fields.yml b/x-pack/metricbeat/module/gcp/firestore/_meta/fields.yml new file mode 100644 index 00000000000..99ed9bf5e17 --- /dev/null +++ b/x-pack/metricbeat/module/gcp/firestore/_meta/fields.yml @@ -0,0 +1,14 @@ +- name: firestore + description: Google Cloud Firestore metrics + release: beta + type: group + fields: + - name: document.delete.count + type: long + description: The number of successful document deletes. + - name: document.read.count + type: long + description: The number of successful document reads from queries or lookups. + - name: document.write.count + type: long + description: The number of successful document writes. diff --git a/x-pack/metricbeat/module/gcp/firestore/firestore_integration_test.go b/x-pack/metricbeat/module/gcp/firestore/firestore_integration_test.go new file mode 100644 index 00000000000..8783f3144fd --- /dev/null +++ b/x-pack/metricbeat/module/gcp/firestore/firestore_integration_test.go @@ -0,0 +1,59 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build integration && gcp +// +build integration,gcp + +package firestore + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/libbeat/common" + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + "github.com/elastic/beats/v7/x-pack/metricbeat/module/gcp/metrics" +) + +func TestFetch(t *testing.T) { + config := metrics.GetConfigForTest(t, "firestore") + fmt.Printf("%+v\n", config) + + metricSet := mbtest.NewReportingMetricSetV2WithContext(t, config) + events, errs := mbtest.ReportingFetchV2WithContext(metricSet) + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) + } + + assert.NotEmpty(t, events) + mbtest.TestMetricsetFieldsDocumented(t, metricSet, events) +} + +func TestData(t *testing.T) { + metricPrefixIs := func(metricPrefix string) func(e common.MapStr) bool { + return func(e common.MapStr) bool { + v, err := e.GetValue(metricPrefix) + return err == nil && v != nil + } + } + + dataFiles := []struct { + metricPrefix string + path string + }{ + {"gcp.firestore", "./_meta/data.json"}, + {"gcp.firestore.document", "./_meta/data_document.json"}, + } + + config := metrics.GetConfigForTest(t, "firestore") + + for _, df := range dataFiles { + metricSet := mbtest.NewFetcher(t, config) + t.Run(fmt.Sprintf("metric prefix: %s", df.metricPrefix), func(t *testing.T) { + metricSet.WriteEventsCond(t, df.path, metricPrefixIs(df.metricPrefix)) + }) + } +} diff --git a/x-pack/metricbeat/module/gcp/firestore/firestore_test.go b/x-pack/metricbeat/module/gcp/firestore/firestore_test.go new file mode 100644 index 00000000000..ea7d802296d --- /dev/null +++ b/x-pack/metricbeat/module/gcp/firestore/firestore_test.go @@ -0,0 +1,21 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package firestore + +import ( + "os" + + "github.com/elastic/beats/v7/metricbeat/mb" + + // Register input module and metricset + _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/gcp" + _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/gcp/metrics" +) + +func init() { + // To be moved to some kind of helper + os.Setenv("BEAT_STRICT_PERMS", "false") + mb.Registry.SetSecondarySource(mb.NewLightModulesSource("../../../module")) +} diff --git a/x-pack/metricbeat/module/gcp/firestore/manifest.yml b/x-pack/metricbeat/module/gcp/firestore/manifest.yml new file mode 100644 index 00000000000..95760a8e894 --- /dev/null +++ b/x-pack/metricbeat/module/gcp/firestore/manifest.yml @@ -0,0 +1,11 @@ +default: false +input: + module: gcp + metricset: metrics + defaults: + metrics: + - service: firestore + metric_types: + - "document/delete_count" + - "document/read_count" + - "document/write_count" diff --git a/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go b/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go index d518acc0014..d25701236e4 100644 --- a/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go +++ b/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go @@ -124,7 +124,7 @@ func (r *metricsRequester) getFilterForMetric(serviceName, m string) (f string) } f = fmt.Sprintf("%s AND resource.label.location=starts_with(\"%s\")", f, zone) } - case gcp.ServicePubsub, gcp.ServiceLoadBalancing, gcp.ServiceCloudFunctions: + case gcp.ServicePubsub, gcp.ServiceLoadBalancing, gcp.ServiceCloudFunctions, gcp.ServiceFirestore: return case gcp.ServiceStorage: if r.config.Region == "" { diff --git a/x-pack/metricbeat/module/gcp/metrics/response_parser.go b/x-pack/metricbeat/module/gcp/metrics/response_parser.go index 02fa9893cad..e0f968f77a5 100644 --- a/x-pack/metricbeat/module/gcp/metrics/response_parser.go +++ b/x-pack/metricbeat/module/gcp/metrics/response_parser.go @@ -213,6 +213,11 @@ var reMapping = map[string]string{ "storage.object_count.value": "storage.object.count", "storage.total_byte_seconds.value": "storage.total_byte_seconds.bytes", "storage.total_bytes.value": "storage.total.bytes", + + // gcp.firestore metricset + "document.delete_count.value": "document.delete.count", + "document.read_count.value": "document.read.count", + "document.write_count.value": "document.write.count", } func remap(l *logp.Logger, s string) string { diff --git a/x-pack/metricbeat/module/gcp/module.yml b/x-pack/metricbeat/module/gcp/module.yml index 6a3ecf77e58..e07d9656454 100644 --- a/x-pack/metricbeat/module/gcp/module.yml +++ b/x-pack/metricbeat/module/gcp/module.yml @@ -5,6 +5,7 @@ metricsets: - loadbalancing - storage - gke + - firestore dashboards: - id: Metricbeat-gcp-gke-overview file: 1ae960c0-f9f8-11eb-bc38-79936db7c106.json diff --git a/x-pack/metricbeat/modules.d/gcp.yml.disabled b/x-pack/metricbeat/modules.d/gcp.yml.disabled index d7d873be6d1..638846d0b96 100644 --- a/x-pack/metricbeat/modules.d/gcp.yml.disabled +++ b/x-pack/metricbeat/modules.d/gcp.yml.disabled @@ -14,6 +14,7 @@ metricsets: - pubsub - loadbalancing + - firestore zone: "us-central1-a" project_id: "your project id" credentials_file_path: "your JSON credentials file path" From e691b6e53a291ce7492338b6f67ead2ae8e541b5 Mon Sep 17 00:00:00 2001 From: Andrew Kroh Date: Thu, 27 Jan 2022 08:26:08 -0500 Subject: [PATCH 56/69] Include the error message with auditd module events (#30009) Auditbeat adds event.original when there is a parse failure, but it wasn't including the error message. 
Having the error helps you understand what went wrong. Example output: {"@timestamp":"2022-01-26T00:15:20.241Z","@metadata":{"beat":"auditbeat","type":"_doc","version":"8.1.0"},"error":{"message":"missing syscall message in compound event"},"event":{"original":["type=UNKNOWN[1333] msg=audit(1643156118.179:545): op=freq old=36792303616000 new=-176298262528000","type=UNKNOWN[1333] msg=audit(1643156118.179:545): op=tick old=9977 new=10000"],"module":"auditd"},"service":{"type":"auditd"},"host":{"name":"ubuntu-impish"},"agent":{"version":"8.1.0","ephemeral_id":"a6dd5138-f1b2-437a-8b83-324ec09bbaa3","id":"c127e0a1-be4b-4f9f-a5e4-97496699f75e","name":"ubuntu-impish","type":"auditbeat"},"ecs":{"version":"8.0.0"}} --- CHANGELOG.next.asciidoc | 1 + auditbeat/module/auditd/audit_linux.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 60ceff9a1ff..6bbc77f3036 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -102,6 +102,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - system/socket: Fix process name and arg truncation for long names, paths and args lists. {issue}24667[24667] {pull}29410[29410] - system/socket: Fix startup errors on newer 5.x kernels due to missing _do_fork function. {issue}29607[29607] {pull}29744[29744] - libbeat/processors/add_process_metadata: Fix memory leak in process cache. {issue}24890[24890] {pull}29717[29717] +- auditd: Add error.message to events when processing fails. {pull}30009[30009] *Filebeat* diff --git a/auditbeat/module/auditd/audit_linux.go b/auditbeat/module/auditd/audit_linux.go index 03d705187f7..82ae7d61128 100644 --- a/auditbeat/module/auditd/audit_linux.go +++ b/auditbeat/module/auditd/audit_linux.go @@ -512,7 +512,7 @@ func buildMetricbeatEvent(msgs []*auparse.AuditMessage, config Config) mb.Event auditEvent, err := aucoalesce.CoalesceMessages(msgs) if err != nil { // Add messages on error so that it's possible to debug the problem. 
- out := mb.Event{RootFields: common.MapStr{}} + out := mb.Event{RootFields: common.MapStr{}, Error: err} addEventOriginal(msgs, out.RootFields) return out } From 206de56092f2480dbff796393aed3e263834a31f Mon Sep 17 00:00:00 2001 From: Lars Lehtonen Date: Thu, 27 Jan 2022 05:58:28 -0800 Subject: [PATCH 57/69] filebeat/generator/fields: fix dropped error (#29943) --- filebeat/generator/fields/fields.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/filebeat/generator/fields/fields.go b/filebeat/generator/fields/fields.go index a656dc75a08..7f26081201d 100644 --- a/filebeat/generator/fields/fields.go +++ b/filebeat/generator/fields/fields.go @@ -385,8 +385,5 @@ func (p *pipeline) toFieldsYml(noDoc bool) ([]byte, error) { } f := generateFields(fs, noDoc) - var d []byte - d, err = yaml.Marshal(&f) - - return d, nil + return yaml.Marshal(&f) } From 3d3452f1cb00944cd965b9e7fc91dda4ecc61913 Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Thu, 27 Jan 2022 09:52:18 -0500 Subject: [PATCH 58/69] Remove msitools install for windows build, using the latest docker image with msitools preinstalled (#30040) --- x-pack/osquerybeat/magefile.go | 21 ++------------------- 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/x-pack/osquerybeat/magefile.go b/x-pack/osquerybeat/magefile.go index 070cf584125..272d9a70fac 100644 --- a/x-pack/osquerybeat/magefile.go +++ b/x-pack/osquerybeat/magefile.go @@ -101,17 +101,6 @@ func extractFromMSI() error { return err } - // Install msitools - err := execCommand("apt", "update") - if err != nil { - return err - } - - err = execCommand("apt", "install", "-y", "msitools") - if err != nil { - return err - } - osArchs := osquerybeat.OSArchs(devtools.Platforms) for _, osarch := range osArchs { @@ -161,18 +150,12 @@ func GolangCrossBuild() error { // Currently we can't reproduce this is issue, but here we can eliminate the need for calling msiexec // if extract the osqueryd.exe binary during the build. // - // The builder docker images are Debian so we need to install msitools for - // linux in order to extract the osqueryd.exe from MSI during build process. 
// Install MSI tools in order to extract file from MSI - // Ideally we would want these to be a part of the build docker image, - // but doing this here for now due to limited time before 7.16.2 - // // The cross build is currently called for two binaries osquerybeat and osqquery-extension - // Only install msitools and extract osqueryd.exe during osquerybeat build on windows + // Only extract osqueryd.exe during osquerybeat build on windows args := devtools.DefaultGolangCrossBuildArgs() - // Install msitools only if !strings.HasPrefix(args.Name, "osquery-extension-") { - // Install msitools in the container and extract osqueryd.exe from MSI + // Extract osqueryd.exe from MSI if err := extractFromMSI(); err != nil { return err } From 44802d456ecb8c394a0475bc437851bb00bf7073 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 27 Jan 2022 15:14:00 -0500 Subject: [PATCH 59/69] [Automation] Update elastic stack version to 8.1.0-aa69d697 for testing (#30012) Co-authored-by: apmmachine Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- testing/environments/snapshot-oss.yml | 6 +++--- testing/environments/snapshot.yml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/testing/environments/snapshot-oss.yml b/testing/environments/snapshot-oss.yml index eea926aab2f..9103be2793a 100644 --- a/testing/environments/snapshot-oss.yml +++ b/testing/environments/snapshot-oss.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-f5a18001-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-aa69d697-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -21,7 +21,7 @@ services: - "script.context.template.cache_max_size=2000" logstash: - image: docker.elastic.co/logstash/logstash-oss:8.1.0-f5a18001-SNAPSHOT + image: docker.elastic.co/logstash/logstash-oss:8.1.0-aa69d697-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -31,7 +31,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.1.0-f5a18001-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.1.0-aa69d697-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index d335efc04fa..37cfff5df30 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-f5a18001-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-aa69d697-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.1.0-f5a18001-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.1.0-aa69d697-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 11e488de5539be74c21fb80f143fe6b7aa57c3f3 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Thu, 27 Jan 2022 18:07:53 -0500 Subject: [PATCH 60/69] [DOCS] Add redirect for GSuite module (#30034) Adds a redirect for the 
[GSuite module](https://www.elastic.co/guide/en/beats/filebeat/7.17/filebeat-module-gsuite.html) page, which has been removed in 8.0+. We saw this missing page create several broken links in https://github.com/elastic/docs/pull/2312. I've opened https://github.com/elastic/security-docs/pull/1441 to update those links. This adds a redirect for anyone who otherwise lands on the page. --- filebeat/docs/redirects.asciidoc | 4 ++++ 1 file changed, 4 insertions(+)
diff --git a/filebeat/docs/redirects.asciidoc b/filebeat/docs/redirects.asciidoc index 7a41406099b..6d8b54b075b 100644 --- a/filebeat/docs/redirects.asciidoc +++ b/filebeat/docs/redirects.asciidoc
@@ -8,3 +8,7 @@ The following pages have moved or been deleted. See <>. +[role="exclude",id="filebeat-module-gsuite"] +== GSuite module + +The GSuite module has been replaced by the <>.
From fc7c75ff2b9da94a9a1b2a4469bb2e0f949a03b6 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Fri, 28 Jan 2022 09:36:25 +0000 Subject: [PATCH 61/69] probot[stale]: ignore issues with the tag flaky-test (#30065) --- .github/stale.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/.github/stale.yml b/.github/stale.yml index 160c31c5744..0086b50c8c7 100644 --- a/.github/stale.yml +++ b/.github/stale.yml
@@ -11,7 +11,8 @@ daysUntilClose: 30 onlyLabels: [] # Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable -exemptLabels: [] +exemptLabels: + - flaky-test # Set to true to ignore issues in a project (defaults to false) exemptProjects: false
From 8cc07ddaf14e03579e6d12c2c6f6853c8470f0a9 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Fri, 28 Jan 2022 10:51:28 +0100 Subject: [PATCH 62/69] libbeat/reader: Fix message conversion to beat.Event (#30057) `Message.ToEvent` was not copying the `Message.Private` field when converting a Message to `beat.Event`. This commit fixes it. Fixes: #30031 --- libbeat/reader/message.go | 1 + libbeat/reader/message_test.go | 15 +++++++++++++++ 2 files changed, 16 insertions(+)
diff --git a/libbeat/reader/message.go b/libbeat/reader/message.go index 0eae606f80b..e8bd1202729 100644 --- a/libbeat/reader/message.go +++ b/libbeat/reader/message.go
@@ -93,5 +93,6 @@ func (m *Message) ToEvent() beat.Event { Timestamp: m.Ts, Meta: m.Meta, Fields: m.Fields, + Private: m.Private, } }
diff --git a/libbeat/reader/message_test.go b/libbeat/reader/message_test.go index c73576c4767..95891724719 100644 --- a/libbeat/reader/message_test.go +++ b/libbeat/reader/message_test.go
@@ -19,6 +19,7 @@ package reader import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -55,6 +56,20 @@ func TestToEvent(t *testing.T) { Message{Content: []byte("my message"), Fields: common.MapStr{"my_field": "my_value"}, Meta: common.MapStr{"meta": "id"}}, beat.Event{Fields: common.MapStr{"message": "my message", "my_field": "my_value"}, Meta: common.MapStr{"meta": "id"}}, }, + "content, meta, message and private fields": { + Message{ + Ts: time.Date(2022, 1, 9, 10, 42, 0, 0, time.UTC), + Content: []byte("my message"), + Meta: common.MapStr{"foo": "bar"}, + Private: 42, + }, + beat.Event{ + Timestamp: time.Date(2022, 1, 9, 10, 42, 0, 0, time.UTC), + Fields: common.MapStr{"message": "my message"}, + Meta: common.MapStr{"foo": "bar"}, + Private: 42, + }, + }, } for name, test := range testCases {
From 457b0bb8db21819ff48dc7d0ce93ae693594ba27 Mon Sep 17 00:00:00 2001 From: "Lucas F. da Costa" Date: Fri, 28 Jan 2022 13:56:25 +0000 Subject: [PATCH 63/69] Add fonts to support more different types of characters for multiple languages (#29861) This PR fixes #29495 by adding more fonts to support multiple different characters from multiple languages. After upgrading the base image, even though all characters from a few languages worked fine, not all characters from all languages were being displayed. --- CHANGELOG.next.asciidoc | 1 + .../packaging/templates/docker/Dockerfile.elastic-agent.tmpl | 4 +++- dev-tools/packaging/templates/docker/Dockerfile.tmpl | 4 +++- 3 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 6bbc77f3036..0220c964256 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc
@@ -53,6 +53,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Only add monitor.status to browser events when summary. {pull}29460[29460] - Also add summary to journeys for which the synthetics runner crashes. {pull}29606[29606] - Update size of ICMP packets to adhere to standard min size. {pull}29948[29948] +- Add fonts to support more different types of characters for multiple languages. {pull}29861[29861] *Metricbeat*
diff --git a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl index 644df5bd73a..31ca7e78da2 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl
@@ -69,7 +69,9 @@ RUN apt-get update -y && \ libcairo2\ libasound2\ libatspi2.0-0\ - libxshmfence1 && \ + libxshmfence1 \ + fonts-noto-core\ + fonts-noto-cjk &&\ apt-get clean all && \ exit_code=0 && break || exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ done; \
diff --git a/dev-tools/packaging/templates/docker/Dockerfile.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.tmpl index 9309516bd80..fec54d5ccb1 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.tmpl
@@ -59,7 +59,9 @@ RUN apt-get update -y && \ libcairo2\ libasound2\ libatspi2.0-0\ - libxshmfence1 \ + libxshmfence1 \ + fonts-noto \ + fonts-noto-cjk && \ apt-get clean all && \ exit_code=0 && break || exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ done; \
From 04445373f99a206522deb6309a400bf992cdf73d Mon Sep 17 00:00:00 2001 From: Alex Resnick Date: Fri, 28 Jan 2022 08:19:24 -0600 Subject: [PATCH 64/69] [libbeat] Add script processor to all beats (#29752) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Noémi Ványi --- CHANGELOG.next.asciidoc | 1 + filebeat/cmd/root.go | 1 - heartbeat/beater/heartbeat.go | 1 - libbeat/cmd/instance/imports_common.go | 1 + metricbeat/cmd/root.go | 3 --- winlogbeat/cmd/root.go | 1 - 6 files changed, 2 insertions(+), 6 deletions(-)
diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 0220c964256..bbb2acb9be0 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc
@@ -161,6 +161,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add FIPS configuration option for all AWS API calls. {pull}28899[28899] - Add `default_region` config to AWS common module.
{pull}29415[29415] - Add support for latest k8s versions v1.23 and v1.22 {pull}29575[29575] +- Add `script` processor to all beats {issue}29269[29269] {pull}29752[29752] - Only connect to Elasticsearch instances with the same version or newer. {pull}29683[29683] - Move umask from code to service files. {pull}29708[29708] diff --git a/filebeat/cmd/root.go b/filebeat/cmd/root.go index 2b9bff54629..1a17e64cdbb 100644 --- a/filebeat/cmd/root.go +++ b/filebeat/cmd/root.go @@ -28,7 +28,6 @@ import ( "github.com/elastic/beats/v7/libbeat/cmd/instance" // Import processors. - _ "github.com/elastic/beats/v7/libbeat/processors/script" _ "github.com/elastic/beats/v7/libbeat/processors/timestamp" ) diff --git a/heartbeat/beater/heartbeat.go b/heartbeat/beater/heartbeat.go index 0e499ce6f85..003f4da5934 100644 --- a/heartbeat/beater/heartbeat.go +++ b/heartbeat/beater/heartbeat.go @@ -37,7 +37,6 @@ import ( "github.com/elastic/beats/v7/libbeat/management" _ "github.com/elastic/beats/v7/heartbeat/security" - _ "github.com/elastic/beats/v7/libbeat/processors/script" ) // Heartbeat represents the root datastructure of this beat. diff --git a/libbeat/cmd/instance/imports_common.go b/libbeat/cmd/instance/imports_common.go index 724919364db..aa298984f94 100644 --- a/libbeat/cmd/instance/imports_common.go +++ b/libbeat/cmd/instance/imports_common.go @@ -38,6 +38,7 @@ import ( _ "github.com/elastic/beats/v7/libbeat/processors/fingerprint" _ "github.com/elastic/beats/v7/libbeat/processors/ratelimit" _ "github.com/elastic/beats/v7/libbeat/processors/registered_domain" + _ "github.com/elastic/beats/v7/libbeat/processors/script" _ "github.com/elastic/beats/v7/libbeat/processors/translate_sid" _ "github.com/elastic/beats/v7/libbeat/processors/urldecode" _ "github.com/elastic/beats/v7/libbeat/publisher/includes" // Register publisher pipeline modules diff --git a/metricbeat/cmd/root.go b/metricbeat/cmd/root.go index 50724cd10bd..9adbd0542a7 100644 --- a/metricbeat/cmd/root.go +++ b/metricbeat/cmd/root.go @@ -33,9 +33,6 @@ import ( // import modules _ "github.com/elastic/beats/v7/metricbeat/include" _ "github.com/elastic/beats/v7/metricbeat/include/fields" - - // Import processors. - _ "github.com/elastic/beats/v7/libbeat/processors/script" ) const ( diff --git a/winlogbeat/cmd/root.go b/winlogbeat/cmd/root.go index 3326e768d9e..7bd35f1597d 100644 --- a/winlogbeat/cmd/root.go +++ b/winlogbeat/cmd/root.go @@ -29,7 +29,6 @@ import ( _ "github.com/elastic/beats/v7/winlogbeat/include" // Import processors and supporting modules. - _ "github.com/elastic/beats/v7/libbeat/processors/script" _ "github.com/elastic/beats/v7/libbeat/processors/timestamp" ) From 442d1b92d3198bdadfcb3a290eb0788afde4607a Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Fri, 28 Jan 2022 14:50:52 +0000 Subject: [PATCH 65/69] Revert "Packaging: rename arm64 suffix to aarch64 in the tar.gz artifacts ONLY (#28813)" (#30083) This reverts commit 13dc3de89d4f39319ed8c5fe92f7382473665a28. 
---
 CHANGELOG.next.asciidoc                                         | 1 -
 dev-tools/mage/pkgtypes.go                                      | 2 +-
 x-pack/elastic-agent/magefile.go                                | 2 +-
 x-pack/elastic-agent/pkg/artifact/artifact.go                   | 2 +-
 x-pack/elastic-agent/pkg/artifact/download/http/elastic_test.go | 2 +-
 5 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index bbb2acb9be0..99b1f259a7a 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -24,7 +24,6 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d
 - Remove option `setup.template.type` and always load composable template with data streams. {pull}28450[28450]
 - Remove several ILM options (`rollover_alias` and `pattern`) as data streams does not require index aliases. {pull}28450[28450]
 - Index template's default_fields setting is only populated with ECS fields. {pull}28596[28596] {issue}28215[28215]
-- tar.gz packages for ARM64 will now use the suffix `aarch64` rather than `arm64`. {pull}28813[28813]
 - Remove deprecated `--template` and `--ilm-policy` flags. Use `--index-management` instead. {pull}28870[28870]
 - Remove options `logging.files.suffix` and default to datetime endings. {pull}28927[28927]
 - Remove Journalbeat. Use `journald` input of Filebeat instead. {pull}29131[29131]
diff --git a/dev-tools/mage/pkgtypes.go b/dev-tools/mage/pkgtypes.go
index 9944547c82a..1fc5fe79e50 100644
--- a/dev-tools/mage/pkgtypes.go
+++ b/dev-tools/mage/pkgtypes.go
@@ -162,7 +162,7 @@ var OSArchNames = map[string]map[PackageType]map[string]string{
 		"armv5":  "armv5",
 		"armv6":  "armv6",
 		"armv7":  "armv7",
-		"arm64":  "aarch64",
+		"arm64":  "arm64",
 		"mips":   "mips",
 		"mipsle": "mipsel",
 		"mips64": "mips64",
diff --git a/x-pack/elastic-agent/magefile.go b/x-pack/elastic-agent/magefile.go
index ed84c8ca073..113f84e7221 100644
--- a/x-pack/elastic-agent/magefile.go
+++ b/x-pack/elastic-agent/magefile.go
@@ -308,7 +308,7 @@ func Package() {
 	}{
 		{"darwin/amd64", "darwin-x86_64.tar.gz"},
 		{"linux/amd64", "linux-x86_64.tar.gz"},
-		{"linux/arm64", "linux-aarch64.tar.gz"},
+		{"linux/arm64", "linux-arm64.tar.gz"},
 		{"windows/amd64", "windows-x86_64.zip"},
 	}
 
diff --git a/x-pack/elastic-agent/pkg/artifact/artifact.go b/x-pack/elastic-agent/pkg/artifact/artifact.go
index ff81fc326f1..5f8a099ed6a 100644
--- a/x-pack/elastic-agent/pkg/artifact/artifact.go
+++ b/x-pack/elastic-agent/pkg/artifact/artifact.go
@@ -15,7 +15,7 @@ import (
 var packageArchMap = map[string]string{
 	"linux-binary-32":    "linux-x86.tar.gz",
 	"linux-binary-64":    "linux-x86_64.tar.gz",
-	"linux-binary-arm64": "linux-aarch64.tar.gz",
+	"linux-binary-arm64": "linux-arm64.tar.gz",
 	"windows-binary-32":  "windows-x86.zip",
 	"windows-binary-64":  "windows-x86_64.zip",
 	"darwin-binary-32":   "darwin-x86_64.tar.gz",
diff --git a/x-pack/elastic-agent/pkg/artifact/download/http/elastic_test.go b/x-pack/elastic-agent/pkg/artifact/download/http/elastic_test.go
index d0b321da3d1..747324a7cc7 100644
--- a/x-pack/elastic-agent/pkg/artifact/download/http/elastic_test.go
+++ b/x-pack/elastic-agent/pkg/artifact/download/http/elastic_test.go
@@ -170,7 +170,7 @@ func getElasticCoClient() http.Client {
 		fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "i686.rpm"):             struct{}{},
 		fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "x86_64.rpm"):           struct{}{},
 		fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-x86.tar.gz"):     struct{}{},
-		fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-aarch64.tar.gz"): struct{}{},
+		fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-arm64.tar.gz"):   struct{}{},
 		fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-x86_64.tar.gz"):  struct{}{},
 		fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "windows-x86.zip"):      struct{}{},
 		fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "windows-x86_64.zip"):   struct{}{},

From 1306e5fe9897aa1af19b5dd91afa211b881604da Mon Sep 17 00:00:00 2001
From: Fae Charlton
Date: Fri, 28 Jan 2022 10:50:16 -0500
Subject: [PATCH 66/69] Remove SSL3 support from libbeat and its
 documentation. (#30071)

* Remove SSL3 support from libbeat and its documentation.

* update changelog
---
 CHANGELOG.next.asciidoc                      |  1 +
 .../transport/tlscommon/versions_default.go  | 22 ++++++++-----------
 .../transport/tlscommon/versions_legacy.go   | 18 ++++++---------
 .../transport/tlscommon/versions_test.go     |  5 -----
 libbeat/docs/shared-ssl-config.asciidoc      |  2 +-
 packetbeat/docs/packetbeat-options.asciidoc  |  3 +--
 6 files changed, 19 insertions(+), 32 deletions(-)

diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index 99b1f259a7a..6c2814ea810 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -32,6 +32,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d
 - Change Docker base image from CentOS 7 to Ubuntu 20.04 {pull}29681[29681]
 - Enrich kubernetes metadata with node annotations. {pull}29605[29605]
 - Allign kubernetes configuration settings. {pull}29908[29908]
+- Remove legacy support for SSLv3. {pull}30071[30071]
 
 *Auditbeat*
 
diff --git a/libbeat/common/transport/tlscommon/versions_default.go b/libbeat/common/transport/tlscommon/versions_default.go
index 2d5180864c6..0d0ea0d2df0 100644
--- a/libbeat/common/transport/tlscommon/versions_default.go
+++ b/libbeat/common/transport/tlscommon/versions_default.go
@@ -26,14 +26,13 @@ import (
 
 // Define all the possible TLS version.
 const (
-	TLSVersionSSL30 TLSVersion = tls.VersionSSL30
-	TLSVersion10    TLSVersion = tls.VersionTLS10
-	TLSVersion11    TLSVersion = tls.VersionTLS11
-	TLSVersion12    TLSVersion = tls.VersionTLS12
-	TLSVersion13    TLSVersion = tls.VersionTLS13
+	TLSVersion10 TLSVersion = tls.VersionTLS10
+	TLSVersion11 TLSVersion = tls.VersionTLS11
+	TLSVersion12 TLSVersion = tls.VersionTLS12
+	TLSVersion13 TLSVersion = tls.VersionTLS13
 
 	// TLSVersionMin is the min TLS version supported.
-	TLSVersionMin = TLSVersionSSL30
+	TLSVersionMin = TLSVersion10
 
 	// TLSVersionMax is the max TLS version supported.
 	TLSVersionMax = TLSVersion13
@@ -55,8 +54,6 @@ var TLSDefaultVersions = []TLSVersion{
 }
 
 var tlsProtocolVersions = map[string]TLSVersion{
-	"SSLv3":   TLSVersionSSL30,
-	"SSLv3.0": TLSVersionSSL30,
 	"TLSv1":   TLSVersion10,
 	"TLSv1.0": TLSVersion10,
 	"TLSv1.1": TLSVersion11,
@@ -77,9 +74,8 @@ func (pv TLSVersionDetails) String() string {
 }
 
 var tlsInverseLookup = map[TLSVersion]TLSVersionDetails{
-	TLSVersionSSL30: TLSVersionDetails{Version: "3.0", Protocol: "ssl", Combined: "SSLv3"},
-	TLSVersion10:    TLSVersionDetails{Version: "1.0", Protocol: "tls", Combined: "TLSv1.0"},
-	TLSVersion11:    TLSVersionDetails{Version: "1.1", Protocol: "tls", Combined: "TLSv1.1"},
-	TLSVersion12:    TLSVersionDetails{Version: "1.2", Protocol: "tls", Combined: "TLSv1.2"},
-	TLSVersion13:    TLSVersionDetails{Version: "1.3", Protocol: "tls", Combined: "TLSv1.3"},
+	TLSVersion10: TLSVersionDetails{Version: "1.0", Protocol: "tls", Combined: "TLSv1.0"},
+	TLSVersion11: TLSVersionDetails{Version: "1.1", Protocol: "tls", Combined: "TLSv1.1"},
+	TLSVersion12: TLSVersionDetails{Version: "1.2", Protocol: "tls", Combined: "TLSv1.2"},
+	TLSVersion13: TLSVersionDetails{Version: "1.3", Protocol: "tls", Combined: "TLSv1.3"},
 }
diff --git a/libbeat/common/transport/tlscommon/versions_legacy.go b/libbeat/common/transport/tlscommon/versions_legacy.go
index 18d557a2495..3d538a7ab9f 100644
--- a/libbeat/common/transport/tlscommon/versions_legacy.go
+++ b/libbeat/common/transport/tlscommon/versions_legacy.go
@@ -23,13 +23,12 @@ package tlscommon
 
 import "crypto/tls"
 
 const (
-	TLSVersionSSL30 TLSVersion = tls.VersionSSL30
-	TLSVersion10    TLSVersion = tls.VersionTLS10
-	TLSVersion11    TLSVersion = tls.VersionTLS11
-	TLSVersion12    TLSVersion = tls.VersionTLS12
+	TLSVersion10 TLSVersion = tls.VersionTLS10
+	TLSVersion11 TLSVersion = tls.VersionTLS11
+	TLSVersion12 TLSVersion = tls.VersionTLS12
 
 	// TLSVersionMin is the min TLS version supported.
-	TLSVersionMin = TLSVersionSSL30
+	TLSVersionMin = TLSVersion10
 
 	// TLSVersionMax is the max TLS version supported.
 	TLSVersionMax = TLSVersion12
@@ -51,8 +50,6 @@ var TLSDefaultVersions = []TLSVersion{
 }
 
 var tlsProtocolVersions = map[string]TLSVersion{
-	"SSLv3":   TLSVersionSSL30,
-	"SSLv3.0": TLSVersionSSL30,
 	"TLSv1":   TLSVersion10,
 	"TLSv1.0": TLSVersion10,
 	"TLSv1.1": TLSVersion11,
@@ -60,8 +57,7 @@ var tlsProtocolVersions = map[string]TLSVersion{
 }
 
 var tlsProtocolVersionsInverse = map[TLSVersion]string{
-	TLSVersionSSL30: "SSLv3",
-	TLSVersion10:    "TLSv1.0",
-	TLSVersion11:    "TLSv1.1",
-	TLSVersion12:    "TLSv1.2",
+	TLSVersion10: "TLSv1.0",
+	TLSVersion11: "TLSv1.1",
+	TLSVersion12: "TLSv1.2",
 }
diff --git a/libbeat/common/transport/tlscommon/versions_test.go b/libbeat/common/transport/tlscommon/versions_test.go
index b1251109b05..7f2b2e02763 100644
--- a/libbeat/common/transport/tlscommon/versions_test.go
+++ b/libbeat/common/transport/tlscommon/versions_test.go
@@ -36,11 +36,6 @@ func TestTLSVersion(t *testing.T) {
 			0x0,
 			nil,
 		},
-		{
-			"SSLv3",
-			tls.VersionSSL30,
-			&TLSVersionDetails{Version: "3.0", Protocol: "ssl", Combined: "SSLv3"},
-		},
 		{
 			"TLSv1.0",
 			tls.VersionTLS10,
diff --git a/libbeat/docs/shared-ssl-config.asciidoc b/libbeat/docs/shared-ssl-config.asciidoc
index 97a2605ef4b..71f9b45c2b8 100644
--- a/libbeat/docs/shared-ssl-config.asciidoc
+++ b/libbeat/docs/shared-ssl-config.asciidoc
@@ -120,7 +120,7 @@ SSL settings are disabled if either `enabled` is set to `false` or the
 List of allowed SSL/TLS versions. If SSL/TLS server decides for protocol
 versions not configured, the connection will be dropped during or after the
 handshake. The setting is a list of allowed protocol versions:
-`SSLv3`, `TLSv1` for TLS version 1.0, `TLSv1.0`, `TLSv1.1`, `TLSv1.2`, and
+`TLSv1` for TLS version 1.0, `TLSv1.0`, `TLSv1.1`, `TLSv1.2`, and
 `TLSv1.3`.
 
 The default value is `[TLSv1.1, TLSv1.2, TLSv1.3]`.
diff --git a/packetbeat/docs/packetbeat-options.asciidoc b/packetbeat/docs/packetbeat-options.asciidoc
index 6c062187494..2cd21bfd8fc 100644
--- a/packetbeat/docs/packetbeat-options.asciidoc
+++ b/packetbeat/docs/packetbeat-options.asciidoc
@@ -1235,8 +1235,7 @@ Packetbeat intercepts the initial handshake in a TLS connection and
 extracts useful information that helps operators diagnose problems and
 strengthen the security of their network and systems. It does not decrypt
 any information from the encapsulated protocol, nor does it reveal any
-sensitive information such as cryptographic keys. TLS versions 1.0 to 1.3 and
-SSL 3.0 are supported.
+sensitive information such as cryptographic keys. TLS versions 1.0 to 1.3 are supported.
 
 It works by intercepting the client and server "hello" messages, which contain
 the negotiated parameters for the connection such as cryptographic ciphers and

From 27d44ce27c8c82d41c6d43b81ed9ec0edda5fdd7 Mon Sep 17 00:00:00 2001
From: Mat Schaffer
Date: Sat, 29 Jan 2022 01:37:06 +0900
Subject: [PATCH 67/69] [Filebeat] Update handling of elasticsearch server
 logs (#30018)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Noémi Ványi
Co-authored-by: klacabane
---
 filebeat/docs/fields.asciidoc                 |  47 +++
 .../module/elasticsearch/_meta/fields.yml     |  12 +
 .../elasticsearch/audit/_meta/fields.yml      |   4 +
 .../audit/ingest/pipeline-json.yml            |  10 +
 .../elasticsearch/audit/ingest/pipeline.yml   |   6 +-
 .../audit/test/test-audit-800.log             |   3 +
 .../test/test-audit-800.log-expected.json     | 105 ++++++
 .../deprecation/ingest/pipeline-json-7.yml    |  97 +++++
 .../deprecation/ingest/pipeline-json-8.yml    |  15 +
 .../deprecation/ingest/pipeline-json.yml      |  97 +----
 .../deprecation/ingest/pipeline.yml           |   6 +-
 .../elasticsearch/deprecation/manifest.yml    |   2 +
 .../test/es_deprecation-json.800.log          |  17 +-
 .../es_deprecation-json.800.log-expected.json | 343 ++----------------
 filebeat/module/elasticsearch/fields.go       |   2 +-
 .../elasticsearch/gc/ingest/pipeline.yml      |   6 +-
 .../server/ingest/pipeline-json-7.yml         | 101 ++++++
 .../server/ingest/pipeline-json-8.yml         | 109 ++++++
 .../server/ingest/pipeline-json.yml           | 103 +----
 .../elasticsearch/server/ingest/pipeline.yml  |   6 +-
 .../module/elasticsearch/server/manifest.yml  |   2 +
 .../server/test/elasticsearch-json.800.log    |   6 +-
 .../elasticsearch-json.800.log-expected.json  |  61 ++--
 .../slowlog/ingest/pipeline-json-7.yml        | 120 ++++++
 .../slowlog/ingest/pipeline-json-8.yml        |  36 ++
 .../slowlog/ingest/pipeline-json.yml          | 114 +----
 .../elasticsearch/slowlog/ingest/pipeline.yml |   6 +-
 .../module/elasticsearch/slowlog/manifest.yml |   2 +
 .../slowlog/test/es_indexing_slowlog.800.log  |   4 +-
 .../es_indexing_slowlog.800.log-expected.json |  72 ++--
 .../slowlog/test/es_search_slowlog.800.log    |   5 +-
 .../es_search_slowlog.800.log-expected.json   | 108 ++++--
 32 files changed, 881 insertions(+), 746 deletions(-)
 create mode 100644 filebeat/module/elasticsearch/audit/test/test-audit-800.log
 create mode 100644 filebeat/module/elasticsearch/audit/test/test-audit-800.log-expected.json
 create mode 100644
filebeat/module/elasticsearch/deprecation/ingest/pipeline-json-7.yml create mode 100644 filebeat/module/elasticsearch/deprecation/ingest/pipeline-json-8.yml create mode 100644 filebeat/module/elasticsearch/server/ingest/pipeline-json-7.yml create mode 100644 filebeat/module/elasticsearch/server/ingest/pipeline-json-8.yml create mode 100644 filebeat/module/elasticsearch/slowlog/ingest/pipeline-json-7.yml create mode 100644 filebeat/module/elasticsearch/slowlog/ingest/pipeline-json-8.yml diff --git a/filebeat/docs/fields.asciidoc b/filebeat/docs/fields.asciidoc index 2c921af2f39..9030022bc67 100644 --- a/filebeat/docs/fields.asciidoc +++ b/filebeat/docs/fields.asciidoc @@ -49735,6 +49735,39 @@ example: 0 -- +*`elasticsearch.elastic_product_origin`*:: ++ +-- +Used by Elastic stack to identify which component of the stack sent the request + +type: keyword + +example: kibana + +-- + +*`elasticsearch.http.request.x_opaque_id`*:: ++ +-- +Used by Elasticsearch to throttle and deduplicate deprecation warnings + +type: keyword + +example: v7app + +-- + +*`elasticsearch.event.category`*:: ++ +-- +Category of the deprecation event + +type: keyword + +example: compatible_api + +-- + *`elasticsearch.audit.layer`*:: + @@ -49922,6 +49955,20 @@ type: boolean -- +*`elasticsearch.audit.authentication.type`*:: ++ +-- +type: keyword + +-- + +*`elasticsearch.audit.opaque_id`*:: ++ +-- +type: text + +-- + [float] === deprecation diff --git a/filebeat/module/elasticsearch/_meta/fields.yml b/filebeat/module/elasticsearch/_meta/fields.yml index 721f33a4879..8ae4789d929 100644 --- a/filebeat/module/elasticsearch/_meta/fields.yml +++ b/filebeat/module/elasticsearch/_meta/fields.yml @@ -40,3 +40,15 @@ description: "Id of the shard" example: "0" type: keyword + - name: elastic_product_origin + type: keyword + description: "Used by Elastic stack to identify which component of the stack sent the request" + example: "kibana" + - name: http.request.x_opaque_id + description: "Used by Elasticsearch to throttle and deduplicate deprecation warnings" + example: "v7app" + type: keyword + - name: event.category + description: "Category of the deprecation event" + example: "compatible_api" + type: keyword diff --git a/filebeat/module/elasticsearch/audit/_meta/fields.yml b/filebeat/module/elasticsearch/audit/_meta/fields.yml index 38774e4f8b9..ce0ffdf1fda 100644 --- a/filebeat/module/elasticsearch/audit/_meta/fields.yml +++ b/filebeat/module/elasticsearch/audit/_meta/fields.yml @@ -70,3 +70,7 @@ type: text - name: invalidate.apikeys.owned_by_authenticated_user type: boolean + - name: authentication.type + type: keyword + - name: opaque_id + type: text diff --git a/filebeat/module/elasticsearch/audit/ingest/pipeline-json.yml b/filebeat/module/elasticsearch/audit/ingest/pipeline-json.yml index 359af0ab196..14e6a03538e 100644 --- a/filebeat/module/elasticsearch/audit/ingest/pipeline-json.yml +++ b/filebeat/module/elasticsearch/audit/ingest/pipeline-json.yml @@ -176,6 +176,16 @@ processors: field: elasticsearch.audit.level target_field: log.level ignore_missing: true + - dot_expander: + field: trace.id + path: elasticsearch.audit + - rename: + field: elasticsearch.audit.trace.id + target_field: trace.id + ignore_missing: true + - remove: + field: elasticsearch.audit.trace.id + ignore_missing: true - date: field: elasticsearch.audit.@timestamp target_field: "@timestamp" diff --git a/filebeat/module/elasticsearch/audit/ingest/pipeline.yml b/filebeat/module/elasticsearch/audit/ingest/pipeline.yml index 1ae5da8dbb7..e241acafacb 100644 --- 
a/filebeat/module/elasticsearch/audit/ingest/pipeline.yml +++ b/filebeat/module/elasticsearch/audit/ingest/pipeline.yml @@ -3,9 +3,9 @@ processors: - set: field: event.ingested value: '{{_ingest.timestamp}}' -- rename: - field: '@timestamp' - target_field: event.created +- set: + copy_from: "@timestamp" + field: event.created - grok: field: message patterns: diff --git a/filebeat/module/elasticsearch/audit/test/test-audit-800.log b/filebeat/module/elasticsearch/audit/test/test-audit-800.log new file mode 100644 index 00000000000..75c7ebb6055 --- /dev/null +++ b/filebeat/module/elasticsearch/audit/test/test-audit-800.log @@ -0,0 +1,3 @@ +{"type":"audit", "timestamp":"2022-01-27T14:16:25,271+0100", "node.id":"O8SFUsk8QpGG16JVJcNgUw", "event.type":"transport", "event.action":"access_granted", "authentication.type":"REALM", "user.name":"elastic", "user.realm":"reserved", "user.roles":["superuser"], "origin.type":"rest", "origin.address":"[::1]:64583", "request.id":"yEUG-8deS2y8ZxGgeyeUnw", "action":"indices:admin/create", "request.name":"CreateIndexRequest", "indices":["test_1"], "opaque_id":"myApp1", "trace.id":"0af7651916cd43dd8448eb211c80319c"} +{"type":"audit", "timestamp":"2022-01-27T14:16:28,601+0100", "node.id":"O8SFUsk8QpGG16JVJcNgUw", "event.type":"transport", "event.action":"access_granted", "authentication.type":"REALM", "user.name":"elastic", "user.realm":"reserved", "user.roles":["superuser"], "origin.type":"rest", "origin.address":"[::1]:64583", "request.id":"qo04VI2qRzKrE1dlrsjYgw", "action":"indices:admin/create", "request.name":"CreateIndexRequest", "indices":["test_2"]} +{"type":"audit", "timestamp":"2022-01-27T14:16:30,950+0100", "node.id":"O8SFUsk8QpGG16JVJcNgUw", "event.type":"rest", "event.action":"anonymous_access_denied", "origin.type":"rest", "origin.address":"[::1]:64583", "url.path":"/test_3", "request.method":"PUT", "request.id":"0ybRdKGYRAekov1eKI6nIw", "opaque_id":"myApp1", "trace.id":"0af7651916cd43dd8448eb211c80319c"} diff --git a/filebeat/module/elasticsearch/audit/test/test-audit-800.log-expected.json b/filebeat/module/elasticsearch/audit/test/test-audit-800.log-expected.json new file mode 100644 index 00000000000..6477bb708e5 --- /dev/null +++ b/filebeat/module/elasticsearch/audit/test/test-audit-800.log-expected.json @@ -0,0 +1,105 @@ +[ + { + "@timestamp": "2022-01-27T13:16:25.271Z", + "elasticsearch.audit.action": "indices:admin/create", + "elasticsearch.audit.authentication.type": "REALM", + "elasticsearch.audit.indices": [ + "test_1" + ], + "elasticsearch.audit.layer": "transport", + "elasticsearch.audit.opaque_id": "myApp1", + "elasticsearch.audit.origin.type": "rest", + "elasticsearch.audit.request.id": "yEUG-8deS2y8ZxGgeyeUnw", + "elasticsearch.audit.request.name": "CreateIndexRequest", + "elasticsearch.audit.user.realm": "reserved", + "elasticsearch.audit.user.roles": [ + "superuser" + ], + "elasticsearch.node.id": "O8SFUsk8QpGG16JVJcNgUw", + "event.action": "access_granted", + "event.category": "database", + "event.dataset": "elasticsearch.audit", + "event.kind": "event", + "event.module": "elasticsearch", + "event.outcome": "success", + "fileset.name": "audit", + "host.id": "O8SFUsk8QpGG16JVJcNgUw", + "http.request.id": "yEUG-8deS2y8ZxGgeyeUnw", + "input.type": "log", + "log.offset": 0, + "message": "{\"type\":\"audit\", \"timestamp\":\"2022-01-27T14:16:25,271+0100\", \"node.id\":\"O8SFUsk8QpGG16JVJcNgUw\", \"event.type\":\"transport\", \"event.action\":\"access_granted\", \"authentication.type\":\"REALM\", \"user.name\":\"elastic\", 
\"user.realm\":\"reserved\", \"user.roles\":[\"superuser\"], \"origin.type\":\"rest\", \"origin.address\":\"[::1]:64583\", \"request.id\":\"yEUG-8deS2y8ZxGgeyeUnw\", \"action\":\"indices:admin/create\", \"request.name\":\"CreateIndexRequest\", \"indices\":[\"test_1\"], \"opaque_id\":\"myApp1\", \"trace.id\":\"0af7651916cd43dd8448eb211c80319c\"}", + "related.user": [ + "elastic" + ], + "service.type": "elasticsearch", + "source.address": "[::1]:64583", + "source.ip": "::1", + "source.port": 64583, + "trace.id": "0af7651916cd43dd8448eb211c80319c", + "user.name": "elastic" + }, + { + "@timestamp": "2022-01-27T13:16:28.601Z", + "elasticsearch.audit.action": "indices:admin/create", + "elasticsearch.audit.authentication.type": "REALM", + "elasticsearch.audit.indices": [ + "test_2" + ], + "elasticsearch.audit.layer": "transport", + "elasticsearch.audit.origin.type": "rest", + "elasticsearch.audit.request.id": "qo04VI2qRzKrE1dlrsjYgw", + "elasticsearch.audit.request.name": "CreateIndexRequest", + "elasticsearch.audit.user.realm": "reserved", + "elasticsearch.audit.user.roles": [ + "superuser" + ], + "elasticsearch.node.id": "O8SFUsk8QpGG16JVJcNgUw", + "event.action": "access_granted", + "event.category": "database", + "event.dataset": "elasticsearch.audit", + "event.kind": "event", + "event.module": "elasticsearch", + "event.outcome": "success", + "fileset.name": "audit", + "host.id": "O8SFUsk8QpGG16JVJcNgUw", + "http.request.id": "qo04VI2qRzKrE1dlrsjYgw", + "input.type": "log", + "log.offset": 517, + "message": "{\"type\":\"audit\", \"timestamp\":\"2022-01-27T14:16:28,601+0100\", \"node.id\":\"O8SFUsk8QpGG16JVJcNgUw\", \"event.type\":\"transport\", \"event.action\":\"access_granted\", \"authentication.type\":\"REALM\", \"user.name\":\"elastic\", \"user.realm\":\"reserved\", \"user.roles\":[\"superuser\"], \"origin.type\":\"rest\", \"origin.address\":\"[::1]:64583\", \"request.id\":\"qo04VI2qRzKrE1dlrsjYgw\", \"action\":\"indices:admin/create\", \"request.name\":\"CreateIndexRequest\", \"indices\":[\"test_2\"]}", + "related.user": [ + "elastic" + ], + "service.type": "elasticsearch", + "source.address": "[::1]:64583", + "source.ip": "::1", + "source.port": 64583, + "user.name": "elastic" + }, + { + "@timestamp": "2022-01-27T13:16:30.950Z", + "elasticsearch.audit.layer": "rest", + "elasticsearch.audit.opaque_id": "myApp1", + "elasticsearch.audit.origin.type": "rest", + "elasticsearch.audit.request.id": "0ybRdKGYRAekov1eKI6nIw", + "elasticsearch.node.id": "O8SFUsk8QpGG16JVJcNgUw", + "event.action": "anonymous_access_denied", + "event.category": "database", + "event.dataset": "elasticsearch.audit", + "event.kind": "event", + "event.module": "elasticsearch", + "event.outcome": "failure", + "fileset.name": "audit", + "host.id": "O8SFUsk8QpGG16JVJcNgUw", + "http.request.id": "0ybRdKGYRAekov1eKI6nIw", + "http.request.method": "PUT", + "input.type": "log", + "log.offset": 965, + "message": "{\"type\":\"audit\", \"timestamp\":\"2022-01-27T14:16:30,950+0100\", \"node.id\":\"O8SFUsk8QpGG16JVJcNgUw\", \"event.type\":\"rest\", \"event.action\":\"anonymous_access_denied\", \"origin.type\":\"rest\", \"origin.address\":\"[::1]:64583\", \"url.path\":\"/test_3\", \"request.method\":\"PUT\", \"request.id\":\"0ybRdKGYRAekov1eKI6nIw\", \"opaque_id\":\"myApp1\", \"trace.id\":\"0af7651916cd43dd8448eb211c80319c\"}", + "service.type": "elasticsearch", + "source.address": "[::1]:64583", + "source.ip": "::1", + "source.port": 64583, + "trace.id": "0af7651916cd43dd8448eb211c80319c", + "url.original": "/test_3" + } +] \ No 
newline at end of file diff --git a/filebeat/module/elasticsearch/deprecation/ingest/pipeline-json-7.yml b/filebeat/module/elasticsearch/deprecation/ingest/pipeline-json-7.yml new file mode 100644 index 00000000000..08f044e68d5 --- /dev/null +++ b/filebeat/module/elasticsearch/deprecation/ingest/pipeline-json-7.yml @@ -0,0 +1,97 @@ +description: Pipeline for parsing the Elasticsearch deprecation log file in JSON format. +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' +processors: +- json: + field: message + target_field: elasticsearch.deprecation +- drop: + if: '!["deprecation", "deprecation.elasticsearch"].contains(ctx.elasticsearch.deprecation.type)' +- remove: + field: elasticsearch.deprecation.type +- dot_expander: + field: service.name + path: elasticsearch.deprecation +- rename: + field: elasticsearch.deprecation.service.name + target_field: service.name + ignore_missing: true +- rename: + field: elasticsearch.deprecation.level + target_field: log.level + ignore_missing: true +- dot_expander: + field: log.level + path: elasticsearch.deprecation +- rename: + field: elasticsearch.deprecation.log.level + target_field: log.level + ignore_missing: true +- dot_expander: + field: log.logger + path: elasticsearch.deprecation +- rename: + field: elasticsearch.deprecation.log.logger + target_field: log.logger + ignore_missing: true +- dot_expander: + field: process.thread.name + path: elasticsearch.deprecation +- rename: + field: elasticsearch.deprecation.process.thread.name + target_field: process.thread.name + ignore_missing: true +- rename: + field: elasticsearch.deprecation.component + target_field: elasticsearch.component + ignore_missing: true +- dot_expander: + field: cluster.name + path: elasticsearch.deprecation +- rename: + field: elasticsearch.deprecation.cluster.name + target_field: elasticsearch.cluster.name +- dot_expander: + field: node.name + path: elasticsearch.deprecation +- rename: + field: elasticsearch.deprecation.node.name + target_field: elasticsearch.node.name +- dot_expander: + field: cluster.uuid + path: elasticsearch.deprecation +- rename: + field: elasticsearch.deprecation.cluster.uuid + target_field: elasticsearch.cluster.uuid + ignore_missing: true +- dot_expander: + field: node.id + path: elasticsearch.deprecation +- rename: + field: elasticsearch.deprecation.node.id + target_field: elasticsearch.node.id + ignore_missing: true +- remove: + field: message +- rename: + field: elasticsearch.deprecation.message + target_field: message +- date: + field: 'elasticsearch.deprecation.@timestamp' + formats: + - ISO8601 + ignore_failure: true + if: 'ctx.elasticsearch?.deprecation["@timestamp"] != null' +- date: + field: 'elasticsearch.deprecation.timestamp' + formats: + - ISO8601 + ignore_failure: true + if: 'ctx.elasticsearch?.deprecation?.timestamp != null' +- remove: + field: + - elasticsearch.deprecation.timestamp + - elasticsearch.deprecation.@timestamp + ignore_missing: true diff --git a/filebeat/module/elasticsearch/deprecation/ingest/pipeline-json-8.yml b/filebeat/module/elasticsearch/deprecation/ingest/pipeline-json-8.yml new file mode 100644 index 00000000000..89c7b4083f6 --- /dev/null +++ b/filebeat/module/elasticsearch/deprecation/ingest/pipeline-json-8.yml @@ -0,0 +1,15 @@ +description: Pipeline for parsing the Elasticsearch deprecation log file in JSON format. 
+on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' +processors: +- json: + field: message + add_to_root: true +- dot_expander: + field: '*' + override: true +- set: + field: event.dataset + value: elasticsearch.deprecation diff --git a/filebeat/module/elasticsearch/deprecation/ingest/pipeline-json.yml b/filebeat/module/elasticsearch/deprecation/ingest/pipeline-json.yml index 08f044e68d5..d4647fbff10 100644 --- a/filebeat/module/elasticsearch/deprecation/ingest/pipeline-json.yml +++ b/filebeat/module/elasticsearch/deprecation/ingest/pipeline-json.yml @@ -4,94 +4,9 @@ on_failure: field: error.message value: '{{ _ingest.on_failure_message }}' processors: -- json: - field: message - target_field: elasticsearch.deprecation -- drop: - if: '!["deprecation", "deprecation.elasticsearch"].contains(ctx.elasticsearch.deprecation.type)' -- remove: - field: elasticsearch.deprecation.type -- dot_expander: - field: service.name - path: elasticsearch.deprecation -- rename: - field: elasticsearch.deprecation.service.name - target_field: service.name - ignore_missing: true -- rename: - field: elasticsearch.deprecation.level - target_field: log.level - ignore_missing: true -- dot_expander: - field: log.level - path: elasticsearch.deprecation -- rename: - field: elasticsearch.deprecation.log.level - target_field: log.level - ignore_missing: true -- dot_expander: - field: log.logger - path: elasticsearch.deprecation -- rename: - field: elasticsearch.deprecation.log.logger - target_field: log.logger - ignore_missing: true -- dot_expander: - field: process.thread.name - path: elasticsearch.deprecation -- rename: - field: elasticsearch.deprecation.process.thread.name - target_field: process.thread.name - ignore_missing: true -- rename: - field: elasticsearch.deprecation.component - target_field: elasticsearch.component - ignore_missing: true -- dot_expander: - field: cluster.name - path: elasticsearch.deprecation -- rename: - field: elasticsearch.deprecation.cluster.name - target_field: elasticsearch.cluster.name -- dot_expander: - field: node.name - path: elasticsearch.deprecation -- rename: - field: elasticsearch.deprecation.node.name - target_field: elasticsearch.node.name -- dot_expander: - field: cluster.uuid - path: elasticsearch.deprecation -- rename: - field: elasticsearch.deprecation.cluster.uuid - target_field: elasticsearch.cluster.uuid - ignore_missing: true -- dot_expander: - field: node.id - path: elasticsearch.deprecation -- rename: - field: elasticsearch.deprecation.node.id - target_field: elasticsearch.node.id - ignore_missing: true -- remove: - field: message -- rename: - field: elasticsearch.deprecation.message - target_field: message -- date: - field: 'elasticsearch.deprecation.@timestamp' - formats: - - ISO8601 - ignore_failure: true - if: 'ctx.elasticsearch?.deprecation["@timestamp"] != null' -- date: - field: 'elasticsearch.deprecation.timestamp' - formats: - - ISO8601 - ignore_failure: true - if: 'ctx.elasticsearch?.deprecation?.timestamp != null' -- remove: - field: - - elasticsearch.deprecation.timestamp - - elasticsearch.deprecation.@timestamp - ignore_missing: true + - pipeline: + if: '!ctx.message.contains("ecs.version")' + name: '{< IngestPipeline "pipeline-json-7" >}' + - pipeline: + if: 'ctx.message.contains("ecs.version")' + name: '{< IngestPipeline "pipeline-json-8" >}' diff --git a/filebeat/module/elasticsearch/deprecation/ingest/pipeline.yml b/filebeat/module/elasticsearch/deprecation/ingest/pipeline.yml index e1f4838df9b..7c64e431021 100644 
--- a/filebeat/module/elasticsearch/deprecation/ingest/pipeline.yml +++ b/filebeat/module/elasticsearch/deprecation/ingest/pipeline.yml @@ -3,9 +3,9 @@ processors: - set: field: event.ingested value: '{{_ingest.timestamp}}' -- rename: - field: '@timestamp' - target_field: event.created +- set: + copy_from: "@timestamp" + field: event.created - grok: field: message patterns: diff --git a/filebeat/module/elasticsearch/deprecation/manifest.yml b/filebeat/module/elasticsearch/deprecation/manifest.yml index 8dfbaec866b..93b1ef80b09 100644 --- a/filebeat/module/elasticsearch/deprecation/manifest.yml +++ b/filebeat/module/elasticsearch/deprecation/manifest.yml @@ -16,4 +16,6 @@ ingest_pipeline: - ingest/pipeline.yml - ingest/pipeline-plaintext.yml - ingest/pipeline-json.yml + - ingest/pipeline-json-7.yml + - ingest/pipeline-json-8.yml input: config/log.yml diff --git a/filebeat/module/elasticsearch/deprecation/test/es_deprecation-json.800.log b/filebeat/module/elasticsearch/deprecation/test/es_deprecation-json.800.log index 888a5d92080..40157a6d5e2 100644 --- a/filebeat/module/elasticsearch/deprecation/test/es_deprecation-json.800.log +++ b/filebeat/module/elasticsearch/deprecation/test/es_deprecation-json.800.log @@ -1,15 +1,2 @@ -{"@timestamp":"2020-04-15T12:35:20.315Z", "log.level": "WARN", "message":"Field parameter [precision] is deprecated and will be removed in a future version." , "service.name":"ES_ECS","process.thread.name":"elasticsearch[integTest-0][masterService#updateTask][T#1]","log.logger":"org.elasticsearch.deprecation.index.mapper.LegacyGeoShapeFieldMapper","type":"deprecation","cluster.uuid":"a0P-i2H5R9-tJqwtF7BL0A","node.id":"FFMF7MVISuCWZMtxGmcGhg","node.name":"integTest-0","cluster.name":"integTest"} -{"@timestamp":"2020-04-15T12:35:20.316Z", "log.level": "WARN", "message":"Field parameter [tree] is deprecated and will be removed in a future version." , "service.name":"ES_ECS","process.thread.name":"elasticsearch[integTest-0][masterService#updateTask][T#1]","log.logger":"org.elasticsearch.deprecation.index.mapper.LegacyGeoShapeFieldMapper","type":"deprecation","cluster.uuid":"a0P-i2H5R9-tJqwtF7BL0A","node.id":"FFMF7MVISuCWZMtxGmcGhg","node.name":"integTest-0","cluster.name":"integTest"} -{"@timestamp":"2020-04-15T12:35:20.366Z", "log.level": "WARN", "message":"Field parameter [precision] is deprecated and will be removed in a future version." , "service.name":"ES_ECS","process.thread.name":"elasticsearch[integTest-0][masterService#updateTask][T#1]","log.logger":"org.elasticsearch.deprecation.index.mapper.LegacyGeoShapeFieldMapper","type":"deprecation","cluster.uuid":"a0P-i2H5R9-tJqwtF7BL0A","node.id":"FFMF7MVISuCWZMtxGmcGhg","node.name":"integTest-0","cluster.name":"integTest"} -{"@timestamp":"2020-04-15T12:35:20.367Z", "log.level": "WARN", "message":"Field parameter [strategy] is deprecated and will be removed in a future version." , "service.name":"ES_ECS","process.thread.name":"elasticsearch[integTest-0][masterService#updateTask][T#1]","log.logger":"org.elasticsearch.deprecation.index.mapper.LegacyGeoShapeFieldMapper","type":"deprecation","cluster.uuid":"a0P-i2H5R9-tJqwtF7BL0A","node.id":"FFMF7MVISuCWZMtxGmcGhg","node.name":"integTest-0","cluster.name":"integTest"} -{"@timestamp":"2020-04-15T12:35:20.479Z", "log.level": "WARN", "message":"Field parameter [precision] is deprecated and will be removed in a future version." 
, "service.name":"ES_ECS","process.thread.name":"elasticsearch[integTest-0][clusterApplierService#updateTask][T#1]","log.logger":"org.elasticsearch.deprecation.index.mapper.LegacyGeoShapeFieldMapper","type":"deprecation","cluster.uuid":"a0P-i2H5R9-tJqwtF7BL0A","node.id":"FFMF7MVISuCWZMtxGmcGhg","node.name":"integTest-0","cluster.name":"integTest"} -{"@timestamp":"2020-04-15T12:35:20.480Z", "log.level": "WARN", "message":"Field parameter [strategy] is deprecated and will be removed in a future version." , "service.name":"ES_ECS","process.thread.name":"elasticsearch[integTest-0][clusterApplierService#updateTask][T#1]","log.logger":"org.elasticsearch.deprecation.index.mapper.LegacyGeoShapeFieldMapper","type":"deprecation","cluster.uuid":"a0P-i2H5R9-tJqwtF7BL0A","node.id":"FFMF7MVISuCWZMtxGmcGhg","node.name":"integTest-0","cluster.name":"integTest"} -{"@timestamp":"2020-04-15T12:35:20.481Z", "log.level": "WARN", "message":"Field parameter [precision] is deprecated and will be removed in a future version." , "service.name":"ES_ECS","process.thread.name":"elasticsearch[integTest-0][clusterApplierService#updateTask][T#1]","log.logger":"org.elasticsearch.deprecation.index.mapper.LegacyGeoShapeFieldMapper","type":"deprecation","cluster.uuid":"a0P-i2H5R9-tJqwtF7BL0A","node.id":"FFMF7MVISuCWZMtxGmcGhg","node.name":"integTest-0","cluster.name":"integTest"} -{"@timestamp":"2020-04-15T12:35:20.487Z", "log.level": "WARN", "message":"Field parameter [strategy] is deprecated and will be removed in a future version." , "service.name":"ES_ECS","process.thread.name":"elasticsearch[integTest-0][clusterApplierService#updateTask][T#1]","log.logger":"org.elasticsearch.deprecation.index.mapper.LegacyGeoShapeFieldMapper","type":"deprecation","cluster.uuid":"a0P-i2H5R9-tJqwtF7BL0A","node.id":"FFMF7MVISuCWZMtxGmcGhg","node.name":"integTest-0","cluster.name":"integTest"} -{"@timestamp":"2020-04-16T13:46:33.582Z", "log.level": "WARN", "message":"[PUT /_xpack/security/user/{username}/_password] is deprecated! Use [PUT /_security/user/{username}/_password] instead." , "service.name":"ES_ECS","process.thread.name":"elasticsearch[n1][http_server_worker][T#3]","log.logger":"org.elasticsearch.deprecation.rest.RestController","type":"deprecation","cluster.uuid":"ZGYecRsDQPK_-ktRec3ZGQ","node.id":"Ni-9zbrZRm24wm7_zNtMTw","node.name":"n1","cluster.name":"es800"} -{"@timestamp":"2020-04-16T13:46:34.219Z", "log.level": "WARN", "message":"[PUT /_xpack/security/user/{username}/_password] is deprecated! Use [PUT /_security/user/{username}/_password] instead." , "service.name":"ES_ECS","process.thread.name":"elasticsearch[n1][http_server_worker][T#4]","log.logger":"org.elasticsearch.deprecation.rest.RestController","type":"deprecation","cluster.uuid":"ZGYecRsDQPK_-ktRec3ZGQ","node.id":"Ni-9zbrZRm24wm7_zNtMTw","node.name":"n1","cluster.name":"es800"} -{"@timestamp":"2020-04-16T13:46:34.339Z", "log.level": "WARN", "message":"[PUT /_xpack/security/user/{username}/_password] is deprecated! Use [PUT /_security/user/{username}/_password] instead." , "service.name":"ES_ECS","process.thread.name":"elasticsearch[n1][http_server_worker][T#5]","log.logger":"org.elasticsearch.deprecation.rest.RestController","type":"deprecation","cluster.uuid":"ZGYecRsDQPK_-ktRec3ZGQ","node.id":"Ni-9zbrZRm24wm7_zNtMTw","node.name":"n1","cluster.name":"es800"} -{"@timestamp":"2020-04-16T13:46:34.455Z", "log.level": "WARN", "message":"[PUT /_xpack/security/user/{username}/_password] is deprecated! Use [PUT /_security/user/{username}/_password] instead." 
, "service.name":"ES_ECS","process.thread.name":"elasticsearch[n1][http_server_worker][T#6]","log.logger":"org.elasticsearch.deprecation.rest.RestController","type":"deprecation","cluster.uuid":"ZGYecRsDQPK_-ktRec3ZGQ","node.id":"Ni-9zbrZRm24wm7_zNtMTw","node.name":"n1","cluster.name":"es800"} -{"@timestamp":"2020-04-16T13:47:36.309Z", "log.level": "WARN", "message":"index name [.apm-custom-link] starts with a dot '.', in the next major version, index names starting with a dot are reserved for hidden indices and system indices" , "service.name":"ES_ECS","process.thread.name":"elasticsearch[n1][masterService#updateTask][T#1]","log.logger":"org.elasticsearch.deprecation.cluster.metadata.MetadataCreateIndexService","type":"deprecation","cluster.uuid":"ZGYecRsDQPK_-ktRec3ZGQ","node.id":"Ni-9zbrZRm24wm7_zNtMTw","node.name":"n1","cluster.name":"es800"} -{"@timestamp":"2020-04-16T13:55:56.365Z", "log.level": "WARN", "message":"index name [.monitoring-alerts-7] starts with a dot '.', in the next major version, index names starting with a dot are reserved for hidden indices and system indices" , "service.name":"ES_ECS","process.thread.name":"elasticsearch[n1][masterService#updateTask][T#1]","log.logger":"org.elasticsearch.deprecation.cluster.metadata.MetadataCreateIndexService","type":"deprecation","cluster.uuid":"ZGYecRsDQPK_-ktRec3ZGQ","node.id":"Ni-9zbrZRm24wm7_zNtMTw","node.name":"n1","cluster.name":"es800"} -{"@timestamp":"2020-04-16T13:56:14.697Z", "log.level": "WARN", "message":"[types removal] Using the _type field in queries and aggregations is deprecated, prefer to use a field instead." , "service.name":"ES_ECS","process.thread.name":"elasticsearch[n1][search][T#7]","log.logger":"org.elasticsearch.deprecation.index.query.QueryShardContext","type":"deprecation","cluster.uuid":"ZGYecRsDQPK_-ktRec3ZGQ","node.id":"Ni-9zbrZRm24wm7_zNtMTw","node.name":"n1","cluster.name":"es800"} +{"@timestamp":"2022-01-27T11:48:45.809Z", "log.level":"CRITICAL", "data_stream.dataset":"deprecation.elasticsearch","data_stream.namespace":"default","data_stream.type":"logs","elasticsearch.elastic_product_origin":"","elasticsearch.event.category":"compatible_api","elasticsearch.http.request.x_opaque_id":"v7app","event.code":"create_index_with_types","message":"[types removal] Using include_type_name in create index requests is deprecated. The parameter will be removed in the next major version." , "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"deprecation.elasticsearch","process.thread.name":"elasticsearch[runTask-0][transport_worker][T#8]","log.logger":"org.elasticsearch.deprecation.rest.action.admin.indices.RestCreateIndexAction","trace.id":"0af7651916cd43dd8448eb211c80319c","elasticsearch.cluster.uuid":"5alW33KLT16Lp1SevDqDSQ","elasticsearch.node.id":"tVLnAGLgQum5ca6z50aqbw","elasticsearch.node.name":"runTask-0","elasticsearch.cluster.name":"runTask"} +{"@timestamp":"2022-01-27T11:52:39.882Z", "log.level":"CRITICAL", "data_stream.dataset":"deprecation.elasticsearch","data_stream.namespace":"default","data_stream.type":"logs","elasticsearch.event.category":"compatible_api","event.code":"create_index_with_types","message":"[types removal] Using include_type_name in create index requests is deprecated. The parameter will be removed in the next major version." 
, "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"deprecation.elasticsearch","process.thread.name":"elasticsearch[runTask-0][transport_worker][T#9]","log.logger":"org.elasticsearch.deprecation.rest.action.admin.indices.RestCreateIndexAction","elasticsearch.cluster.uuid":"5alW33KLT16Lp1SevDqDSQ","elasticsearch.node.id":"tVLnAGLgQum5ca6z50aqbw","elasticsearch.node.name":"runTask-0","elasticsearch.cluster.name":"runTask"} diff --git a/filebeat/module/elasticsearch/deprecation/test/es_deprecation-json.800.log-expected.json b/filebeat/module/elasticsearch/deprecation/test/es_deprecation-json.800.log-expected.json index 89f625d1f17..20dd03fab88 100644 --- a/filebeat/module/elasticsearch/deprecation/test/es_deprecation-json.800.log-expected.json +++ b/filebeat/module/elasticsearch/deprecation/test/es_deprecation-json.800.log-expected.json @@ -1,331 +1,58 @@ [ { - "@timestamp": "2020-04-15T12:35:20.315Z", - "elasticsearch.cluster.name": "integTest", - "elasticsearch.cluster.uuid": "a0P-i2H5R9-tJqwtF7BL0A", - "elasticsearch.node.id": "FFMF7MVISuCWZMtxGmcGhg", - "elasticsearch.node.name": "integTest-0", + "@timestamp": "2022-01-27T11:48:45.809Z", + "data_stream.dataset": "deprecation.elasticsearch", + "data_stream.namespace": "default", + "data_stream.type": "logs", + "elasticsearch.cluster.name": "runTask", + "elasticsearch.cluster.uuid": "5alW33KLT16Lp1SevDqDSQ", + "elasticsearch.elastic_product_origin": "", + "elasticsearch.event.category": "compatible_api", + "elasticsearch.http.request.x_opaque_id": "v7app", + "elasticsearch.node.id": "tVLnAGLgQum5ca6z50aqbw", + "elasticsearch.node.name": "runTask-0", "event.category": "database", + "event.code": "create_index_with_types", "event.dataset": "elasticsearch.deprecation", "event.kind": "event", "event.module": "elasticsearch", "event.type": "info", "fileset.name": "deprecation", - "host.id": "FFMF7MVISuCWZMtxGmcGhg", + "host.id": "tVLnAGLgQum5ca6z50aqbw", "input.type": "log", - "log.level": "WARN", - "log.logger": "org.elasticsearch.deprecation.index.mapper.LegacyGeoShapeFieldMapper", + "log.level": "CRITICAL", + "log.logger": "org.elasticsearch.deprecation.rest.action.admin.indices.RestCreateIndexAction", "log.offset": 0, - "message": "Field parameter [precision] is deprecated and will be removed in a future version.", - "process.thread.name": "elasticsearch[integTest-0][masterService#updateTask][T#1]", + "message": "[types removal] Using include_type_name in create index requests is deprecated. 
The parameter will be removed in the next major version.", + "process.thread.name": "elasticsearch[runTask-0][transport_worker][T#8]", "service.name": "ES_ECS", - "service.type": "elasticsearch" - }, - { - "@timestamp": "2020-04-15T12:35:20.316Z", - "elasticsearch.cluster.name": "integTest", - "elasticsearch.cluster.uuid": "a0P-i2H5R9-tJqwtF7BL0A", - "elasticsearch.node.id": "FFMF7MVISuCWZMtxGmcGhg", - "elasticsearch.node.name": "integTest-0", - "event.category": "database", - "event.dataset": "elasticsearch.deprecation", - "event.kind": "event", - "event.module": "elasticsearch", - "event.type": "info", - "fileset.name": "deprecation", - "host.id": "FFMF7MVISuCWZMtxGmcGhg", - "input.type": "log", - "log.level": "WARN", - "log.logger": "org.elasticsearch.deprecation.index.mapper.LegacyGeoShapeFieldMapper", - "log.offset": 501, - "message": "Field parameter [tree] is deprecated and will be removed in a future version.", - "process.thread.name": "elasticsearch[integTest-0][masterService#updateTask][T#1]", - "service.name": "ES_ECS", - "service.type": "elasticsearch" - }, - { - "@timestamp": "2020-04-15T12:35:20.366Z", - "elasticsearch.cluster.name": "integTest", - "elasticsearch.cluster.uuid": "a0P-i2H5R9-tJqwtF7BL0A", - "elasticsearch.node.id": "FFMF7MVISuCWZMtxGmcGhg", - "elasticsearch.node.name": "integTest-0", - "event.category": "database", - "event.dataset": "elasticsearch.deprecation", - "event.kind": "event", - "event.module": "elasticsearch", - "event.type": "info", - "fileset.name": "deprecation", - "host.id": "FFMF7MVISuCWZMtxGmcGhg", - "input.type": "log", - "log.level": "WARN", - "log.logger": "org.elasticsearch.deprecation.index.mapper.LegacyGeoShapeFieldMapper", - "log.offset": 997, - "message": "Field parameter [precision] is deprecated and will be removed in a future version.", - "process.thread.name": "elasticsearch[integTest-0][masterService#updateTask][T#1]", - "service.name": "ES_ECS", - "service.type": "elasticsearch" - }, - { - "@timestamp": "2020-04-15T12:35:20.367Z", - "elasticsearch.cluster.name": "integTest", - "elasticsearch.cluster.uuid": "a0P-i2H5R9-tJqwtF7BL0A", - "elasticsearch.node.id": "FFMF7MVISuCWZMtxGmcGhg", - "elasticsearch.node.name": "integTest-0", - "event.category": "database", - "event.dataset": "elasticsearch.deprecation", - "event.kind": "event", - "event.module": "elasticsearch", - "event.type": "info", - "fileset.name": "deprecation", - "host.id": "FFMF7MVISuCWZMtxGmcGhg", - "input.type": "log", - "log.level": "WARN", - "log.logger": "org.elasticsearch.deprecation.index.mapper.LegacyGeoShapeFieldMapper", - "log.offset": 1498, - "message": "Field parameter [strategy] is deprecated and will be removed in a future version.", - "process.thread.name": "elasticsearch[integTest-0][masterService#updateTask][T#1]", - "service.name": "ES_ECS", - "service.type": "elasticsearch" - }, - { - "@timestamp": "2020-04-15T12:35:20.479Z", - "elasticsearch.cluster.name": "integTest", - "elasticsearch.cluster.uuid": "a0P-i2H5R9-tJqwtF7BL0A", - "elasticsearch.node.id": "FFMF7MVISuCWZMtxGmcGhg", - "elasticsearch.node.name": "integTest-0", - "event.category": "database", - "event.dataset": "elasticsearch.deprecation", - "event.kind": "event", - "event.module": "elasticsearch", - "event.type": "info", - "fileset.name": "deprecation", - "host.id": "FFMF7MVISuCWZMtxGmcGhg", - "input.type": "log", - "log.level": "WARN", - "log.logger": "org.elasticsearch.deprecation.index.mapper.LegacyGeoShapeFieldMapper", - "log.offset": 1998, - "message": "Field parameter [precision] is 
deprecated and will be removed in a future version.", - "process.thread.name": "elasticsearch[integTest-0][clusterApplierService#updateTask][T#1]", - "service.name": "ES_ECS", - "service.type": "elasticsearch" - }, - { - "@timestamp": "2020-04-15T12:35:20.480Z", - "elasticsearch.cluster.name": "integTest", - "elasticsearch.cluster.uuid": "a0P-i2H5R9-tJqwtF7BL0A", - "elasticsearch.node.id": "FFMF7MVISuCWZMtxGmcGhg", - "elasticsearch.node.name": "integTest-0", - "event.category": "database", - "event.dataset": "elasticsearch.deprecation", - "event.kind": "event", - "event.module": "elasticsearch", - "event.type": "info", - "fileset.name": "deprecation", - "host.id": "FFMF7MVISuCWZMtxGmcGhg", - "input.type": "log", - "log.level": "WARN", - "log.logger": "org.elasticsearch.deprecation.index.mapper.LegacyGeoShapeFieldMapper", - "log.offset": 2507, - "message": "Field parameter [strategy] is deprecated and will be removed in a future version.", - "process.thread.name": "elasticsearch[integTest-0][clusterApplierService#updateTask][T#1]", - "service.name": "ES_ECS", - "service.type": "elasticsearch" - }, - { - "@timestamp": "2020-04-15T12:35:20.481Z", - "elasticsearch.cluster.name": "integTest", - "elasticsearch.cluster.uuid": "a0P-i2H5R9-tJqwtF7BL0A", - "elasticsearch.node.id": "FFMF7MVISuCWZMtxGmcGhg", - "elasticsearch.node.name": "integTest-0", - "event.category": "database", - "event.dataset": "elasticsearch.deprecation", - "event.kind": "event", - "event.module": "elasticsearch", - "event.type": "info", - "fileset.name": "deprecation", - "host.id": "FFMF7MVISuCWZMtxGmcGhg", - "input.type": "log", - "log.level": "WARN", - "log.logger": "org.elasticsearch.deprecation.index.mapper.LegacyGeoShapeFieldMapper", - "log.offset": 3015, - "message": "Field parameter [precision] is deprecated and will be removed in a future version.", - "process.thread.name": "elasticsearch[integTest-0][clusterApplierService#updateTask][T#1]", - "service.name": "ES_ECS", - "service.type": "elasticsearch" - }, - { - "@timestamp": "2020-04-15T12:35:20.487Z", - "elasticsearch.cluster.name": "integTest", - "elasticsearch.cluster.uuid": "a0P-i2H5R9-tJqwtF7BL0A", - "elasticsearch.node.id": "FFMF7MVISuCWZMtxGmcGhg", - "elasticsearch.node.name": "integTest-0", - "event.category": "database", - "event.dataset": "elasticsearch.deprecation", - "event.kind": "event", - "event.module": "elasticsearch", - "event.type": "info", - "fileset.name": "deprecation", - "host.id": "FFMF7MVISuCWZMtxGmcGhg", - "input.type": "log", - "log.level": "WARN", - "log.logger": "org.elasticsearch.deprecation.index.mapper.LegacyGeoShapeFieldMapper", - "log.offset": 3524, - "message": "Field parameter [strategy] is deprecated and will be removed in a future version.", - "process.thread.name": "elasticsearch[integTest-0][clusterApplierService#updateTask][T#1]", - "service.name": "ES_ECS", - "service.type": "elasticsearch" - }, - { - "@timestamp": "2020-04-16T13:46:33.582Z", - "elasticsearch.cluster.name": "es800", - "elasticsearch.cluster.uuid": "ZGYecRsDQPK_-ktRec3ZGQ", - "elasticsearch.node.id": "Ni-9zbrZRm24wm7_zNtMTw", - "elasticsearch.node.name": "n1", - "event.category": "database", - "event.dataset": "elasticsearch.deprecation", - "event.kind": "event", - "event.module": "elasticsearch", - "event.type": "info", - "fileset.name": "deprecation", - "host.id": "Ni-9zbrZRm24wm7_zNtMTw", - "input.type": "log", - "log.level": "WARN", - "log.logger": "org.elasticsearch.deprecation.rest.RestController", - "log.offset": 4032, - "message": "[PUT 
/_xpack/security/user/{username}/_password] is deprecated! Use [PUT /_security/user/{username}/_password] instead.", - "process.thread.name": "elasticsearch[n1][http_server_worker][T#3]", - "service.name": "ES_ECS", - "service.type": "elasticsearch" - }, - { - "@timestamp": "2020-04-16T13:46:34.219Z", - "elasticsearch.cluster.name": "es800", - "elasticsearch.cluster.uuid": "ZGYecRsDQPK_-ktRec3ZGQ", - "elasticsearch.node.id": "Ni-9zbrZRm24wm7_zNtMTw", - "elasticsearch.node.name": "n1", - "event.category": "database", - "event.dataset": "elasticsearch.deprecation", - "event.kind": "event", - "event.module": "elasticsearch", - "event.type": "info", - "fileset.name": "deprecation", - "host.id": "Ni-9zbrZRm24wm7_zNtMTw", - "input.type": "log", - "log.level": "WARN", - "log.logger": "org.elasticsearch.deprecation.rest.RestController", - "log.offset": 4523, - "message": "[PUT /_xpack/security/user/{username}/_password] is deprecated! Use [PUT /_security/user/{username}/_password] instead.", - "process.thread.name": "elasticsearch[n1][http_server_worker][T#4]", - "service.name": "ES_ECS", - "service.type": "elasticsearch" - }, - { - "@timestamp": "2020-04-16T13:46:34.339Z", - "elasticsearch.cluster.name": "es800", - "elasticsearch.cluster.uuid": "ZGYecRsDQPK_-ktRec3ZGQ", - "elasticsearch.node.id": "Ni-9zbrZRm24wm7_zNtMTw", - "elasticsearch.node.name": "n1", - "event.category": "database", - "event.dataset": "elasticsearch.deprecation", - "event.kind": "event", - "event.module": "elasticsearch", - "event.type": "info", - "fileset.name": "deprecation", - "host.id": "Ni-9zbrZRm24wm7_zNtMTw", - "input.type": "log", - "log.level": "WARN", - "log.logger": "org.elasticsearch.deprecation.rest.RestController", - "log.offset": 5014, - "message": "[PUT /_xpack/security/user/{username}/_password] is deprecated! Use [PUT /_security/user/{username}/_password] instead.", - "process.thread.name": "elasticsearch[n1][http_server_worker][T#5]", - "service.name": "ES_ECS", - "service.type": "elasticsearch" - }, - { - "@timestamp": "2020-04-16T13:46:34.455Z", - "elasticsearch.cluster.name": "es800", - "elasticsearch.cluster.uuid": "ZGYecRsDQPK_-ktRec3ZGQ", - "elasticsearch.node.id": "Ni-9zbrZRm24wm7_zNtMTw", - "elasticsearch.node.name": "n1", - "event.category": "database", - "event.dataset": "elasticsearch.deprecation", - "event.kind": "event", - "event.module": "elasticsearch", - "event.type": "info", - "fileset.name": "deprecation", - "host.id": "Ni-9zbrZRm24wm7_zNtMTw", - "input.type": "log", - "log.level": "WARN", - "log.logger": "org.elasticsearch.deprecation.rest.RestController", - "log.offset": 5505, - "message": "[PUT /_xpack/security/user/{username}/_password] is deprecated! 
Use [PUT /_security/user/{username}/_password] instead.", - "process.thread.name": "elasticsearch[n1][http_server_worker][T#6]", - "service.name": "ES_ECS", - "service.type": "elasticsearch" - }, - { - "@timestamp": "2020-04-16T13:47:36.309Z", - "elasticsearch.cluster.name": "es800", - "elasticsearch.cluster.uuid": "ZGYecRsDQPK_-ktRec3ZGQ", - "elasticsearch.node.id": "Ni-9zbrZRm24wm7_zNtMTw", - "elasticsearch.node.name": "n1", - "event.category": "database", - "event.dataset": "elasticsearch.deprecation", - "event.kind": "event", - "event.module": "elasticsearch", - "event.type": "info", - "fileset.name": "deprecation", - "host.id": "Ni-9zbrZRm24wm7_zNtMTw", - "input.type": "log", - "log.level": "WARN", - "log.logger": "org.elasticsearch.deprecation.cluster.metadata.MetadataCreateIndexService", - "log.offset": 5996, - "message": "index name [.apm-custom-link] starts with a dot '.', in the next major version, index names starting with a dot are reserved for hidden indices and system indices", - "process.thread.name": "elasticsearch[n1][masterService#updateTask][T#1]", - "service.name": "ES_ECS", - "service.type": "elasticsearch" - }, - { - "@timestamp": "2020-04-16T13:55:56.365Z", - "elasticsearch.cluster.name": "es800", - "elasticsearch.cluster.uuid": "ZGYecRsDQPK_-ktRec3ZGQ", - "elasticsearch.node.id": "Ni-9zbrZRm24wm7_zNtMTw", - "elasticsearch.node.name": "n1", - "event.category": "database", - "event.dataset": "elasticsearch.deprecation", - "event.kind": "event", - "event.module": "elasticsearch", - "event.type": "info", - "fileset.name": "deprecation", - "host.id": "Ni-9zbrZRm24wm7_zNtMTw", - "input.type": "log", - "log.level": "WARN", - "log.logger": "org.elasticsearch.deprecation.cluster.metadata.MetadataCreateIndexService", - "log.offset": 6560, - "message": "index name [.monitoring-alerts-7] starts with a dot '.', in the next major version, index names starting with a dot are reserved for hidden indices and system indices", - "process.thread.name": "elasticsearch[n1][masterService#updateTask][T#1]", - "service.name": "ES_ECS", - "service.type": "elasticsearch" + "service.type": "elasticsearch", + "trace.id": "0af7651916cd43dd8448eb211c80319c" }, { - "@timestamp": "2020-04-16T13:56:14.697Z", - "elasticsearch.cluster.name": "es800", - "elasticsearch.cluster.uuid": "ZGYecRsDQPK_-ktRec3ZGQ", - "elasticsearch.node.id": "Ni-9zbrZRm24wm7_zNtMTw", - "elasticsearch.node.name": "n1", + "@timestamp": "2022-01-27T11:52:39.882Z", + "data_stream.dataset": "deprecation.elasticsearch", + "data_stream.namespace": "default", + "data_stream.type": "logs", + "elasticsearch.cluster.name": "runTask", + "elasticsearch.cluster.uuid": "5alW33KLT16Lp1SevDqDSQ", + "elasticsearch.event.category": "compatible_api", + "elasticsearch.node.id": "tVLnAGLgQum5ca6z50aqbw", + "elasticsearch.node.name": "runTask-0", "event.category": "database", + "event.code": "create_index_with_types", "event.dataset": "elasticsearch.deprecation", "event.kind": "event", "event.module": "elasticsearch", "event.type": "info", "fileset.name": "deprecation", - "host.id": "Ni-9zbrZRm24wm7_zNtMTw", + "host.id": "tVLnAGLgQum5ca6z50aqbw", "input.type": "log", - "log.level": "WARN", - "log.logger": "org.elasticsearch.deprecation.index.query.QueryShardContext", - "log.offset": 7128, - "message": "[types removal] Using the _type field in queries and aggregations is deprecated, prefer to use a field instead.", - "process.thread.name": "elasticsearch[n1][search][T#7]", + "log.level": "CRITICAL", + "log.logger": 
"org.elasticsearch.deprecation.rest.action.admin.indices.RestCreateIndexAction", + "log.offset": 989, + "message": "[types removal] Using include_type_name in create index requests is deprecated. The parameter will be removed in the next major version.", + "process.thread.name": "elasticsearch[runTask-0][transport_worker][T#9]", "service.name": "ES_ECS", "service.type": "elasticsearch" } diff --git a/filebeat/module/elasticsearch/fields.go b/filebeat/module/elasticsearch/fields.go index 889d94d042a..525d0c50eac 100644 --- a/filebeat/module/elasticsearch/fields.go +++ b/filebeat/module/elasticsearch/fields.go @@ -32,5 +32,5 @@ func init() { // AssetElasticsearch returns asset data. // This is the base64 encoded zlib format compressed contents of module/elasticsearch. func AssetElasticsearch() string { - return "eJzUml9z2zYSwN/zKXb0cu2MzZP/1Fdr5m6mVRzbmSZxLdu5VvFwVuCKQgQCNABKVjv57jcAKVmiSOrPtbmeXxKKAPa3i8ViF8QhjGnWARJoLGeGULPRKwDLraAOtC6Wf2+9AtAkCA11IMZXABEZpnlquZId+NcrAFgdCd6pKBP0CmDISUSm45scgsSE1oW6PztL3eBaZWnxS4WM1eGWh2QqSZUkaRdvSgOsavTSHoZaJTAdkSawIwKhYqCJe6E0j7lES1FraVB6xiT1JlIBBSxIgndk8TVa7GpCS9cyouce6QlntNwv129Ms6nS0Tq+yIwlHWQZj2o1uL+/fg1q6DGLDtVkl8lED67E+zvee/iF3wy/Hz/H5/HuNO6pluY9JrQVTaTYmPRhRZtmCqkiChrM8WIM17Ja9use/8juZnQ3+mjv//3Tj+dv2z++m+7IsLUZ6jkmH9+/Nb+ebC+YOzdqluw9zTevljnkggaE9tCSsYdcppndVX6T9b10XrM28MNl/Ho6uL8ddh+++8cPPfY06MY72N2MUEeN4qO50X3Taor29gIxi7hda70cjqAi+iyPIHBGeuVNmfnOhRfXah5zOBuBHXGzFnA6oMnYA7AapUmVdu+Ap+GQi9ISWlXY9Sq/rdZ7mdxLD127jfiukTN8DmxHaEExlmntmFEqOUtUZkJkjIwJI5KcogPAzI5IWs7QDRUOkQv/c6lV/hhrlNY9MyUlMd+j6rd5N4tJSpqiUNNT5q2mMxni0kDFc96h3nir8nc3Yz59wUY7flzsNQXx2sTDN+tvcp9BuL3o3cEPN9fzzt8ue8mi3xQNaGLEJxSBkl7aSzM2QilJfHsAQjEUoYtb8E2++zEUPo4BNyajaJnz23rbvYyzu900oUg2et6qD+WdPFzphdN8goJH3mgYI5fra6IAb7ndiYaYCeuW1h7smSEdbKeAa/o3U6nHAfDh8otaL215N7V8QmHENTGr9GxfaCXINELfuhZg1SJQEaSaS8ZTFDAgoWRsaj2iD60xH6DEEKOEy9YBtNxeZIpHeNyT2i/l8r64+wBVU7Z5hKokc7ue6GPWRh+RS8lE3iWPsc6p6ZlYVu8aHWgVGVYnUZJbpf+eIJd7eIcWQYoakw3e4eLQ/e01+LZkSdc7Q+t3Z3s3/D8/IxtLzkbHX1qV0rmMONvgmNd5m2LDoAgGs8JaTe44VOrwuH10HrSPgvapc8iVX07Wfjnbx0uLULmauqyrcC/5U0aQp7BFn3rzffztp3A8OHvoTT6Mfnhq2+nN5OrDz/tE2hyuYvnUb/XzLWUHR+wKQt1jWglxW63b1qzhQEWzys4oOJb9JEU76sDI2jSY6+r6B0xJu75sEx5rzDW2OqOGTT3EKNJkyuI2gRiVaUYBT/cQnGm+ozS3cIt8QewhcBHbdxVr1ivFbWUmZAzG1aHc0rOtCRHz/T3AlI9pZgI1lRSFg1m4somGDq1y7IFSglCuVQERpZrynXljLVB5PAENRxQx239MgMsuuBTFkC0EBFvWJekITbWFy9I3ELi/N14QmJQYH3LmsoPLbi4iKDWuYlrmqnAZaIwMWwG6v+WS/LILTAmR1wzVoEvTn+UeGxpitWhDobAcSbYE65ZIFgLdVqZ0xGXsLOq43+IEYcK1zVBAgmzEZQO4YTobhGaWDJQILQ4EhZYn9GfpATeYGQInArgEQ0zJyABza8rpkKWQs4BnMRvBreYy/grgW3B7lI3cU8JxqGlowlQrl4V4/j+R/M4xm9RV3S8SPQZoGpIm6TKiF6Xq0V2+JgSJUJNhKL8W9ZK9E9RjRy/4hEANPhOzxhUZggDTVMxLI27AWJWmFNUrwwQaE2ZSKIy+lia5NO8vMnPpp4fY0voszTxnLWNVUN6S8SZ3DOje3Oc+XvgL6aHSiQN+CYUViPUhG0pVVI2RYaOht1TE/ZWUUJk1PMqPTcakJYkqBZYCy8z8Dyi5LENCI6UrQ78G5p2yKIAEps5fS9BW+cJWkM3Jl/ZLf+ZkLGrfasglN6OgMsv4PElCncmaJVivyAYFfCHiUD3J24d3BU2WLq22A0ADmA/vvDxVXFqQWTIgXU1rR5owMqF1dgldlKkLHnuTX6IeYLxizUIqeKk+thXTUBU0Fo7sQqDfXebMf7SJHYJVauymOIcqOBu5LMbV5VB16rbJWl0QKo7zrTeuETkiLEfGvRPZK8IUUAhVbDYoo/m88N92zmVdn3A8qA3qXFqK12qRLTBhsXid8l6Oc/wxF2ows00ZituZ/jSkexdGPFE9zKKCFlEYU/nsa++J+yAiiElSkTgrxrIUJZv99WfQT54aOoMsa/AXmM5am26e3ZnKZPxHzu8vbsD/8xmelXX4C8xxg12r6RZ2Iz1ZEbp6VNjzr/21ifInlG2/ob5sdcjGViNbzY6X5LU60HONwLdy4MyV0WoIpLXSqxuS/4TdgSGKlfOPyuOYslb5frR6alnn0k2HL94TmhZAK5+Xy279aWr12WnV0qpeAotALNerjlWWsqQmijmHUGsKLlKEqfoaAhf6TUiPCKPQ0FOjyXv0lLl6uUgRay1/cnp6fn5+XGn+WoqXfC+cn+4EGz6lrFbJl90D90/CheBFBlZLeHTWbm+ZBy6sNHALGncD9NHN56rOyMUHwaXMdoqmGJiiHei/34p+ER6EmgoV10ei/H1+N8HkFcPaNbY1iFb/uH30/WH77PD4/O6o3WmfdY5OD85PT
h771+/ffIDHfn4VJh8iKCCCp4z07BH6k/Dh7ejzwyP0E7KaM3/h5iw4CdqHbtygfRYcnz32248+xe6fBt8l5vHAP4S5kfqn/tkVIiNuTf/o/PTkO/fTLCXTfzxwYdHm//EI/qJG/+f7i9tfwruri/fhm4u77tViDH8dxvSPXHv/+aH/+6eWp/3U6vz+qZWgZaMQhcgfB0oZ+6nVOQraX758eTz4b+K3y+BL29PqDP3kG6xdWVqejUpjD8muzl59rbGIPUqNG0j8kuN2UfcU37R8/euNVcd30m4nZkcUN5FNLO59nbzdRHlXaRDVc+/zGa2V6N8e7Sj3xTObpOdXL12rOuFlt94Rwzt86CewiUOoafMs77BkdiOkZ6sxzDkbCC9cs0Id4HKodILr37f39ZKXYNPklXnVyW2do5we7yE0j04bxTrjc4ryu311AMe7AWiVWV7atMsXXnyLOiOb9tHVr8c//zg+/zw9jW2Mb6zczfClSwEr0q+jP2Zum5fgXcPaixTbZ7nVS+vl/quGECmWJYsbgy5b8HGeogZ5/wkAAP//ZMWUGw==" + return "eJzUWltz2zb2f8+nOKOXfztj8y9f6taa2Z1plcRxprk0sp1tFQ/nCDyiEIEADYCS1U6++w5AShYpkrpsm+3qxSZxOb9zPwfEMUxp0QMSaCxnhlCzyTMAy62gHnRerL/vPAPQJAgN9SDGZwARGaZ5armSPfjnMwAo7wRvVJQJegYw5iQi0/NTjkFiQptE3c8uUre5VllavKmhUd5ufUumklRJknY1UtmgzNHTfBhrlcB8QprATgiEioFmbkBpHnOJlqLO2qb0iEnqRaQCCliQBG/I4nO02NeElq5lRI8D0jPOaH1dzt+UFnOlo034IjOWdJBlPGrk4Pb2+jmosYdZLKhHdpXM9OiVeHvDB3e/8vfjH6aP8WW8Pxr31IjmLSa0E5pIsSnp45o57SikiihoEceTMNzMetrPB/wju1nQzeSjvf3Xzz9dvu7+9Ga+J4adxdCMY/bx7Wvz29nuhLkzo3bK3tL89HqaYy5oRGiPLRl7zGWa2X3pt0nfU+cNvoHvruLn89Hth3H/7rvvfxywh1E/3kPuZoI6aiUfLYXup9aj6O5OsAhJYapVlDEb5s6/dfmGixqKYLSAItiAscimYBXwiKTl4wXMJ7wUfpZc+InGvXGPmh4yMraerSkfocTOBg8Ta9OgWBk8hirFh4zCtoBSRluERqvATrSyVhCgjCCiKEsFZ2gJIko1MXTrYY5achmbBov/HtN0D/m7kBs4GrHSi0bE/WLCUmzrePwW9WCcuNHykaAQU747KswibjdmrycpqMlJ6zsIXJAujVRZunFJx81aZiJnH3bCzUYa6oEmY4/AapQmVdqNAU/DMReVwFrmXlcsqc2cKxoJ3byt8N0kp5EcsJ2gBcVYprXDjFLJRaIyEyJjZEwYkeQUHQFmduKcIldfOEYu/OvKrPwx1iite2ZKSmJ+Rd275TKLSUqaorDwhyPQmQxxbaPiOV/QLLwy/f3FmKsv2CrHj6sKpEC8oXj4ZnMktxmEDy8GN/Dj++vl4m/XrWS1bo4GNDHiM4pASU/taRqboJQkvj0CoRiK0GUz+CaviRgKn92AG5NRtI7z22bZPe2zv9w0oUi2Wl7ZhvJFHlxlwHE+Q8EjLzSMkctNnyiAd1zNQmPMhHWudQD2zJAOdmPATf0/U8vHEfDx+kCjlXa8mVo+ozDimphVenEoaCXItIL+4Ga4HLEMVASp5pLxFAWMSKhKRihZxHCZvEKMEi47R9BxFYopHuH+QNTelavV0v4b1Kls+w51rcduK9HHrK02ItdKzHxJHmOdUdMjsazZNFzuy+vuXqIkt0r/f4JcHmAdWgQpaky2WIeLQ7cfrsHPJUu62Rg6fzjZu+3/8RnZVHI2Of3SqaXOZcTZFsO8zucUCSMvbHJptZnjWKnj0+7JZdA9CbrnziBLb8423lwcYqXLooxHrSzcSv6QEeSNTU0JWBbfx99/Dqeji7vB7N3kx4eunb+fvXr3yyGRNgdX4z7NqX6ZUvYwxL4g1AOmlRAf6nnbGWs4UtGidjEKjlU7SdFOKtWxWx8wJe2m2yY81phzbHVGLUk9xCjSZKrktgExKtOMAp4eQDjTfE9qznGLekEcQHAV2/clazbPD3almZAxGNeHckuPtiFELPN7gCmf0sIEai4pCkeLsJREQwetdu+RUoJQ1ofqUoKuK+d2KAVrGrIGzpZL1vqbrW1I7XkZtJyZxezwPQGu+uCqI0O2IBDs2BKlEzT1wqtS34LA/V56QmBSYnzMmStMrvo5iaAyuQ7TOq4aa4VWve4E0P3Wz4iu+sCUEHm7Ug90Tf1Z7iyhIdYIbSwUVoPYjsD6FSQrgi6LKh1xGefHAQSvcYYw49pmKCBBNuGyBbhhOhuFZpGMlAgtusbb8oT+Kj7gPWaGwJEALsEQUzIywJw7Ox6yFHIs4LGYrcCt5jL+CsB3wO2hbMU9J5yGmsYmTLVyBZDH/xciv3GYTeoa/ieKHgZoGpMm6YqxJ6aaobtSUQgSoSbDUH4t1GvyTlBPHXrBZwRq9JmYNa6/EQSY5kdfzie4AWNVmlLUzAwTaEyYSaEw+lqc5NS8vcjMVb4exI7SZ2nmcTZirAvKO2J8nxsG9N/f5jZe2AvpsdKJA/wUCmsgNodsqDRwDUKGrYLekRH3qzChMmt4lJ/YTElLEnUMrAWWhfkvoOSyChJaUboO+GvAvFEWBZDA1NlrBbRVvqcWZHPka/nSH3cZi9rPGnPJzSSorTI+z5JQZ7LBBZsZ2cKA74EcVI/k9d2bAk2WrnnbEaABzLd3Vp4qLi3ILBmRrkdrJ5owMqF1cgldlGkKHgcjv0I9wrgkzYIqeKo+thVqqAsaK0N2IdBnlyXmP1vEDoJVyn83yUEVOFtxWYzrO7H60m2btPogVBznqTduIDkhrEbGgwvZV4QpoBCqSDYoo6Ve+O9717JuTTgdNQZ1Li3FG23QDjBh5byOeU/HGf6UCzVa2LYKxWWmvwyS/5LlETWDWbVhIgpjqh67Hay4dyKCmCQVhbNiLEtRssXfX4NeeWrsBLLOwd9AnY0y3a7dhcpk/Gfq91e34f+4hhdVHv4GOm6Raz26ldxIz0pEy6eUAz/s7/FUv97s+vn2KdUhm1qNrFwdr9Hr9GCQf+J3sxxw5tpoNQbSWulyQvJ3KnowRlE6/6g9jqlyleej8oFpk0m3Hb54S2hzgE6ul6t+80Fu/bFtnWvVu8AqEMvNrqOMpUqpDcUSh1AbDK5KhLn6GgRX/M1ITwij0NBDq8gH9JC5frkoERslf3Z+fnl5eVor/kYUT/VeuDzdCbZ8xSl3yVf9I/cn4ULwogJrRHhy0e3uWAeupDRyDo37AfTRzdeqTsirSzWrynaOptiYoj3Q/7AT+lV4EGouVNwcifLx/FqEyTuGjXuVGyA6w9PuyQ/H3Yvj08ubk26ve9E7OT+6PDu7H16/ffkO7of53ax8i6AAETxkpBf3MJyFd68nn+/u
YZiQ1Zz5G2AXwVnQPXb7Bt2L4PTifti99yX28Dz4LjH3R/4hzIU0PPfPrhGZcGuGJ5fnZ9+5V4uUzPD+yIVFm//jIfg7IsNfbl98+DW8efXibfjyxU3/1WoPfz/LDE/cfP/lY/jHp45H+6nT++NTJ0HLJiEKkT+OlDL2U6d3EnS/fPlyf/SfxG9XwVfSU1lDP/sJG3fo1rVRK+wx2bL2mnuNVexRatqCxLsct6u+p/ic5vtfL6wmfGfdbmL2hOIU2YbFjTfR24+UN5UWUgM3nmu0kaIfPdmT7pNltlEvLrwt0kb1V816Txje4EOvwDYcQs3btbyHy+yHkB6txjDH2YLwhZtWsANcjpVOcPPT+qFW8hRs2qwy7zq5bTKU89MDiObRaStZJ3xOUX7ZtAnA6X4AtMosryTt6l0bP6NJyKZ78uq3019+ml5+np/HNsaXVu4n+MqnyBL16+jP0W27C960+F6k2CHu1kxtkNuvGkOkWJasLiu6asHHeYpa6P07AAD//wFdWko=" } diff --git a/filebeat/module/elasticsearch/gc/ingest/pipeline.yml b/filebeat/module/elasticsearch/gc/ingest/pipeline.yml index d0980763ecc..6d3c9006a20 100644 --- a/filebeat/module/elasticsearch/gc/ingest/pipeline.yml +++ b/filebeat/module/elasticsearch/gc/ingest/pipeline.yml @@ -36,9 +36,9 @@ processors: PROCTIME: '\[Times: user=%{BASE10NUM:elasticsearch.gc.phase.cpu_time.user_sec} sys=%{BASE10NUM:elasticsearch.gc.phase.cpu_time.sys_sec}, real=%{BASE10NUM:elasticsearch.gc.phase.cpu_time.real_sec} secs\]' -- rename: - field: '@timestamp' - target_field: event.created +- set: + copy_from: "@timestamp" + field: event.created - date: field: timestamp target_field: '@timestamp' diff --git a/filebeat/module/elasticsearch/server/ingest/pipeline-json-7.yml b/filebeat/module/elasticsearch/server/ingest/pipeline-json-7.yml new file mode 100644 index 00000000000..d92dd640772 --- /dev/null +++ b/filebeat/module/elasticsearch/server/ingest/pipeline-json-7.yml @@ -0,0 +1,101 @@ +description: Pipeline for parsing the Elasticsearch 7.x server log file in JSON format. +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' +processors: +- drop: + if: ctx.elasticsearch.server.type != 'server' +- remove: + field: elasticsearch.server.type +- dot_expander: + field: service.name + path: elasticsearch.server +- rename: + field: elasticsearch.server.service.name + target_field: service.name + ignore_missing: true +- rename: + field: elasticsearch.server.component + target_field: elasticsearch.component + ignore_missing: true +- dot_expander: + field: cluster.name + path: elasticsearch.server +- rename: + field: elasticsearch.server.cluster.name + target_field: elasticsearch.cluster.name +- dot_expander: + field: node.name + path: elasticsearch.server +- rename: + field: elasticsearch.server.node.name + target_field: elasticsearch.node.name +- dot_expander: + field: cluster.uuid + path: elasticsearch.server +- rename: + field: elasticsearch.server.cluster.uuid + target_field: elasticsearch.cluster.uuid + ignore_missing: true +- dot_expander: + field: node.id + path: elasticsearch.server +- rename: + field: elasticsearch.server.node.id + target_field: elasticsearch.node.id + ignore_missing: true +- rename: + field: elasticsearch.server.level + target_field: log.level + ignore_missing: true +- dot_expander: + field: log.level + path: elasticsearch.server +- rename: + field: elasticsearch.server.log.level + target_field: log.level + ignore_missing: true +- dot_expander: + field: log.logger + path: elasticsearch.server +- rename: + field: elasticsearch.server.log.logger + target_field: log.logger + ignore_missing: true +- dot_expander: + field: process.thread.name + path: elasticsearch.server +- rename: + field: elasticsearch.server.process.thread.name + target_field: process.thread.name + ignore_missing: true +- grok: + field: elasticsearch.server.message + pattern_definitions: + GREEDYMULTILINE: |- + (.| + )* + INDEXNAME: '[a-zA-Z0-9_.-]*' + GC_ALL: 
\[gc\]\[%{NUMBER:elasticsearch.server.gc.overhead_seq}\] overhead, spent + \[%{NUMBER:elasticsearch.server.gc.collection_duration.time:float}%{DATA:elasticsearch.server.gc.collection_duration.unit}\] + collecting in the last \[%{NUMBER:elasticsearch.server.gc.observation_duration.time:float}%{DATA:elasticsearch.server.gc.observation_duration.unit}\] + GC_YOUNG: \[gc\]\[young\]\[%{NUMBER:elasticsearch.server.gc.young.one}\]\[%{NUMBER:elasticsearch.server.gc.young.two}\]%{SPACE}%{GREEDYMULTILINE:message} + patterns: + - '%{GC_ALL}' + - '%{GC_YOUNG}' + - ((\[%{INDEXNAME:elasticsearch.index.name}\]|\[%{INDEXNAME:elasticsearch.index.name}\/%{DATA:elasticsearch.index.id}\]))?%{SPACE}%{GREEDYMULTILINE:message} +- remove: + field: elasticsearch.server.message +- set: + field: '@timestamp' + value: '{{ elasticsearch.server.timestamp }}' + ignore_empty_value: true +- remove: + field: elasticsearch.server.timestamp +- date: + field: '@timestamp' + target_field: '@timestamp' + formats: + - ISO8601 + ignore_failure: true diff --git a/filebeat/module/elasticsearch/server/ingest/pipeline-json-8.yml b/filebeat/module/elasticsearch/server/ingest/pipeline-json-8.yml new file mode 100644 index 00000000000..7619050a4cb --- /dev/null +++ b/filebeat/module/elasticsearch/server/ingest/pipeline-json-8.yml @@ -0,0 +1,109 @@ +description: Pipeline for parsing the Elasticsearch 8.0 server log file in JSON format. +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' +processors: +- dot_expander: + field: event.dataset + path: elasticsearch.server +- drop: + if: ctx.elasticsearch.server.event.dataset != 'elasticsearch.server' +- set: + value: '{{ elasticsearch.server.event.dataset }}' + field: event.dataset + ignore_empty_value: true +- remove: + field: elasticsearch.server.event.dataset +- dot_expander: + field: ecs.version + path: elasticsearch.server +- set: + value: '{{ elasticsearch.server.ecs.version }}' + field: ecs.version + ignore_empty_value: true +- remove: + field: elasticsearch.server.ecs.version +- dot_expander: + field: service.name + path: elasticsearch.server +- rename: + field: elasticsearch.server.service.name + target_field: service.name + ignore_missing: true +- dot_expander: + field: elasticsearch.cluster.name + path: elasticsearch.server +- rename: + field: elasticsearch.server.elasticsearch.cluster.name + target_field: elasticsearch.cluster.name +- dot_expander: + field: elasticsearch.node.name + path: elasticsearch.server +- rename: + field: elasticsearch.server.elasticsearch.node.name + target_field: elasticsearch.node.name +- dot_expander: + field: elasticsearch.cluster.uuid + path: elasticsearch.server +- rename: + field: elasticsearch.server.elasticsearch.cluster.uuid + target_field: elasticsearch.cluster.uuid + ignore_missing: true +- dot_expander: + field: elasticsearch.node.id + path: elasticsearch.server +- rename: + field: elasticsearch.server.elasticsearch.node.id + target_field: elasticsearch.node.id + ignore_missing: true +- dot_expander: + field: log.level + path: elasticsearch.server +- rename: + field: elasticsearch.server.log.level + target_field: log.level + ignore_missing: true +- dot_expander: + field: log.logger + path: elasticsearch.server +- rename: + field: elasticsearch.server.log.logger + target_field: log.logger + ignore_missing: true +- dot_expander: + field: process.thread.name + path: elasticsearch.server +- rename: + field: elasticsearch.server.process.thread.name + target_field: process.thread.name + ignore_missing: true +- 
grok: + field: elasticsearch.server.message + pattern_definitions: + GREEDYMULTILINE: |- + (.| + )* + INDEXNAME: '[a-zA-Z0-9_.-]*' + GC_ALL: \[gc\]\[%{NUMBER:elasticsearch.server.gc.overhead_seq}\] overhead, spent + \[%{NUMBER:elasticsearch.server.gc.collection_duration.time:float}%{DATA:elasticsearch.server.gc.collection_duration.unit}\] + collecting in the last \[%{NUMBER:elasticsearch.server.gc.observation_duration.time:float}%{DATA:elasticsearch.server.gc.observation_duration.unit}\] + GC_YOUNG: \[gc\]\[young\]\[%{NUMBER:elasticsearch.server.gc.young.one}\]\[%{NUMBER:elasticsearch.server.gc.young.two}\]%{SPACE}%{GREEDYMULTILINE:message} + patterns: + - '%{GC_ALL}' + - '%{GC_YOUNG}' + - ((\[%{INDEXNAME:elasticsearch.index.name}\]|\[%{INDEXNAME:elasticsearch.index.name}\/%{DATA:elasticsearch.index.id}\]))?%{SPACE}%{GREEDYMULTILINE:message} +- remove: + field: elasticsearch.server.message +- set: + field: '@timestamp' + value: '{{ elasticsearch.server.@timestamp }}' + ignore_empty_value: true +- remove: + field: elasticsearch.server.@timestamp +- date: + field: '@timestamp' + target_field: '@timestamp' + formats: + - ISO8601 + ignore_failure: true diff --git a/filebeat/module/elasticsearch/server/ingest/pipeline-json.yml b/filebeat/module/elasticsearch/server/ingest/pipeline-json.yml index c3b655643ed..e5b23aabfd9 100644 --- a/filebeat/module/elasticsearch/server/ingest/pipeline-json.yml +++ b/filebeat/module/elasticsearch/server/ingest/pipeline-json.yml @@ -7,100 +7,9 @@ processors: - json: field: message target_field: elasticsearch.server -- drop: - if: ctx.elasticsearch.server.type != 'server' -- remove: - field: elasticsearch.server.type -- dot_expander: - field: service.name - path: elasticsearch.server -- rename: - field: elasticsearch.server.service.name - target_field: service.name - ignore_missing: true -- rename: - field: elasticsearch.server.component - target_field: elasticsearch.component - ignore_missing: true -- dot_expander: - field: cluster.name - path: elasticsearch.server -- rename: - field: elasticsearch.server.cluster.name - target_field: elasticsearch.cluster.name -- dot_expander: - field: node.name - path: elasticsearch.server -- rename: - field: elasticsearch.server.node.name - target_field: elasticsearch.node.name -- dot_expander: - field: cluster.uuid - path: elasticsearch.server -- rename: - field: elasticsearch.server.cluster.uuid - target_field: elasticsearch.cluster.uuid - ignore_missing: true -- dot_expander: - field: node.id - path: elasticsearch.server -- rename: - field: elasticsearch.server.node.id - target_field: elasticsearch.node.id - ignore_missing: true -- rename: - field: elasticsearch.server.level - target_field: log.level - ignore_missing: true -- dot_expander: - field: log.level - path: elasticsearch.server -- rename: - field: elasticsearch.server.log.level - target_field: log.level - ignore_missing: true -- dot_expander: - field: log.logger - path: elasticsearch.server -- rename: - field: elasticsearch.server.log.logger - target_field: log.logger - ignore_missing: true -- dot_expander: - field: process.thread.name - path: elasticsearch.server -- rename: - field: elasticsearch.server.process.thread.name - target_field: process.thread.name - ignore_missing: true -- grok: - field: elasticsearch.server.message - pattern_definitions: - GREEDYMULTILINE: |- - (.| - )* - INDEXNAME: '[a-zA-Z0-9_.-]*' - GC_ALL: \[gc\]\[%{NUMBER:elasticsearch.server.gc.overhead_seq}\] overhead, spent - 
\[%{NUMBER:elasticsearch.server.gc.collection_duration.time:float}%{DATA:elasticsearch.server.gc.collection_duration.unit}\] - collecting in the last \[%{NUMBER:elasticsearch.server.gc.observation_duration.time:float}%{DATA:elasticsearch.server.gc.observation_duration.unit}\] - GC_YOUNG: \[gc\]\[young\]\[%{NUMBER:elasticsearch.server.gc.young.one}\]\[%{NUMBER:elasticsearch.server.gc.young.two}\]%{SPACE}%{GREEDYMULTILINE:message} - patterns: - - '%{GC_ALL}' - - '%{GC_YOUNG}' - - ((\[%{INDEXNAME:elasticsearch.index.name}\]|\[%{INDEXNAME:elasticsearch.index.name}\/%{DATA:elasticsearch.index.id}\]))?%{SPACE}%{GREEDYMULTILINE:message} -- remove: - field: elasticsearch.server.message -- rename: - field: elasticsearch.server.@timestamp - target_field: '@timestamp' - ignore_missing: true -- rename: - field: elasticsearch.server.timestamp - target_field: '@timestamp' - ignore_missing: true -- date: - field: '@timestamp' - target_field: '@timestamp' - formats: - - ISO8601 - ignore_failure: true +- pipeline: + if: ctx.elasticsearch.server.containsKey('type') + name: '{< IngestPipeline "pipeline-json-7" >}' +- pipeline: + if: ctx.elasticsearch.server.containsKey('ecs.version') + name: '{< IngestPipeline "pipeline-json-8" >}' diff --git a/filebeat/module/elasticsearch/server/ingest/pipeline.yml b/filebeat/module/elasticsearch/server/ingest/pipeline.yml index 4d4e634cc4b..32abc88dae4 100644 --- a/filebeat/module/elasticsearch/server/ingest/pipeline.yml +++ b/filebeat/module/elasticsearch/server/ingest/pipeline.yml @@ -3,9 +3,9 @@ processors: - set: field: event.ingested value: '{{_ingest.timestamp}}' -- rename: - field: '@timestamp' - target_field: event.created +- set: + copy_from: "@timestamp" + field: event.created - grok: field: message patterns: diff --git a/filebeat/module/elasticsearch/server/manifest.yml b/filebeat/module/elasticsearch/server/manifest.yml index 406972cba56..d9d5d4e398b 100644 --- a/filebeat/module/elasticsearch/server/manifest.yml +++ b/filebeat/module/elasticsearch/server/manifest.yml @@ -16,4 +16,6 @@ ingest_pipeline: - ingest/pipeline.yml - ingest/pipeline-plaintext.yml - ingest/pipeline-json.yml + - ingest/pipeline-json-7.yml + - ingest/pipeline-json-8.yml input: config/log.yml diff --git a/filebeat/module/elasticsearch/server/test/elasticsearch-json.800.log b/filebeat/module/elasticsearch/server/test/elasticsearch-json.800.log index b7119ffc069..78c9f51fd7d 100644 --- a/filebeat/module/elasticsearch/server/test/elasticsearch-json.800.log +++ b/filebeat/module/elasticsearch/server/test/elasticsearch-json.800.log @@ -1,3 +1,3 @@ -{"@timestamp":"2020-04-14T14:05:58.019Z", "log.level": "INFO", "message":"adding template [.management-beats] for index patterns [.management-beats]", "service.name":"ES_ECS","process.thread.name":"elasticsearch[CBR-MBP.local][masterService#updateTask][T#1]","log.logger":"org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService","type":"server","cluster.uuid":"ECEBF2VPQuCF9tbBKaLqXQ","node.id":"suOYiQwuRvialOY-c0wHLA","node.name":"CBR-MBP.local","cluster.name":"elasticsearch"} -{"@timestamp":"2020-04-14T20:57:49.663Z", "log.level": "INFO", "message":"[test-filebeat-modules] creating index, cause [auto(bulk api)], templates [test-filebeat-modules], shards [1]/[1], mappings [_doc]", 
"service.name":"ES_ECS","process.thread.name":"elasticsearch[7debcb878699][masterService#updateTask][T#1]","log.logger":"org.elasticsearch.cluster.metadata.MetadataCreateIndexService","type":"server","cluster.uuid":"QxYAE76DTAWkgk9CwIRedQ","node.id":"kZnYdakGTqihZQT_1rM92g","node.name":"7debcb878699","cluster.name":"docker-cluster"} -{"@timestamp":"2020-04-14T20:57:49.772Z", "log.level": "INFO", "message":"[test-filebeat-modules/IW1jJcOBTFeIDihqjoT8yQ] update_mapping [_doc]", "service.name":"ES_ECS","process.thread.name":"elasticsearch[7debcb878699][masterService#updateTask][T#1]","log.logger":"org.elasticsearch.cluster.metadata.MetadataMappingService","type":"server","cluster.uuid":"QxYAE76DTAWkgk9CwIRedQ","node.id":"kZnYdakGTqihZQT_1rM92g","node.name":"7debcb878699","cluster.name":"docker-cluster"} +{"@timestamp":"2022-01-25T15:12:08.472Z", "log.level": "INFO", "message":"adding template [.monitoring-kibana] for index patterns [.monitoring-kibana-7-*]", "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"elasticsearch.server","process.thread.name":"elasticsearch[matschaffer-mbp2019.lan][masterService#updateTask][T#1]","log.logger":"org.elasticsearch.cluster.metadata.MetadataIndexTemplateService","elasticsearch.cluster.uuid":"28iKoFsvTJ6HEyXbdLL-PQ","elasticsearch.node.id":"tc3nhgC0SFCKfwwy6jCmkw","elasticsearch.node.name":"matschaffer-mbp2019.lan","elasticsearch.cluster.name":"main"} +{"@timestamp":"2022-01-25T15:12:08.588Z", "log.level": "INFO", "message":"adding template [.monitoring-logstash] for index patterns [.monitoring-logstash-7-*]", "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"elasticsearch.server","process.thread.name":"elasticsearch[matschaffer-mbp2019.lan][masterService#updateTask][T#1]","log.logger":"org.elasticsearch.cluster.metadata.MetadataIndexTemplateService","elasticsearch.cluster.uuid":"28iKoFsvTJ6HEyXbdLL-PQ","elasticsearch.node.id":"tc3nhgC0SFCKfwwy6jCmkw","elasticsearch.node.name":"matschaffer-mbp2019.lan","elasticsearch.cluster.name":"main"} +{"@timestamp":"2022-01-25T15:12:08.686Z", "log.level": "INFO", "message":"adding template [.monitoring-alerts-7] for index patterns [.monitoring-alerts-7]", "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"elasticsearch.server","process.thread.name":"elasticsearch[matschaffer-mbp2019.lan][masterService#updateTask][T#1]","log.logger":"org.elasticsearch.cluster.metadata.MetadataIndexTemplateService","elasticsearch.cluster.uuid":"28iKoFsvTJ6HEyXbdLL-PQ","elasticsearch.node.id":"tc3nhgC0SFCKfwwy6jCmkw","elasticsearch.node.name":"matschaffer-mbp2019.lan","elasticsearch.cluster.name":"main"} diff --git a/filebeat/module/elasticsearch/server/test/elasticsearch-json.800.log-expected.json b/filebeat/module/elasticsearch/server/test/elasticsearch-json.800.log-expected.json index 817cadf6002..228661fc9c3 100644 --- a/filebeat/module/elasticsearch/server/test/elasticsearch-json.800.log-expected.json +++ b/filebeat/module/elasticsearch/server/test/elasticsearch-json.800.log-expected.json @@ -1,70 +1,67 @@ [ { - "@timestamp": "2020-04-14T14:05:58.019Z", - "elasticsearch.cluster.name": "elasticsearch", - "elasticsearch.cluster.uuid": "ECEBF2VPQuCF9tbBKaLqXQ", - "elasticsearch.node.id": "suOYiQwuRvialOY-c0wHLA", - "elasticsearch.node.name": "CBR-MBP.local", + "@timestamp": "2022-01-25T15:12:08.472Z", + "elasticsearch.cluster.name": "main", + "elasticsearch.cluster.uuid": "28iKoFsvTJ6HEyXbdLL-PQ", + "elasticsearch.node.id": "tc3nhgC0SFCKfwwy6jCmkw", + "elasticsearch.node.name": 
"matschaffer-mbp2019.lan", "event.category": "database", "event.dataset": "elasticsearch.server", "event.kind": "event", "event.module": "elasticsearch", "event.type": "info", "fileset.name": "server", - "host.id": "suOYiQwuRvialOY-c0wHLA", + "host.id": "tc3nhgC0SFCKfwwy6jCmkw", "input.type": "log", "log.level": "INFO", - "log.logger": "org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService", + "log.logger": "org.elasticsearch.cluster.metadata.MetadataIndexTemplateService", "log.offset": 0, - "message": "adding template [.management-beats] for index patterns [.management-beats]", - "process.thread.name": "elasticsearch[CBR-MBP.local][masterService#updateTask][T#1]", + "message": "adding template [.monitoring-kibana] for index patterns [.monitoring-kibana-7-*]", + "process.thread.name": "elasticsearch[matschaffer-mbp2019.lan][masterService#updateTask][T#1]", "service.name": "ES_ECS", "service.type": "elasticsearch" }, { - "@timestamp": "2020-04-14T20:57:49.663Z", - "elasticsearch.cluster.name": "docker-cluster", - "elasticsearch.cluster.uuid": "QxYAE76DTAWkgk9CwIRedQ", - "elasticsearch.index.name": "test-filebeat-modules", - "elasticsearch.node.id": "kZnYdakGTqihZQT_1rM92g", - "elasticsearch.node.name": "7debcb878699", + "@timestamp": "2022-01-25T15:12:08.588Z", + "elasticsearch.cluster.name": "main", + "elasticsearch.cluster.uuid": "28iKoFsvTJ6HEyXbdLL-PQ", + "elasticsearch.node.id": "tc3nhgC0SFCKfwwy6jCmkw", + "elasticsearch.node.name": "matschaffer-mbp2019.lan", "event.category": "database", "event.dataset": "elasticsearch.server", "event.kind": "event", "event.module": "elasticsearch", "event.type": "info", "fileset.name": "server", - "host.id": "kZnYdakGTqihZQT_1rM92g", + "host.id": "tc3nhgC0SFCKfwwy6jCmkw", "input.type": "log", "log.level": "INFO", - "log.logger": "org.elasticsearch.cluster.metadata.MetadataCreateIndexService", - "log.offset": 489, - "message": "creating index, cause [auto(bulk api)], templates [test-filebeat-modules], shards [1]/[1], mappings [_doc]", - "process.thread.name": "elasticsearch[7debcb878699][masterService#updateTask][T#1]", + "log.logger": "org.elasticsearch.cluster.metadata.MetadataIndexTemplateService", + "log.offset": 608, + "message": "adding template [.monitoring-logstash] for index patterns [.monitoring-logstash-7-*]", + "process.thread.name": "elasticsearch[matschaffer-mbp2019.lan][masterService#updateTask][T#1]", "service.name": "ES_ECS", "service.type": "elasticsearch" }, { - "@timestamp": "2020-04-14T20:57:49.772Z", - "elasticsearch.cluster.name": "docker-cluster", - "elasticsearch.cluster.uuid": "QxYAE76DTAWkgk9CwIRedQ", - "elasticsearch.index.id": "IW1jJcOBTFeIDihqjoT8yQ", - "elasticsearch.index.name": "test-filebeat-modules", - "elasticsearch.node.id": "kZnYdakGTqihZQT_1rM92g", - "elasticsearch.node.name": "7debcb878699", + "@timestamp": "2022-01-25T15:12:08.686Z", + "elasticsearch.cluster.name": "main", + "elasticsearch.cluster.uuid": "28iKoFsvTJ6HEyXbdLL-PQ", + "elasticsearch.node.id": "tc3nhgC0SFCKfwwy6jCmkw", + "elasticsearch.node.name": "matschaffer-mbp2019.lan", "event.category": "database", "event.dataset": "elasticsearch.server", "event.kind": "event", "event.module": "elasticsearch", "event.type": "info", "fileset.name": "server", - "host.id": "kZnYdakGTqihZQT_1rM92g", + "host.id": "tc3nhgC0SFCKfwwy6jCmkw", "input.type": "log", "log.level": "INFO", - "log.logger": "org.elasticsearch.cluster.metadata.MetadataMappingService", - "log.offset": 1031, - "message": "update_mapping [_doc]", - "process.thread.name": 
"elasticsearch[7debcb878699][masterService#updateTask][T#1]", + "log.logger": "org.elasticsearch.cluster.metadata.MetadataIndexTemplateService", + "log.offset": 1220, + "message": "adding template [.monitoring-alerts-7] for index patterns [.monitoring-alerts-7]", + "process.thread.name": "elasticsearch[matschaffer-mbp2019.lan][masterService#updateTask][T#1]", "service.name": "ES_ECS", "service.type": "elasticsearch" } diff --git a/filebeat/module/elasticsearch/slowlog/ingest/pipeline-json-7.yml b/filebeat/module/elasticsearch/slowlog/ingest/pipeline-json-7.yml new file mode 100644 index 00000000000..00ce95ccaa1 --- /dev/null +++ b/filebeat/module/elasticsearch/slowlog/ingest/pipeline-json-7.yml @@ -0,0 +1,120 @@ +description: Pipeline for parsing the Elasticsearch slow logs in JSON format. +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' +processors: +- json: + field: message + target_field: elasticsearch.slowlog +- drop: + if: ctx.elasticsearch.slowlog.type != 'index_indexing_slowlog' && ctx.elasticsearch.slowlog.type + != 'index_search_slowlog' +- remove: + field: elasticsearch.slowlog.type +- dot_expander: + field: service.name + path: elasticsearch.slowlog +- rename: + field: elasticsearch.slowlog.service.name + target_field: service.name + ignore_missing: true +- rename: + field: elasticsearch.slowlog.level + target_field: log.level + ignore_missing: true +- dot_expander: + field: log.level + path: elasticsearch.slowlog +- rename: + field: elasticsearch.slowlog.log.level + target_field: log.level + ignore_missing: true +- dot_expander: + field: log.logger + path: elasticsearch.slowlog +- rename: + field: elasticsearch.slowlog.log.logger + target_field: log.logger + ignore_missing: true +- dot_expander: + field: process.thread.name + path: elasticsearch.slowlog +- rename: + field: elasticsearch.slowlog.process.thread.name + target_field: process.thread.name + ignore_missing: true +- rename: + field: elasticsearch.slowlog.component + target_field: elasticsearch.component + ignore_missing: true +- dot_expander: + field: cluster.name + path: elasticsearch.slowlog +- rename: + field: elasticsearch.slowlog.cluster.name + target_field: elasticsearch.cluster.name +- dot_expander: + field: node.name + path: elasticsearch.slowlog +- rename: + field: elasticsearch.slowlog.node.name + target_field: elasticsearch.node.name +- dot_expander: + field: cluster.uuid + path: elasticsearch.slowlog +- rename: + field: elasticsearch.slowlog.cluster.uuid + target_field: elasticsearch.cluster.uuid + ignore_missing: true +- dot_expander: + field: node.id + path: elasticsearch.slowlog +- rename: + field: elasticsearch.slowlog.node.id + target_field: elasticsearch.node.id + ignore_missing: true +- rename: + field: elasticsearch.slowlog.doc_type + target_field: elasticsearch.slowlog.types + ignore_missing: true +- convert: + field: elasticsearch.slowlog.took_millis + type: float + ignore_missing: true +- rename: + field: elasticsearch.slowlog.took_millis + target_field: elasticsearch.slowlog.duration + ignore_missing: true +- grok: + field: elasticsearch.slowlog.message + pattern_definitions: + GREEDYMULTILINE: |- + (.| + )* + INDEXNAME: '[a-zA-Z0-9_.-]*' + patterns: + - 
(\[%{INDEXNAME:elasticsearch.index.name}\]\[%{NUMBER:elasticsearch.shard.id}\])?(%{SPACE})(\[%{INDEXNAME:elasticsearch.index.name}\/%{DATA:elasticsearch.index.id}\])?(%{SPACE})%{SPACE}(took\[%{DATA:elasticsearch.slowlog.took}\],)?%{SPACE}(took_millis\[%{NUMBER:elasticsearch.slowlog.duration:long}\],)?%{SPACE}(type\[%{DATA:elasticsearch.slowlog.type}\],)?%{SPACE}(id\[%{DATA:elasticsearch.slowlog.id}\],)?%{SPACE}(routing\[%{DATA:elasticsearch.slowlog.routing}\],)?%{SPACE}(total_hits\[%{NUMBER:elasticsearch.slowlog.total_hits:int}\],)?%{SPACE}(types\[%{DATA:elasticsearch.slowlog.types}\],)?%{SPACE}(stats\[%{DATA:elasticsearch.slowlog.stats}\],)?%{SPACE}(search_type\[%{DATA:elasticsearch.slowlog.search_type}\],)?%{SPACE}(total_shards\[%{NUMBER:elasticsearch.slowlog.total_shards:int}\],)?%{SPACE}(source\[%{GREEDYMULTILINE:elasticsearch.slowlog.source_query}\])?,?%{SPACE}(extra_source\[%{DATA:elasticsearch.slowlog.extra_source}\])?,? + - \[%{INDEXNAME:elasticsearch.index.name}\]\[%{NUMBER:elasticsearch.shard.id}\] +- remove: + field: elasticsearch.slowlog.message +- set: + value: "{{ elasticsearch.slowlog.@timestamp }}" + field: "@timestamp" + ignore_empty_value: true +- set: + value: "{{ elasticsearch.slowlog.timestamp }}" + field: "@timestamp" + ignore_empty_value: true +- remove: + field: elasticsearch.slowlog.@timestamp + ignore_missing: true +- remove: + field: elasticsearch.slowlog.timestamp + ignore_missing: true +- date: + field: '@timestamp' + target_field: '@timestamp' + formats: + - ISO8601 + ignore_failure: true diff --git a/filebeat/module/elasticsearch/slowlog/ingest/pipeline-json-8.yml b/filebeat/module/elasticsearch/slowlog/ingest/pipeline-json-8.yml new file mode 100644 index 00000000000..3e0479d59ea --- /dev/null +++ b/filebeat/module/elasticsearch/slowlog/ingest/pipeline-json-8.yml @@ -0,0 +1,36 @@ +description: Pipeline for parsing the Elasticsearch slow logs in JSON format. +on_failure: +- set: + field: error.message + value: '{{ _ingest.on_failure_message }}' +processors: +- json: + field: message + add_to_root: true +- dot_expander: + field: '*' + override: true +- convert: + field: elasticsearch.slowlog.took_millis + type: float + ignore_missing: true +- rename: + field: elasticsearch.slowlog.took_millis + target_field: elasticsearch.slowlog.duration + ignore_missing: true +- grok: + field: elasticsearch.slowlog.message + pattern_definitions: + GREEDYMULTILINE: |- + (.| + )* + INDEXNAME: '[a-zA-Z0-9_.-]*' + patterns: + - (\[%{INDEXNAME:elasticsearch.index.name}\]\[%{NUMBER:elasticsearch.shard.id}\])?(%{SPACE})(\[%{INDEXNAME:elasticsearch.index.name}\/%{DATA:elasticsearch.index.id}\])?(%{SPACE})%{SPACE}(took\[%{DATA:elasticsearch.slowlog.took}\],)?%{SPACE}(took_millis\[%{NUMBER:elasticsearch.slowlog.duration:long}\],)?%{SPACE}(type\[%{DATA:elasticsearch.slowlog.type}\],)?%{SPACE}(id\[%{DATA:elasticsearch.slowlog.id}\],)?%{SPACE}(routing\[%{DATA:elasticsearch.slowlog.routing}\],)?%{SPACE}(total_hits\[%{NUMBER:elasticsearch.slowlog.total_hits:int}\],)?%{SPACE}(types\[%{DATA:elasticsearch.slowlog.types}\],)?%{SPACE}(stats\[%{DATA:elasticsearch.slowlog.stats}\],)?%{SPACE}(search_type\[%{DATA:elasticsearch.slowlog.search_type}\],)?%{SPACE}(total_shards\[%{NUMBER:elasticsearch.slowlog.total_shards:int}\],)?%{SPACE}(source\[%{GREEDYMULTILINE:elasticsearch.slowlog.source_query}\])?,?%{SPACE}(extra_source\[%{DATA:elasticsearch.slowlog.extra_source}\])?,? 
+ - \[%{INDEXNAME:elasticsearch.index.name}\]\[%{NUMBER:elasticsearch.shard.id}\] +- set: + field: message + value: '{{ elasticsearch.slowlog.message }}' + ignore_empty_value: true +- remove: + field: elasticsearch.slowlog.message diff --git a/filebeat/module/elasticsearch/slowlog/ingest/pipeline-json.yml b/filebeat/module/elasticsearch/slowlog/ingest/pipeline-json.yml index 174a429946a..614c9f7aa43 100644 --- a/filebeat/module/elasticsearch/slowlog/ingest/pipeline-json.yml +++ b/filebeat/module/elasticsearch/slowlog/ingest/pipeline-json.yml @@ -4,111 +4,9 @@ on_failure: field: error.message value: '{{ _ingest.on_failure_message }}' processors: -- json: - field: message - target_field: elasticsearch.slowlog -- drop: - if: ctx.elasticsearch.slowlog.type != 'index_indexing_slowlog' && ctx.elasticsearch.slowlog.type - != 'index_search_slowlog' -- remove: - field: elasticsearch.slowlog.type -- dot_expander: - field: service.name - path: elasticsearch.slowlog -- rename: - field: elasticsearch.slowlog.service.name - target_field: service.name - ignore_missing: true -- rename: - field: elasticsearch.slowlog.level - target_field: log.level - ignore_missing: true -- dot_expander: - field: log.level - path: elasticsearch.slowlog -- rename: - field: elasticsearch.slowlog.log.level - target_field: log.level - ignore_missing: true -- dot_expander: - field: log.logger - path: elasticsearch.slowlog -- rename: - field: elasticsearch.slowlog.log.logger - target_field: log.logger - ignore_missing: true -- dot_expander: - field: process.thread.name - path: elasticsearch.slowlog -- rename: - field: elasticsearch.slowlog.process.thread.name - target_field: process.thread.name - ignore_missing: true -- rename: - field: elasticsearch.slowlog.component - target_field: elasticsearch.component - ignore_missing: true -- dot_expander: - field: cluster.name - path: elasticsearch.slowlog -- rename: - field: elasticsearch.slowlog.cluster.name - target_field: elasticsearch.cluster.name -- dot_expander: - field: node.name - path: elasticsearch.slowlog -- rename: - field: elasticsearch.slowlog.node.name - target_field: elasticsearch.node.name -- dot_expander: - field: cluster.uuid - path: elasticsearch.slowlog -- rename: - field: elasticsearch.slowlog.cluster.uuid - target_field: elasticsearch.cluster.uuid - ignore_missing: true -- dot_expander: - field: node.id - path: elasticsearch.slowlog -- rename: - field: elasticsearch.slowlog.node.id - target_field: elasticsearch.node.id - ignore_missing: true -- rename: - field: elasticsearch.slowlog.doc_type - target_field: elasticsearch.slowlog.types - ignore_missing: true -- convert: - field: elasticsearch.slowlog.took_millis - type: float - ignore_missing: true -- rename: - field: elasticsearch.slowlog.took_millis - target_field: elasticsearch.slowlog.duration - ignore_missing: true -- grok: - field: elasticsearch.slowlog.message - pattern_definitions: - GREEDYMULTILINE: |- - (.| - )* - INDEXNAME: '[a-zA-Z0-9_.-]*' - patterns: - - 
(\[%{INDEXNAME:elasticsearch.index.name}\]\[%{NUMBER:elasticsearch.shard.id}\])?(%{SPACE})(\[%{INDEXNAME:elasticsearch.index.name}\/%{DATA:elasticsearch.index.id}\])?(%{SPACE})%{SPACE}(took\[%{DATA:elasticsearch.slowlog.took}\],)?%{SPACE}(took_millis\[%{NUMBER:elasticsearch.slowlog.duration:long}\],)?%{SPACE}(type\[%{DATA:elasticsearch.slowlog.type}\],)?%{SPACE}(id\[%{DATA:elasticsearch.slowlog.id}\],)?%{SPACE}(routing\[%{DATA:elasticsearch.slowlog.routing}\],)?%{SPACE}(total_hits\[%{NUMBER:elasticsearch.slowlog.total_hits:int}\],)?%{SPACE}(types\[%{DATA:elasticsearch.slowlog.types}\],)?%{SPACE}(stats\[%{DATA:elasticsearch.slowlog.stats}\],)?%{SPACE}(search_type\[%{DATA:elasticsearch.slowlog.search_type}\],)?%{SPACE}(total_shards\[%{NUMBER:elasticsearch.slowlog.total_shards:int}\],)?%{SPACE}(source\[%{GREEDYMULTILINE:elasticsearch.slowlog.source_query}\])?,?%{SPACE}(extra_source\[%{DATA:elasticsearch.slowlog.extra_source}\])?,? - - \[%{INDEXNAME:elasticsearch.index.name}\]\[%{NUMBER:elasticsearch.shard.id}\] -- remove: - field: elasticsearch.slowlog.message -- rename: - field: elasticsearch.slowlog.@timestamp - target_field: '@timestamp' - ignore_missing: true -- rename: - field: elasticsearch.slowlog.timestamp - target_field: '@timestamp' - ignore_missing: true -- date: - field: '@timestamp' - target_field: '@timestamp' - formats: - - ISO8601 - ignore_failure: true + - pipeline: + if: '!ctx.message.contains("ecs.version")' + name: '{< IngestPipeline "pipeline-json-7" >}' + - pipeline: + if: 'ctx.message.contains("ecs.version")' + name: '{< IngestPipeline "pipeline-json-8" >}' diff --git a/filebeat/module/elasticsearch/slowlog/ingest/pipeline.yml b/filebeat/module/elasticsearch/slowlog/ingest/pipeline.yml index ea501d9b3e0..440220f1dd7 100644 --- a/filebeat/module/elasticsearch/slowlog/ingest/pipeline.yml +++ b/filebeat/module/elasticsearch/slowlog/ingest/pipeline.yml @@ -3,9 +3,9 @@ processors: - set: field: event.ingested value: '{{_ingest.timestamp}}' -- rename: - field: '@timestamp' - target_field: event.created +- set: + copy_from: "@timestamp" + field: event.created - grok: field: message patterns: diff --git a/filebeat/module/elasticsearch/slowlog/manifest.yml b/filebeat/module/elasticsearch/slowlog/manifest.yml index caddd94158b..08b49643108 100644 --- a/filebeat/module/elasticsearch/slowlog/manifest.yml +++ b/filebeat/module/elasticsearch/slowlog/manifest.yml @@ -22,4 +22,6 @@ ingest_pipeline: - ingest/pipeline.yml - ingest/pipeline-plaintext.yml - ingest/pipeline-json.yml + - ingest/pipeline-json-7.yml + - ingest/pipeline-json-8.yml input: config/slowlog.yml diff --git a/filebeat/module/elasticsearch/slowlog/test/es_indexing_slowlog.800.log b/filebeat/module/elasticsearch/slowlog/test/es_indexing_slowlog.800.log index 3704f88d189..b580682cda0 100644 --- a/filebeat/module/elasticsearch/slowlog/test/es_indexing_slowlog.800.log +++ b/filebeat/module/elasticsearch/slowlog/test/es_indexing_slowlog.800.log @@ -1,2 +1,2 @@ -{"@timestamp":"2020-04-16T11:20:02.069Z", "log.level":"TRACE", "id":"5xy3gnEBmUEb0NJ1lijF", "message":"[test_index/M4fNwSWlTfek9m1SNL49Kg]", "source":"{\\\"f", "took":"15.1ms", "took_millis":"15" , "service.name":"ES_ECS","process.thread.name":"elasticsearch[integTest-0][write][T#2]","log.logger":"index.indexing.slowlog.index.M4fNwSWlTfek9m1SNL49Kg","type":"index_indexing_slowlog","cluster.uuid":"HHmOPeWKQlSeaF88DSfFVw","node.id":"wxTr7N_gRWWg3mUdY4spbg","node.name":"integTest-0","cluster.name":"integTest"} -{"@timestamp":"2020-04-16T11:20:02.777Z", 
"log.level":"TRACE", "id":"6By3gnEBmUEb0NJ1mSij", "message":"[test_index/Jsz7IUYMQ9ubo2ahiMgCbQ]", "source":"{\\\"field\\\":123}", "took":"10.4ms", "took_millis":"10" , "service.name":"ES_ECS","process.thread.name":"elasticsearch[integTest-0][write][T#4]","log.logger":"index.indexing.slowlog.index.Jsz7IUYMQ9ubo2ahiMgCbQ","type":"index_indexing_slowlog","cluster.uuid":"HHmOPeWKQlSeaF88DSfFVw","node.id":"wxTr7N_gRWWg3mUdY4spbg","node.name":"integTest-0","cluster.name":"integTest"} +{"@timestamp":"2022-01-27T11:36:49.421Z", "log.level":"TRACE", "elasticsearch.slowlog.id":"_YRSm34B7FprLQsj6fZg","elasticsearch.slowlog.message":"[test_1/8pT6xiN_Tt-dcJWRR3LX6A]","elasticsearch.slowlog.source":"{\\\"a\\\":\\\"b\\\"}","elasticsearch.slowlog.took":"31.9ms","elasticsearch.slowlog.took_millis":"31" , "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"elasticsearch.index_indexing_slowlog","process.thread.name":"elasticsearch[runTask-0][write][T#3]","log.logger":"index.indexing.slowlog.index","trace.id":"0af7651916cd43dd8448eb211c80319c","elasticsearch.cluster.uuid":"5alW33KLT16Lp1SevDqDSQ","elasticsearch.node.id":"tVLnAGLgQum5ca6z50aqbw","elasticsearch.node.name":"runTask-0","elasticsearch.cluster.name":"runTask"} +{"@timestamp":"2022-01-27T11:39:29.508Z", "log.level":"TRACE", "elasticsearch.slowlog.id":"_oRVm34B7FprLQsjW_Zh","elasticsearch.slowlog.message":"[test_1/8pT6xiN_Tt-dcJWRR3LX6A]","elasticsearch.slowlog.source":"{\\\"a\\\":","elasticsearch.slowlog.took":"1.7ms","elasticsearch.slowlog.took_millis":"1" , "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"elasticsearch.index_indexing_slowlog","process.thread.name":"elasticsearch[runTask-0][write][T#5]","log.logger":"index.indexing.slowlog.index","trace.id":"0af7651916cd43dd8448eb211c80319c","elasticsearch.cluster.uuid":"5alW33KLT16Lp1SevDqDSQ","elasticsearch.node.id":"tVLnAGLgQum5ca6z50aqbw","elasticsearch.node.name":"runTask-0","elasticsearch.cluster.name":"runTask"} diff --git a/filebeat/module/elasticsearch/slowlog/test/es_indexing_slowlog.800.log-expected.json b/filebeat/module/elasticsearch/slowlog/test/es_indexing_slowlog.800.log-expected.json index cce5652340a..c3571decd25 100644 --- a/filebeat/module/elasticsearch/slowlog/test/es_indexing_slowlog.800.log-expected.json +++ b/filebeat/module/elasticsearch/slowlog/test/es_indexing_slowlog.800.log-expected.json @@ -1,58 +1,60 @@ [ { - "@timestamp": "2020-04-16T11:20:02.069Z", - "elasticsearch.cluster.name": "integTest", - "elasticsearch.cluster.uuid": "HHmOPeWKQlSeaF88DSfFVw", - "elasticsearch.index.id": "M4fNwSWlTfek9m1SNL49Kg", - "elasticsearch.index.name": "test_index", - "elasticsearch.node.id": "wxTr7N_gRWWg3mUdY4spbg", - "elasticsearch.node.name": "integTest-0", - "elasticsearch.slowlog.id": "5xy3gnEBmUEb0NJ1lijF", - "elasticsearch.slowlog.source": "{\\\"f", - "elasticsearch.slowlog.took": "15.1ms", + "@timestamp": "2022-01-27T11:36:49.421Z", + "elasticsearch.cluster.name": "runTask", + "elasticsearch.cluster.uuid": "5alW33KLT16Lp1SevDqDSQ", + "elasticsearch.index.id": "8pT6xiN_Tt-dcJWRR3LX6A", + "elasticsearch.index.name": "test_1", + "elasticsearch.node.id": "tVLnAGLgQum5ca6z50aqbw", + "elasticsearch.node.name": "runTask-0", + "elasticsearch.slowlog.id": "_YRSm34B7FprLQsj6fZg", + "elasticsearch.slowlog.source": "{\\\"a\\\":\\\"b\\\"}", + "elasticsearch.slowlog.took": "31.9ms", "event.category": "database", - "event.dataset": "elasticsearch.slowlog", - "event.duration": 15000000, + "event.dataset": "elasticsearch.index_indexing_slowlog", + "event.duration": 
31000000, "event.kind": "event", "event.module": "elasticsearch", "event.type": "info", "fileset.name": "slowlog", - "host.id": "wxTr7N_gRWWg3mUdY4spbg", + "host.id": "tVLnAGLgQum5ca6z50aqbw", "input.type": "log", "log.level": "TRACE", - "log.logger": "index.indexing.slowlog.index.M4fNwSWlTfek9m1SNL49Kg", + "log.logger": "index.indexing.slowlog.index", "log.offset": 0, - "message": "{\"@timestamp\":\"2020-04-16T11:20:02.069Z\", \"log.level\":\"TRACE\", \"id\":\"5xy3gnEBmUEb0NJ1lijF\", \"message\":\"[test_index/M4fNwSWlTfek9m1SNL49Kg]\", \"source\":\"{\\\\\\\"f\", \"took\":\"15.1ms\", \"took_millis\":\"15\" , \"service.name\":\"ES_ECS\",\"process.thread.name\":\"elasticsearch[integTest-0][write][T#2]\",\"log.logger\":\"index.indexing.slowlog.index.M4fNwSWlTfek9m1SNL49Kg\",\"type\":\"index_indexing_slowlog\",\"cluster.uuid\":\"HHmOPeWKQlSeaF88DSfFVw\",\"node.id\":\"wxTr7N_gRWWg3mUdY4spbg\",\"node.name\":\"integTest-0\",\"cluster.name\":\"integTest\"}", - "process.thread.name": "elasticsearch[integTest-0][write][T#2]", + "message": "[test_1/8pT6xiN_Tt-dcJWRR3LX6A]", + "process.thread.name": "elasticsearch[runTask-0][write][T#3]", "service.name": "ES_ECS", - "service.type": "elasticsearch" + "service.type": "elasticsearch", + "trace.id": "0af7651916cd43dd8448eb211c80319c" }, { - "@timestamp": "2020-04-16T11:20:02.777Z", - "elasticsearch.cluster.name": "integTest", - "elasticsearch.cluster.uuid": "HHmOPeWKQlSeaF88DSfFVw", - "elasticsearch.index.id": "Jsz7IUYMQ9ubo2ahiMgCbQ", - "elasticsearch.index.name": "test_index", - "elasticsearch.node.id": "wxTr7N_gRWWg3mUdY4spbg", - "elasticsearch.node.name": "integTest-0", - "elasticsearch.slowlog.id": "6By3gnEBmUEb0NJ1mSij", - "elasticsearch.slowlog.source": "{\\\"field\\\":123}", - "elasticsearch.slowlog.took": "10.4ms", + "@timestamp": "2022-01-27T11:39:29.508Z", + "elasticsearch.cluster.name": "runTask", + "elasticsearch.cluster.uuid": "5alW33KLT16Lp1SevDqDSQ", + "elasticsearch.index.id": "8pT6xiN_Tt-dcJWRR3LX6A", + "elasticsearch.index.name": "test_1", + "elasticsearch.node.id": "tVLnAGLgQum5ca6z50aqbw", + "elasticsearch.node.name": "runTask-0", + "elasticsearch.slowlog.id": "_oRVm34B7FprLQsjW_Zh", + "elasticsearch.slowlog.source": "{\\\"a\\\":", + "elasticsearch.slowlog.took": "1.7ms", "event.category": "database", - "event.dataset": "elasticsearch.slowlog", - "event.duration": 10000000, + "event.dataset": "elasticsearch.index_indexing_slowlog", + "event.duration": 1000000, "event.kind": "event", "event.module": "elasticsearch", "event.type": "info", "fileset.name": "slowlog", - "host.id": "wxTr7N_gRWWg3mUdY4spbg", + "host.id": "tVLnAGLgQum5ca6z50aqbw", "input.type": "log", "log.level": "TRACE", - "log.logger": "index.indexing.slowlog.index.Jsz7IUYMQ9ubo2ahiMgCbQ", - "log.offset": 514, - "message": "{\"@timestamp\":\"2020-04-16T11:20:02.777Z\", \"log.level\":\"TRACE\", \"id\":\"6By3gnEBmUEb0NJ1mSij\", \"message\":\"[test_index/Jsz7IUYMQ9ubo2ahiMgCbQ]\", \"source\":\"{\\\\\\\"field\\\\\\\":123}\", \"took\":\"10.4ms\", \"took_millis\":\"10\" , \"service.name\":\"ES_ECS\",\"process.thread.name\":\"elasticsearch[integTest-0][write][T#4]\",\"log.logger\":\"index.indexing.slowlog.index.Jsz7IUYMQ9ubo2ahiMgCbQ\",\"type\":\"index_indexing_slowlog\",\"cluster.uuid\":\"HHmOPeWKQlSeaF88DSfFVw\",\"node.id\":\"wxTr7N_gRWWg3mUdY4spbg\",\"node.name\":\"integTest-0\",\"cluster.name\":\"integTest\"}", - "process.thread.name": "elasticsearch[integTest-0][write][T#4]", + "log.logger": "index.indexing.slowlog.index", + "log.offset": 750, + "message": 
"[test_1/8pT6xiN_Tt-dcJWRR3LX6A]", + "process.thread.name": "elasticsearch[runTask-0][write][T#5]", "service.name": "ES_ECS", - "service.type": "elasticsearch" + "service.type": "elasticsearch", + "trace.id": "0af7651916cd43dd8448eb211c80319c" } ] \ No newline at end of file diff --git a/filebeat/module/elasticsearch/slowlog/test/es_search_slowlog.800.log b/filebeat/module/elasticsearch/slowlog/test/es_search_slowlog.800.log index b817ea08fe7..d113ad63f1f 100644 --- a/filebeat/module/elasticsearch/slowlog/test/es_search_slowlog.800.log +++ b/filebeat/module/elasticsearch/slowlog/test/es_search_slowlog.800.log @@ -1,2 +1,3 @@ -{"@timestamp":"2020-04-16T11:20:02.828Z", "log.level":"TRACE", "id":"null", "message":"[test_index][0]", "search_type":"QUERY_THEN_FETCH", "source":"{\\\"query\\\":{\\\"match_all\\\":{\\\"boost\\\":1.0}}}", "stats":"[]", "took":"10ms", "took_millis":"10", "total_hits":"0 hits", "total_shards":"1" , "service.name":"ES_ECS","process.thread.name":"elasticsearch[integTest-0][search][T#1]","log.logger":"index.search.slowlog.query.Jsz7IUYMQ9ubo2ahiMgCbQ","type":"index_search_slowlog","cluster.uuid":"HHmOPeWKQlSeaF88DSfFVw","node.id":"wxTr7N_gRWWg3mUdY4spbg","node.name":"integTest-0","cluster.name":"integTest"} -{"@timestamp":"2020-04-16T11:20:02.839Z", "log.level":"TRACE", "id":"my-identifier", "message":"[test_index][0]", "search_type":"QUERY_THEN_FETCH", "source":"{\\\"query\\\":{\\\"match_all\\\":{\\\"boost\\\":1.0}}}", "stats":"[]", "took":"76.4micros", "took_millis":"0", "total_hits":"0 hits", "total_shards":"1" , "service.name":"ES_ECS","process.thread.name":"elasticsearch[integTest-0][search][T#3]","log.logger":"index.search.slowlog.query.Jsz7IUYMQ9ubo2ahiMgCbQ","type":"index_search_slowlog","cluster.uuid":"HHmOPeWKQlSeaF88DSfFVw","node.id":"wxTr7N_gRWWg3mUdY4spbg","node.name":"integTest-0","cluster.name":"integTest"} +{"@timestamp":"2022-01-27T11:36:57.424Z", "log.level":"DEBUG", "elasticsearch.slowlog.id":"myApp1","elasticsearch.slowlog.message":"[test_1][0]","elasticsearch.slowlog.search_type":"QUERY_THEN_FETCH","elasticsearch.slowlog.source":"{}","elasticsearch.slowlog.stats":"[]","elasticsearch.slowlog.took":"8ms","elasticsearch.slowlog.took_millis":8,"elasticsearch.slowlog.total_hits":"1 hits","elasticsearch.slowlog.total_shards":1 , "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"elasticsearch.index_search_slowlog","process.thread.name":"elasticsearch[runTask-0][search][T#1]","log.logger":"index.search.slowlog.query","trace.id":"0af7651916cd43dd8448eb211c80319c","elasticsearch.cluster.uuid":"5alW33KLT16Lp1SevDqDSQ","elasticsearch.node.id":"tVLnAGLgQum5ca6z50aqbw","elasticsearch.node.name":"runTask-0","elasticsearch.cluster.name":"runTask"} +{"@timestamp":"2022-01-27T11:42:17.693Z", "log.level":"DEBUG", "elasticsearch.slowlog.id":null,"elasticsearch.slowlog.message":"[test_1][0]","elasticsearch.slowlog.search_type":"QUERY_THEN_FETCH","elasticsearch.slowlog.source":"{}","elasticsearch.slowlog.stats":"[]","elasticsearch.slowlog.took":"164.7micros","elasticsearch.slowlog.took_millis":0,"elasticsearch.slowlog.total_hits":"2 hits","elasticsearch.slowlog.total_shards":1 , "ecs.version": 
"1.2.0","service.name":"ES_ECS","event.dataset":"elasticsearch.index_search_slowlog","process.thread.name":"elasticsearch[runTask-0][search][T#3]","log.logger":"index.search.slowlog.query","trace.id":"0af7651916cd43dd8448eb211c80319c","elasticsearch.cluster.uuid":"5alW33KLT16Lp1SevDqDSQ","elasticsearch.node.id":"tVLnAGLgQum5ca6z50aqbw","elasticsearch.node.name":"runTask-0","elasticsearch.cluster.name":"runTask"} +{"@timestamp":"2022-01-27T11:42:31.395Z", "log.level":"DEBUG", "elasticsearch.slowlog.id":null,"elasticsearch.slowlog.message":"[test_1][0]","elasticsearch.slowlog.search_type":"QUERY_THEN_FETCH","elasticsearch.slowlog.source":"{}","elasticsearch.slowlog.stats":"[]","elasticsearch.slowlog.took":"115.3micros","elasticsearch.slowlog.took_millis":0,"elasticsearch.slowlog.total_hits":"2 hits","elasticsearch.slowlog.total_shards":1 , "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"elasticsearch.index_search_slowlog","process.thread.name":"elasticsearch[runTask-0][search][T#5]","log.logger":"index.search.slowlog.query","elasticsearch.cluster.uuid":"5alW33KLT16Lp1SevDqDSQ","elasticsearch.node.id":"tVLnAGLgQum5ca6z50aqbw","elasticsearch.node.name":"runTask-0","elasticsearch.cluster.name":"runTask"} diff --git a/filebeat/module/elasticsearch/slowlog/test/es_search_slowlog.800.log-expected.json b/filebeat/module/elasticsearch/slowlog/test/es_search_slowlog.800.log-expected.json index 39cd0679087..af79450e828 100644 --- a/filebeat/module/elasticsearch/slowlog/test/es_search_slowlog.800.log-expected.json +++ b/filebeat/module/elasticsearch/slowlog/test/es_search_slowlog.800.log-expected.json @@ -1,65 +1,99 @@ [ { - "@timestamp": "2020-04-16T11:20:02.828Z", - "elasticsearch.cluster.name": "integTest", - "elasticsearch.cluster.uuid": "HHmOPeWKQlSeaF88DSfFVw", - "elasticsearch.index.name": "test_index", - "elasticsearch.node.id": "wxTr7N_gRWWg3mUdY4spbg", - "elasticsearch.node.name": "integTest-0", + "@timestamp": "2022-01-27T11:36:57.424Z", + "elasticsearch.cluster.name": "runTask", + "elasticsearch.cluster.uuid": "5alW33KLT16Lp1SevDqDSQ", + "elasticsearch.index.name": "test_1", + "elasticsearch.node.id": "tVLnAGLgQum5ca6z50aqbw", + "elasticsearch.node.name": "runTask-0", "elasticsearch.shard.id": "0", - "elasticsearch.slowlog.id": "null", + "elasticsearch.slowlog.id": "myApp1", "elasticsearch.slowlog.search_type": "QUERY_THEN_FETCH", - "elasticsearch.slowlog.source": "{\\\"query\\\":{\\\"match_all\\\":{\\\"boost\\\":1.0}}}", + "elasticsearch.slowlog.source": "{}", "elasticsearch.slowlog.stats": "[]", - "elasticsearch.slowlog.took": "10ms", - "elasticsearch.slowlog.total_hits": "0 hits", - "elasticsearch.slowlog.total_shards": "1", + "elasticsearch.slowlog.took": "8ms", + "elasticsearch.slowlog.total_hits": "1 hits", + "elasticsearch.slowlog.total_shards": 1, "event.category": "database", - "event.dataset": "elasticsearch.slowlog", - "event.duration": 10000000, + "event.dataset": "elasticsearch.index_search_slowlog", + "event.duration": 8000000, "event.kind": "event", "event.module": "elasticsearch", "event.type": "info", "fileset.name": "slowlog", - "host.id": "wxTr7N_gRWWg3mUdY4spbg", + "host.id": "tVLnAGLgQum5ca6z50aqbw", "input.type": "log", - "log.level": "TRACE", - "log.logger": "index.search.slowlog.query.Jsz7IUYMQ9ubo2ahiMgCbQ", + "log.level": "DEBUG", + "log.logger": "index.search.slowlog.query", "log.offset": 0, - "message": "{\"@timestamp\":\"2020-04-16T11:20:02.828Z\", \"log.level\":\"TRACE\", \"id\":\"null\", \"message\":\"[test_index][0]\", 
\"search_type\":\"QUERY_THEN_FETCH\", \"source\":\"{\\\\\\\"query\\\\\\\":{\\\\\\\"match_all\\\\\\\":{\\\\\\\"boost\\\\\\\":1.0}}}\", \"stats\":\"[]\", \"took\":\"10ms\", \"took_millis\":\"10\", \"total_hits\":\"0 hits\", \"total_shards\":\"1\" , \"service.name\":\"ES_ECS\",\"process.thread.name\":\"elasticsearch[integTest-0][search][T#1]\",\"log.logger\":\"index.search.slowlog.query.Jsz7IUYMQ9ubo2ahiMgCbQ\",\"type\":\"index_search_slowlog\",\"cluster.uuid\":\"HHmOPeWKQlSeaF88DSfFVw\",\"node.id\":\"wxTr7N_gRWWg3mUdY4spbg\",\"node.name\":\"integTest-0\",\"cluster.name\":\"integTest\"}", - "process.thread.name": "elasticsearch[integTest-0][search][T#1]", + "message": "[test_1][0]", + "process.thread.name": "elasticsearch[runTask-0][search][T#1]", "service.name": "ES_ECS", - "service.type": "elasticsearch" + "service.type": "elasticsearch", + "trace.id": "0af7651916cd43dd8448eb211c80319c" + }, + { + "@timestamp": "2022-01-27T11:42:17.693Z", + "elasticsearch.cluster.name": "runTask", + "elasticsearch.cluster.uuid": "5alW33KLT16Lp1SevDqDSQ", + "elasticsearch.index.name": "test_1", + "elasticsearch.node.id": "tVLnAGLgQum5ca6z50aqbw", + "elasticsearch.node.name": "runTask-0", + "elasticsearch.shard.id": "0", + "elasticsearch.slowlog.id": null, + "elasticsearch.slowlog.search_type": "QUERY_THEN_FETCH", + "elasticsearch.slowlog.source": "{}", + "elasticsearch.slowlog.stats": "[]", + "elasticsearch.slowlog.took": "164.7micros", + "elasticsearch.slowlog.total_hits": "2 hits", + "elasticsearch.slowlog.total_shards": 1, + "event.category": "database", + "event.dataset": "elasticsearch.index_search_slowlog", + "event.duration": 0, + "event.kind": "event", + "event.module": "elasticsearch", + "event.type": "info", + "fileset.name": "slowlog", + "host.id": "tVLnAGLgQum5ca6z50aqbw", + "input.type": "log", + "log.level": "DEBUG", + "log.logger": "index.search.slowlog.query", + "log.offset": 861, + "message": "[test_1][0]", + "process.thread.name": "elasticsearch[runTask-0][search][T#3]", + "service.name": "ES_ECS", + "service.type": "elasticsearch", + "trace.id": "0af7651916cd43dd8448eb211c80319c" }, { - "@timestamp": "2020-04-16T11:20:02.839Z", - "elasticsearch.cluster.name": "integTest", - "elasticsearch.cluster.uuid": "HHmOPeWKQlSeaF88DSfFVw", - "elasticsearch.index.name": "test_index", - "elasticsearch.node.id": "wxTr7N_gRWWg3mUdY4spbg", - "elasticsearch.node.name": "integTest-0", + "@timestamp": "2022-01-27T11:42:31.395Z", + "elasticsearch.cluster.name": "runTask", + "elasticsearch.cluster.uuid": "5alW33KLT16Lp1SevDqDSQ", + "elasticsearch.index.name": "test_1", + "elasticsearch.node.id": "tVLnAGLgQum5ca6z50aqbw", + "elasticsearch.node.name": "runTask-0", "elasticsearch.shard.id": "0", - "elasticsearch.slowlog.id": "my-identifier", + "elasticsearch.slowlog.id": null, "elasticsearch.slowlog.search_type": "QUERY_THEN_FETCH", - "elasticsearch.slowlog.source": "{\\\"query\\\":{\\\"match_all\\\":{\\\"boost\\\":1.0}}}", + "elasticsearch.slowlog.source": "{}", "elasticsearch.slowlog.stats": "[]", - "elasticsearch.slowlog.took": "76.4micros", - "elasticsearch.slowlog.total_hits": "0 hits", - "elasticsearch.slowlog.total_shards": "1", + "elasticsearch.slowlog.took": "115.3micros", + "elasticsearch.slowlog.total_hits": "2 hits", + "elasticsearch.slowlog.total_shards": 1, "event.category": "database", - "event.dataset": "elasticsearch.slowlog", + "event.dataset": "elasticsearch.index_search_slowlog", "event.duration": 0, "event.kind": "event", "event.module": "elasticsearch", "event.type": "info", "fileset.name": 
"slowlog", - "host.id": "wxTr7N_gRWWg3mUdY4spbg", + "host.id": "tVLnAGLgQum5ca6z50aqbw", "input.type": "log", - "log.level": "TRACE", - "log.logger": "index.search.slowlog.query.Jsz7IUYMQ9ubo2ahiMgCbQ", - "log.offset": 613, - "message": "{\"@timestamp\":\"2020-04-16T11:20:02.839Z\", \"log.level\":\"TRACE\", \"id\":\"my-identifier\", \"message\":\"[test_index][0]\", \"search_type\":\"QUERY_THEN_FETCH\", \"source\":\"{\\\\\\\"query\\\\\\\":{\\\\\\\"match_all\\\\\\\":{\\\\\\\"boost\\\\\\\":1.0}}}\", \"stats\":\"[]\", \"took\":\"76.4micros\", \"took_millis\":\"0\", \"total_hits\":\"0 hits\", \"total_shards\":\"1\" , \"service.name\":\"ES_ECS\",\"process.thread.name\":\"elasticsearch[integTest-0][search][T#3]\",\"log.logger\":\"index.search.slowlog.query.Jsz7IUYMQ9ubo2ahiMgCbQ\",\"type\":\"index_search_slowlog\",\"cluster.uuid\":\"HHmOPeWKQlSeaF88DSfFVw\",\"node.id\":\"wxTr7N_gRWWg3mUdY4spbg\",\"node.name\":\"integTest-0\",\"cluster.name\":\"integTest\"}", - "process.thread.name": "elasticsearch[integTest-0][search][T#3]", + "log.level": "DEBUG", + "log.logger": "index.search.slowlog.query", + "log.offset": 1726, + "message": "[test_1][0]", + "process.thread.name": "elasticsearch[runTask-0][search][T#5]", "service.name": "ES_ECS", "service.type": "elasticsearch" } From 70ce544cc70fb972bcd79ccb0a2efa8250e2bd6c Mon Sep 17 00:00:00 2001 From: Andrew Kroh Date: Fri, 28 Jan 2022 14:00:44 -0500 Subject: [PATCH 68/69] Make include_matches backwards compatible with 7.x config (#30032) To make transitioning from 7.x/8.0 to 8.1 easier allow the old format to continue working by translating it internally to the new format. This logs a deprecation warning when the old config format is detected. This will make it easier to Agent users to migrate from 7.x/8.0 to 8.1. There will be time window when integrations with the old format are running with the new Agent (or vice versa). --- filebeat/input/journald/config.go | 41 +++++++++++++++- filebeat/input/journald/config_test.go | 65 ++++++++++++++++++++++++++ filebeat/input/journald/input.go | 2 +- 3 files changed, 105 insertions(+), 3 deletions(-) create mode 100644 filebeat/input/journald/config_test.go diff --git a/filebeat/input/journald/config.go b/filebeat/input/journald/config.go index 4e1c0b66da4..e2ea723e51a 100644 --- a/filebeat/input/journald/config.go +++ b/filebeat/input/journald/config.go @@ -22,20 +22,30 @@ package journald import ( "errors" + "sync" "time" + "github.com/elastic/go-ucfg" + "github.com/elastic/beats/v7/filebeat/input/journald/pkg/journalfield" "github.com/elastic/beats/v7/filebeat/input/journald/pkg/journalread" + "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/libbeat/reader/parser" ) +var ( + // includeMatchesWarnOnce allow for a config deprecation warning to be + // logged only once if an old config format is detected. + includeMatchesWarnOnce sync.Once +) + // Config stores the options of a journald input. type config struct { // Paths stores the paths to the journal files to be read. Paths []string `config:"paths"` // Backoff is the current interval to wait before - // attemting to read again from the journal. + // attempting to read again from the journal. Backoff time.Duration `config:"backoff" validate:"min=0,nonzero"` // MaxBackoff is the limit of the backoff time. @@ -48,7 +58,7 @@ type config struct { CursorSeekFallback journalread.SeekMode `config:"cursor_seek_fallback"` // Matches store the key value pairs to match entries. 
- Matches journalfield.IncludeMatches `config:"include_matches"` + Matches bwcIncludeMatches `config:"include_matches"` // Units stores the units to monitor. Units []string `config:"units"` @@ -66,6 +76,33 @@ type config struct { Parsers parser.Config `config:",inline"` } +// bwcIncludeMatches is a wrapper that accepts include_matches configuration +// from 7.x to allow old config to remain compatible. +type bwcIncludeMatches journalfield.IncludeMatches + +func (im *bwcIncludeMatches) Unpack(c *ucfg.Config) error { + // Handle 7.x config format in a backwards compatible manner. Old format: + // include_matches: [_SYSTEMD_UNIT=foo.service, _SYSTEMD_UNIT=bar.service] + if c.IsArray() { + var matches []journalfield.Matcher + if err := c.Unpack(&matches); err != nil { + return err + } + for _, x := range matches { + im.OR = append(im.OR, journalfield.IncludeMatches{ + Matches: []journalfield.Matcher{x}, + }) + } + includeMatchesWarnOnce.Do(func() { + cfgwarn.Deprecate("", "Please migrate your journald input's "+ + "include_matches config to the new more expressive format.") + }) + return nil + } + + return c.Unpack((*journalfield.IncludeMatches)(im)) +} + var errInvalidSeekFallback = errors.New("invalid setting for cursor_seek_fallback") func defaultConfig() config { diff --git a/filebeat/input/journald/config_test.go b/filebeat/input/journald/config_test.go new file mode 100644 index 00000000000..5bf3d2fc69b --- /dev/null +++ b/filebeat/input/journald/config_test.go @@ -0,0 +1,65 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build linux && cgo && withjournald +// +build linux,cgo,withjournald + +package journald + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/libbeat/common" +) + +func TestConfigIncludeMatches(t *testing.T) { + verify := func(t *testing.T, yml string) { + t.Helper() + + c, err := common.NewConfigWithYAML([]byte(yml), "source") + require.NoError(t, err) + + conf := defaultConfig() + require.NoError(t, c.Unpack(&conf)) + + assert.EqualValues(t, "_SYSTEMD_UNIT=foo.service", conf.Matches.OR[0].Matches[0].String()) + assert.EqualValues(t, "_SYSTEMD_UNIT=bar.service", conf.Matches.OR[1].Matches[0].String()) + } + + t.Run("normal", func(t *testing.T) { + const yaml = ` +include_matches: + or: + - match: _SYSTEMD_UNIT=foo.service + - match: _SYSTEMD_UNIT=bar.service +` + verify(t, yaml) + }) + + t.Run("backwards-compatible", func(t *testing.T) { + const yaml = ` +include_matches: + - _SYSTEMD_UNIT=foo.service + - _SYSTEMD_UNIT=bar.service +` + + verify(t, yaml) + }) +} diff --git a/filebeat/input/journald/input.go b/filebeat/input/journald/input.go index 41b6c649f90..bf86aa59626 100644 --- a/filebeat/input/journald/input.go +++ b/filebeat/input/journald/input.go @@ -107,7 +107,7 @@ func configure(cfg *common.Config) ([]cursor.Source, cursor.Input, error) { MaxBackoff: config.MaxBackoff, Seek: config.Seek, CursorSeekFallback: config.CursorSeekFallback, - Matches: config.Matches, + Matches: journalfield.IncludeMatches(config.Matches), Units: config.Units, Transports: config.Transports, Identifiers: config.Identifiers, From ddcf2e5fa64fb7c0b5b96494fb3c10873df8c59a Mon Sep 17 00:00:00 2001 From: Julien Lind Date: Mon, 31 Jan 2022 09:40:22 +0100 Subject: [PATCH 69/69] Update stale config following (#30082) Resolves https://github.com/elastic/beats/issues/30081 --- .github/stale.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/stale.yml b/.github/stale.yml index 0086b50c8c7..4361c1ab84f 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -1,11 +1,11 @@ # Configuration for probot-stale - https://github.com/probot/stale # Number of days of inactivity before an Issue or Pull Request becomes stale -daysUntilStale: 335 +daysUntilStale: 365 # Number of days of inactivity before an Issue or Pull Request with the stale label is closed. # Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale. -daysUntilClose: 30 +daysUntilClose: 180 # Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled) onlyLabels: [] @@ -48,7 +48,7 @@ limitPerRun: 30 # Optionally, specify configuration settings that are specific to just 'issues' or 'pulls': pulls: - daysUntilStale: 90 + daysUntilStale: 60 daysUntilClose: 30 markComment: > Hi!
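A note for readers migrating include_matches configs by hand: the sketch
below shows the two configuration shapes the journald input accepts after
PATCH 68/69. It is inferred from the test cases added in config_test.go
above; the unit names are taken from those tests, while the filebeat.inputs
wrapper is an assumption about a typical filebeat.yml layout rather than
something this patch series shows.

    # 7.x shape: a flat list of matchers. bwcIncludeMatches.Unpack detects
    # the array, wraps each entry in its own single-matcher IncludeMatches,
    # appends the entries as OR branches, and logs a one-time deprecation
    # warning via cfgwarn.Deprecate.
    filebeat.inputs:
      - type: journald
        include_matches:
          - _SYSTEMD_UNIT=foo.service
          - _SYSTEMD_UNIT=bar.service

    # Equivalent 8.1+ shape, which the wrapper produces internally and
    # which new configs should use directly:
    filebeat.inputs:
      - type: journald
        include_matches:
          or:
            - match: _SYSTEMD_UNIT=foo.service
            - match: _SYSTEMD_UNIT=bar.service

Both shapes should yield matchers that render as _SYSTEMD_UNIT=foo.service
and _SYSTEMD_UNIT=bar.service, which is exactly what TestConfigIncludeMatches
asserts for its "normal" and "backwards-compatible" cases.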