diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index e7de86b334c0..005c8261dc5e 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -273,7 +273,6 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...v7.0.0-beta1[Check the - Added support for ingesting structured Elasticsearch server logs {pull}10428[10428] - Populate more ECS fields in the Suricata module. {pull}10006[10006] - Add module zeek. {issue}9931[9931] {pull}10034[10034] -- Add support for CRI-O based logs autodiscover {pull}10687[10687] *Heartbeat* diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index bf8001ed88de..acbe1f04a526 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -46,6 +46,8 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Filebeat* +- Set `ecs: true` in user_agent processors when loading pipelines with Filebeat 7.0.x into Elasticsearch 6.7.x. {issue}10655[10655] {pull}10875[10875] + *Heartbeat* - Remove monitor generator script that was rarely used. {pull}9648[9648] @@ -90,6 +92,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Update rabbitmq.* fields to map to ECS. {pull}10563[10563] - Update haproxy.* fields to map to ECS. {pull}10558[10558] {pull}10568[10568] - Collect all EC2 meta data from all instances in all states. {pull}10628[10628] +- Migrate docker module to ECS. {pull}10927[10927] *Packetbeat* @@ -169,6 +172,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fixed data types for various hosts fields in `mongodb/replstatus` metricset {pull}10307[10307] - Added function to close sql database connection. {pull}10355[10355] - Fix issue with `elasticsearch/node_stats` metricset (x-pack) not indexing `source_node` field. {pull}10639[10639] +- Migrate docker autodiscover to ECS. 
{issue}10757[10757] {pull}10862[10862] *Packetbeat* @@ -206,6 +210,8 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Calls to Elasticsearch X-Pack APIs made by Beats won't cause deprecation logs in Elasticsearch logs. {pull}9656[9656] - Add `network` condition to processors for matching IP addresses against CIDRs. {pull}10743[10743] - Add if/then/else support to processors. {pull}10744[10744] +- Add `community_id` processor for computing network flow hashes. {pull}10745[10745] + *Auditbeat* @@ -244,6 +250,11 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Populate more ECS fields in the Suricata module. {pull}10006[10006] - Add ISO8601 timestamp support in syslog metricset. {issue}8716[8716] {pull}10736[10736] - Add more info to message logged when a duplicated symlink file is found {pull}10845[10845] +- Add option to configure docker input with paths {pull}10687[10687] +- Add Netflow module to enrich flow events with geoip data. {pull}10877[10877] +- Set `event.category: network_traffic` for Suricata. {pull}10882[10882] +- Add configuration knob for auto-discover hints to control whether log harvesting is enabled for the pod/container. {issue}10811[10811] {pull}10911[10911] +- Change Suricata module pipeline to handle `destination.domain` being set if a reverse DNS processor is used. {issue}10510[10510] *Heartbeat* @@ -298,6 +309,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add overview dashboard to Zookeeper Metricbeat module {pull}10379[10379] - Add Consul Metricbeat module with Agent Metricset {pull}8631[8631] - Add filters and pie chart for AWS EC2 dashboard. {pull}10596[10596] +- Add AWS SQS metricset. 
{pull}10684[10684] {issue}10053[10053] *Packetbeat* diff --git a/filebeat/autodiscover/builder/hints/logs_test.go b/filebeat/autodiscover/builder/hints/logs_test.go index de56e777765a..e9e1afa72248 100644 --- a/filebeat/autodiscover/builder/hints/logs_test.go +++ b/filebeat/autodiscover/builder/hints/logs_test.go @@ -47,6 +47,18 @@ func TestGenerateHints(t *testing.T) { len: 0, result: common.MapStr{}, }, + { + msg: "Hints with logs.disable should return nothing", + event: bus.Event{ + "hints": common.MapStr{ + "logs": common.MapStr{ + "disable": "true", + }, + }, + }, + len: 0, + result: common.MapStr{}, + }, { msg: "Empty event hints should return default config", event: bus.Event{ diff --git a/filebeat/docs/autodiscover-hints.asciidoc b/filebeat/docs/autodiscover-hints.asciidoc index a49a8abd055e..9d80d28c7adb 100644 --- a/filebeat/docs/autodiscover-hints.asciidoc +++ b/filebeat/docs/autodiscover-hints.asciidoc @@ -94,6 +94,25 @@ filebeat.autodiscover: hints.enabled: true ------------------------------------------------------------------------------------- +Autodiscover provides a way to control whether log harvesting is by default disabled for the pods/containers when auto-discovery is in use. 
To disable log harvesting by default, set `default.disable` to true: + +["source","yaml",subs="attributes"] +------------------------------------------------------------------------------------- +filebeat.autodiscover: + providers: + - type: kubernetes + hints.enabled: true + default.disable: true +------------------------------------------------------------------------------------- + +Then, for the pods/containers for which log harvesting should be enabled, you can annotate them with the hint: + +["source","yaml",subs="attributes"] +------------------------------------------------------------------------------------- +annotations: + co.elastic.logs/disable: false +------------------------------------------------------------------------------------- + You can annotate Kubernetes Pods with useful info to spin up {beatname_uc} inputs or modules: ["source","yaml",subs="attributes"] @@ -137,6 +156,26 @@ filebeat.autodiscover: hints.enabled: true ------------------------------------------------------------------------------------- +Autodiscover provides a way to control whether log harvesting is by default disabled for the +containers when auto-discovery is in use. 
To disable log harvesting by default, set `default.disable` to true: + +["source","yaml",subs="attributes"] +------------------------------------------------------------------------------------- +filebeat.autodiscover: + providers: + - type: docker + hints.enabled: true + default.disable: true +------------------------------------------------------------------------------------- + +Then, for the containers for which log harvesting should be enabled, you can label Docker containers with: + +["source","yaml",subs="attributes"] +------------------------------------------------------------------------------------- +labels: + co.elastic.logs/disable: false +------------------------------------------------------------------------------------- + You can label Docker containers with useful info to spin up {beatname_uc} inputs, for example: ["source","yaml",subs="attributes"] diff --git a/filebeat/docs/fields.asciidoc b/filebeat/docs/fields.asciidoc index b55a7b9775d6..e550a37c11d3 100644 --- a/filebeat/docs/fields.asciidoc +++ b/filebeat/docs/fields.asciidoc @@ -33,6 +33,7 @@ grouped in the following categories: * <> * <> * <> +* <> * <> * <> * <> @@ -10621,6 +10622,12 @@ type: short -- +[[exported-fields-netflow-module]] +== NetFlow fields + +Module for receiving NetFlow and IPFIX flow records over UDP. The module does not add fields beyond what the netflow input provides. + + [[exported-fields-nginx]] == Nginx fields diff --git a/filebeat/docs/inputs/input-docker.asciidoc b/filebeat/docs/inputs/input-docker.asciidoc index 783391b39a31..c801fbb6d856 100644 --- a/filebeat/docs/inputs/input-docker.asciidoc +++ b/filebeat/docs/inputs/input-docker.asciidoc @@ -42,6 +42,24 @@ The list of Docker container IDs to read logs from. Specify The base path where Docker logs are located. The default is `/var/lib/docker/containers`. +===== `containers.paths` + +The list of paths to read logs from. 
This can be used as an alternative to +`containers.ids` for other runtimes that use the same logging format as docker +but place their logs in different paths. For example when using CRI-O runtime in +Kubernetes the following configuration can be used: + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: +- type: docker + containers.paths: + - /var/log/pods/${data.kubernetes.pod.uid}/${data.kubernetes.container.name}/*.log +---- + +When `containers.paths` is used, `containers.path` is ignored. + + ===== `containers.stream` Reads from the specified streams only: `all`, `stdout` or `stderr`. The default diff --git a/filebeat/docs/modules-getting-started.asciidoc b/filebeat/docs/modules-getting-started.asciidoc index a3733f303d3f..4cf4e52208e1 100644 --- a/filebeat/docs/modules-getting-started.asciidoc +++ b/filebeat/docs/modules-getting-started.asciidoc @@ -100,7 +100,7 @@ load the ingest pipelines manually. To do this, run the `setup` command with the `--pipelines` option specified. If you used the <> command to enable modules in the `modules.d` directory, also specify the `--modules` flag. For example, the following command -loads the ingest pipelines used by all metricsets enabled in the system, nginx, +loads the ingest pipelines used by all filesets enabled in the system, nginx, and mysql modules: // override modulename attribute so it works with the --modules option @@ -134,5 +134,9 @@ and mysql modules: PS > .{backslash}{beatname_lc}.exe setup --pipelines --modules {modulename} ---- +TIP: If you're loading ingest pipelines manually because you want to send events +to {ls}, also see +{logstash-ref}/filebeat-modules.html[Working with {beatname_uc} modules]. + :has_module_steps!: :modulename!: diff --git a/filebeat/docs/modules/netflow.asciidoc b/filebeat/docs/modules/netflow.asciidoc new file mode 100644 index 000000000000..f3f9c8c01910 --- /dev/null +++ b/filebeat/docs/modules/netflow.asciidoc @@ -0,0 +1,80 @@ +//// +This file is generated! 
See scripts/docs_collector.py +//// + +[[filebeat-module-netflow]] +[role="xpack"] + +:modulename: netflow +:has-dashboards: false + +== NetFlow module + +This is a module for receiving NetFlow and IPFIX flow records over UDP. This +input supports NetFlow versions 1, 5, 6, 7, 8 and 9, as well as IPFIX. For +NetFlow versions older than 9, fields are mapped automatically to NetFlow v9. + +This module wraps the <> to enrich the +flow records with geolocation information about the IP endpoints by using +Elasticsearch Ingest Node. + +[float] +=== Compatibility + +This module requires the {elasticsearch-plugins}/ingest-geoip.html[ingest-geoip] +Elasticsearch plugin. + +include::../include/running-modules.asciidoc[] + +include::../include/configuring-intro.asciidoc[] + +:fileset_ex: log + +include::../include/config-option-intro.asciidoc[] + +[float] +==== `log` fileset settings + +The fileset is by default configured to listen for UDP traffic on +`localhost:2055`. For most use cases you will want to set the `netflow_host` +variable to allow the input to bind to all interfaces so that it can receive +traffic from network devices. + +["source","yaml",subs="attributes"] +----- +- module: netflow + log: + enabled: true + var: + netflow_host: 0.0.0.0 + netflow_port: 2055 +----- + +`var.netflow_host`:: Address to bind to. Defaults to `localhost`. + +`var.netflow_port`:: Port to listen on. Defaults to `2055`. + +`var.max_message_size`:: The maximum size of the message received over UDP. +The default is `10KiB`. + +`var.expiration_timeout`:: The time before an idle session or unused template is +expired. Only applicable to v9 and IPFIX protocols. A value of zero disables +expiration. + +`var.queue_size`:: The maximum number of packets that can be queued for +processing. Use this setting to avoid packet-loss when dealing with occasional +bursts of traffic. 
+ +:has-dashboards!: + +:fileset_ex!: + +:modulename!: + + +[float] +=== Fields + +For a description of each field in the module, see the +<> section. + diff --git a/filebeat/docs/modules_list.asciidoc b/filebeat/docs/modules_list.asciidoc index 1324bcc4996f..ea3b3d74997e 100644 --- a/filebeat/docs/modules_list.asciidoc +++ b/filebeat/docs/modules_list.asciidoc @@ -16,6 +16,7 @@ This file is generated! See scripts/docs_collector.py * <> * <> * <> + * <> * <> * <> * <> @@ -43,6 +44,7 @@ include::modules/logstash.asciidoc[] include::modules/mongodb.asciidoc[] include::modules/mysql.asciidoc[] include::modules/nats.asciidoc[] +include::modules/netflow.asciidoc[] include::modules/nginx.asciidoc[] include::modules/osquery.asciidoc[] include::modules/postgresql.asciidoc[] diff --git a/filebeat/fileset/pipelines.go b/filebeat/fileset/pipelines.go index 0c1983ef3fc9..0f21dc4a026d 100644 --- a/filebeat/fileset/pipelines.go +++ b/filebeat/fileset/pipelines.go @@ -121,6 +121,12 @@ func loadPipeline(esClient PipelineLoader, pipelineID string, content map[string return nil } } + + err := setECSProcessors(esClient.GetVersion(), pipelineID, content) + if err != nil { + return fmt.Errorf("failed to adapt pipeline for ECS compatibility: %v", err) + } + body, err := esClient.LoadJSON(path, content) if err != nil { return interpretError(err, body) @@ -129,6 +135,40 @@ func loadPipeline(esClient PipelineLoader, pipelineID string, content map[string return nil } +// setECSProcessors sets required ECS options in processors when filebeat version is >= 7.0.0 +// and ES is 6.7.X to ease migration to ECS. 
+func setECSProcessors(esVersion common.Version, pipelineID string, content map[string]interface{}) error { + ecsVersion := common.MustNewVersion("7.0.0") + if !esVersion.LessThan(ecsVersion) { + return nil + } + + p, ok := content["processors"] + if !ok { + return nil + } + processors, ok := p.([]interface{}) + if !ok { + return fmt.Errorf("'processors' in pipeline '%s' expected to be a list, found %T", pipelineID, p) + } + + minUserAgentVersion := common.MustNewVersion("6.7.0") + for _, p := range processors { + processor, ok := p.(map[string]interface{}) + if !ok { + continue + } + if options, ok := processor["user_agent"].(map[string]interface{}); ok { + if esVersion.LessThan(minUserAgentVersion) { + return fmt.Errorf("user_agent processor requires option 'ecs: true', but Elasticsearch %v does not support this option (Elasticsearch %v or newer is required)", esVersion, minUserAgentVersion) + } + logp.Debug("modules", "Setting 'ecs: true' option in user_agent processor for field '%v' in pipeline '%s'", options["field"], pipelineID) + options["ecs"] = true + } + } + return nil +} + func deletePipeline(esClient PipelineLoader, pipelineID string) error { path := makeIngestPipelinePath(pipelineID) _, _, err := esClient.Request("DELETE", path, "", nil, nil) diff --git a/filebeat/fileset/pipelines_test.go b/filebeat/fileset/pipelines_test.go index 194df5e9f149..a9758df894a2 100644 --- a/filebeat/fileset/pipelines_test.go +++ b/filebeat/fileset/pipelines_test.go @@ -26,6 +26,7 @@ import ( "github.com/stretchr/testify/assert" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/outputs/elasticsearch" ) @@ -103,3 +104,112 @@ func TestLoadPipelinesWithMultiPipelineFileset(t *testing.T) { }) } } + +func TestSetEcsProcessors(t *testing.T) { + cases := []struct { + name string + esVersion *common.Version + content map[string]interface{} + expected map[string]interface{} + isErrExpected bool + }{ + { + name: "ES < 6.7.0", + 
esVersion: common.MustNewVersion("6.6.0"), + content: map[string]interface{}{ + "processors": []interface{}{ + map[string]interface{}{ + "user_agent": map[string]interface{}{ + "field": "foo.http_user_agent", + }, + }, + }}, + isErrExpected: true, + }, + { + name: "ES == 6.7.0", + esVersion: common.MustNewVersion("6.7.0"), + content: map[string]interface{}{ + "processors": []interface{}{ + map[string]interface{}{ + "rename": map[string]interface{}{ + "field": "foo.src_ip", + "target_field": "source.ip", + }, + }, + map[string]interface{}{ + "user_agent": map[string]interface{}{ + "field": "foo.http_user_agent", + }, + }, + }, + }, + expected: map[string]interface{}{ + "processors": []interface{}{ + map[string]interface{}{ + "rename": map[string]interface{}{ + "field": "foo.src_ip", + "target_field": "source.ip", + }, + }, + map[string]interface{}{ + "user_agent": map[string]interface{}{ + "field": "foo.http_user_agent", + "ecs": true, + }, + }, + }, + }, + isErrExpected: false, + }, + { + name: "ES >= 7.0.0", + esVersion: common.MustNewVersion("7.0.0"), + content: map[string]interface{}{ + "processors": []interface{}{ + map[string]interface{}{ + "rename": map[string]interface{}{ + "field": "foo.src_ip", + "target_field": "source.ip", + }, + }, + map[string]interface{}{ + "user_agent": map[string]interface{}{ + "field": "foo.http_user_agent", + }, + }, + }, + }, + expected: map[string]interface{}{ + "processors": []interface{}{ + map[string]interface{}{ + "rename": map[string]interface{}{ + "field": "foo.src_ip", + "target_field": "source.ip", + }, + }, + map[string]interface{}{ + "user_agent": map[string]interface{}{ + "field": "foo.http_user_agent", + }, + }, + }, + }, + isErrExpected: false, + }, + } + + for _, test := range cases { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + err := setECSProcessors(*test.esVersion, "foo-pipeline", test.content) + if test.isErrExpected { + assert.Error(t, err) + } else { + assert.NoError(t, err) + 
assert.Equal(t, test.expected, test.content) + } + }) + } +} diff --git a/filebeat/tests/system/test_autodiscover.py b/filebeat/tests/system/test_autodiscover.py index a85a0bf60482..973577920d33 100644 --- a/filebeat/tests/system/test_autodiscover.py +++ b/filebeat/tests/system/test_autodiscover.py @@ -50,6 +50,8 @@ def test_docker(self): # Check metadata is added assert output[0]['message'] == 'Busybox output 1' - assert output[0]['docker']['container']['image'] == 'busybox' + assert output[0]['container']['image']['name'] == 'busybox' assert output[0]['docker']['container']['labels'] == {} - assert 'name' in output[0]['docker']['container'] + assert 'name' in output[0]['container'] + + self.assert_fields_are_documented(output[0]) diff --git a/heartbeat/tests/system/test_autodiscovery.py b/heartbeat/tests/system/test_autodiscovery.py index 6a4abdf7d838..8828d875d7e8 100644 --- a/heartbeat/tests/system/test_autodiscovery.py +++ b/heartbeat/tests/system/test_autodiscovery.py @@ -60,7 +60,9 @@ def test_docker(self): # We don't check all the docker fields because this is really the responsibility # of libbeat's autodiscovery code. event = output[0] - if event['monitor']['id'] == 'myid' and event['docker']['container']['id'] is not None: + if event['monitor']['id'] == 'myid' and event['container']['id'] is not None: matched = True assert matched + + self.assert_fields_are_documented(output[0]) diff --git a/libbeat/autodiscover/builder/helper.go b/libbeat/autodiscover/builder/helper.go index e8a81cfd869b..3a574675b914 100644 --- a/libbeat/autodiscover/builder/helper.go +++ b/libbeat/autodiscover/builder/helper.go @@ -155,7 +155,7 @@ func IsNoOp(hints common.MapStr, key string) bool { } // GenerateHints parses annotations based on a prefix and sets up hints that can be picked up by individual Beats. 
-func GenerateHints(annotations common.MapStr, container, prefix string) common.MapStr { +func GenerateHints(annotations common.MapStr, container, prefix string, defaultDisable bool) common.MapStr { hints := common.MapStr{} if rawEntries, err := annotations.GetValue(prefix); err == nil { if entries, ok := rawEntries.(common.MapStr); ok { @@ -195,5 +195,10 @@ func GenerateHints(annotations common.MapStr, container, prefix string) common.M } } + // Update hints: if .disabled annotation does not exist, set according to disabledByDefault flag + if _, err := hints.GetValue("logs.disable"); err != nil && defaultDisable { + hints.Put("logs.disable", "true") + } + return hints } diff --git a/libbeat/autodiscover/builder/helper_test.go b/libbeat/autodiscover/builder/helper_test.go index 477888b93f0e..c282972925ec 100644 --- a/libbeat/autodiscover/builder/helper_test.go +++ b/libbeat/autodiscover/builder/helper_test.go @@ -27,13 +27,15 @@ import ( func TestGenerateHints(t *testing.T) { tests := []struct { - annotations map[string]string - result common.MapStr + annotations map[string]string + defaultDisable bool + result common.MapStr }{ // Empty annotations should return empty hints { - annotations: map[string]string{}, - result: common.MapStr{}, + annotations: map[string]string{}, + defaultDisable: false, + result: common.MapStr{}, }, // Scenarios being tested: @@ -50,6 +52,7 @@ func TestGenerateHints(t *testing.T) { "co.elastic.metrics.foobar1/period": "15s", "not.to.include": "true", }, + defaultDisable: false, result: common.MapStr{ "logs": common.MapStr{ "multiline": common.MapStr{ @@ -62,6 +65,67 @@ func TestGenerateHints(t *testing.T) { }, }, }, + // Scenarios being tested: + // logs.disable must be generated when defaultDisable is set and annotations does not + // have co.elastic.logs/disable set to false. 
+ // logs/multiline.pattern must be a nested common.MapStr under hints.logs + // metrics/module must be found in hints.metrics + // not.to.include must not be part of hints + // period is annotated at both container and pod level. Container level value must be in hints + { + annotations: map[string]string{ + "co.elastic.logs/multiline.pattern": "^test", + "co.elastic.metrics/module": "prometheus", + "co.elastic.metrics/period": "10s", + "co.elastic.metrics.foobar/period": "15s", + "co.elastic.metrics.foobar1/period": "15s", + "not.to.include": "true", + }, + defaultDisable: true, + result: common.MapStr{ + "logs": common.MapStr{ + "multiline": common.MapStr{ + "pattern": "^test", + }, + "disable": "true", + }, + "metrics": common.MapStr{ + "module": "prometheus", + "period": "15s", + }, + }, + }, + // Scenarios being tested: + // logs.disable must not be generated when defaultDisable is set, but annotations + // have co.elastic.logs/disable set to false. + // logs/multiline.pattern must be a nested common.MapStr under hints.logs + // metrics/module must be found in hints.metrics + // not.to.include must not be part of hints + // period is annotated at both container and pod level. 
Container level value must be in hints + { + annotations: map[string]string{ + "co.elastic.logs/disable": "false", + "co.elastic.logs/multiline.pattern": "^test", + "co.elastic.metrics/module": "prometheus", + "co.elastic.metrics/period": "10s", + "co.elastic.metrics.foobar/period": "15s", + "co.elastic.metrics.foobar1/period": "15s", + "not.to.include": "true", + }, + defaultDisable: true, + result: common.MapStr{ + "logs": common.MapStr{ + "multiline": common.MapStr{ + "pattern": "^test", + }, + "disable": "false", + }, + "metrics": common.MapStr{ + "module": "prometheus", + "period": "15s", + }, + }, + }, } for _, test := range tests { @@ -69,6 +133,6 @@ func TestGenerateHints(t *testing.T) { for k, v := range test.annotations { annMap.Put(k, v) } - assert.Equal(t, GenerateHints(annMap, "foobar", "co.elastic"), test.result) + assert.Equal(t, GenerateHints(annMap, "foobar", "co.elastic", test.defaultDisable), test.result) } } diff --git a/libbeat/autodiscover/providers/docker/config.go b/libbeat/autodiscover/providers/docker/config.go index 0b76887a10f6..bba4f32cdb0c 100644 --- a/libbeat/autodiscover/providers/docker/config.go +++ b/libbeat/autodiscover/providers/docker/config.go @@ -25,19 +25,22 @@ import ( // Config for docker autodiscover provider type Config struct { - Host string `config:"host"` - TLS *docker.TLSConfig `config:"ssl"` - Prefix string `config:"prefix"` - HintsEnabled bool `config:"hints.enabled"` - Builders []*common.Config `config:"builders"` - Appenders []*common.Config `config:"appenders"` - Templates template.MapperSettings `config:"templates"` + Host string `config:"host"` + TLS *docker.TLSConfig `config:"ssl"` + Prefix string `config:"prefix"` + HintsEnabled bool `config:"hints.enabled"` + DefaultDisable bool `config:"default.disable"` + Builders []*common.Config `config:"builders"` + Appenders []*common.Config `config:"appenders"` + Templates template.MapperSettings `config:"templates"` + Dedot bool `config:"labels.dedot"` } func 
defaultConfig() *Config { return &Config{ Host: "unix:///var/run/docker.sock", Prefix: "co.elastic", + Dedot: true, } } diff --git a/libbeat/autodiscover/providers/docker/docker.go b/libbeat/autodiscover/providers/docker/docker.go index 6275897cb9f7..fb0901e55e51 100644 --- a/libbeat/autodiscover/providers/docker/docker.go +++ b/libbeat/autodiscover/providers/docker/docker.go @@ -18,6 +18,8 @@ package docker import ( + "errors" + "github.com/gofrs/uuid" "github.com/elastic/beats/libbeat/autodiscover" @@ -119,41 +121,95 @@ func (d *Provider) Start() { }() } -func (d *Provider) emitContainer(event bus.Event, flag string) { +type dockerMetadata struct { + // Old selectors [Deprecated] + Docker common.MapStr + + // New ECS-based selectors + Container common.MapStr + + // Metadata used to enrich events, like ECS-based selectors but can + // have modifications like dedotting + Metadata common.MapStr +} + +func (d *Provider) generateMetaDocker(event bus.Event) (*docker.Container, *dockerMetadata) { container, ok := event["container"].(*docker.Container) if !ok { - logp.Err("Couldn't get a container from watcher event") - return + logp.Error(errors.New("Couldn't get a container from watcher event")) + return nil, nil } - var host string - if len(container.IPAddresses) > 0 { - host = container.IPAddresses[0] - } + // Don't dedot selectors, dedot only metadata used for events enrichment labelMap := common.MapStr{} + metaLabelMap := common.MapStr{} for k, v := range container.Labels { safemapstr.Put(labelMap, k, v) + if d.config.Dedot { + label := common.DeDot(k) + metaLabelMap.Put(label, v) + } else { + safemapstr.Put(metaLabelMap, k, v) + } } - meta := common.MapStr{ - "container": common.MapStr{ - "id": container.ID, - "name": container.Name, - "image": container.Image, + meta := &dockerMetadata{ + Docker: common.MapStr{ + "container": common.MapStr{ + "id": container.ID, + "name": container.Name, + "image": container.Image, + "labels": labelMap, + }, + }, 
+ Container: common.MapStr{ + "id": container.ID, + "name": container.Name, + "image": common.MapStr{ + "name": container.Image, + }, "labels": labelMap, }, + Metadata: common.MapStr{ + "container": common.MapStr{ + "id": container.ID, + "name": container.Name, + "image": common.MapStr{ + "name": container.Image, + }, + }, + "docker": common.MapStr{ + "container": common.MapStr{ + "labels": metaLabelMap, + }, + }, + }, + } + + return container, meta +} + +func (d *Provider) emitContainer(event bus.Event, flag string) { + container, meta := d.generateMetaDocker(event) + if container == nil || meta == nil { + return } + + var host string + if len(container.IPAddresses) > 0 { + host = container.IPAddresses[0] + } + // Without this check there would be overlapping configurations with and without ports. if len(container.Ports) == 0 { event := bus.Event{ - "provider": d.uuid, - "id": container.ID, - flag: true, - "host": host, - "docker": meta, - "meta": common.MapStr{ - "docker": meta, - }, + "provider": d.uuid, + "id": container.ID, + flag: true, + "host": host, + "docker": meta.Docker, + "container": meta.Container, + "meta": meta.Metadata, } d.publish(event) @@ -162,15 +218,14 @@ func (d *Provider) emitContainer(event bus.Event, flag string) { // Emit container container and port information for _, port := range container.Ports { event := bus.Event{ - "provider": d.uuid, - "id": container.ID, - flag: true, - "host": host, - "port": port.PrivatePort, - "docker": meta, - "meta": common.MapStr{ - "docker": meta, - }, + "provider": d.uuid, + "id": container.ID, + flag: true, + "host": host, + "port": port.PrivatePort, + "docker": meta.Docker, + "container": meta.Container, + "meta": meta.Metadata, } d.publish(event) @@ -212,7 +267,7 @@ func (d *Provider) generateHints(event bus.Event) bus.Event { e["port"] = port } if labels, err := dockerMeta.GetValue("labels"); err == nil { - hints := builder.GenerateHints(labels.(common.MapStr), "", d.config.Prefix) + hints := 
builder.GenerateHints(labels.(common.MapStr), "", d.config.Prefix, d.config.DefaultDisable) e["hints"] = hints } return e diff --git a/libbeat/autodiscover/providers/docker/docker_integration_test.go b/libbeat/autodiscover/providers/docker/docker_integration_test.go index acb1f8ff46a4..d790744aa489 100644 --- a/libbeat/autodiscover/providers/docker/docker_integration_test.go +++ b/libbeat/autodiscover/providers/docker/docker_integration_test.go @@ -92,17 +92,23 @@ func checkEvent(t *testing.T, listener bus.Listener, start bool) { assert.Equal(t, getValue(e, "stop"), true) assert.Nil(t, getValue(e, "start")) } - assert.Equal(t, getValue(e, "docker.container.image"), "busybox") - assert.Equal(t, getValue(e, "docker.container.labels"), common.MapStr{ - "label": common.MapStr{ - "value": "foo", - "child": "bar", + assert.Equal(t, getValue(e, "container.image.name"), "busybox") + // labels.dedot=true by default + assert.Equal(t, + common.MapStr{ + "label": common.MapStr{ + "value": "foo", + "child": "bar", + }, }, - }) - assert.NotNil(t, getValue(e, "docker.container.id")) - assert.NotNil(t, getValue(e, "docker.container.name")) + getValue(e, "container.labels"), + ) + assert.NotNil(t, getValue(e, "container.id")) + assert.NotNil(t, getValue(e, "container.name")) assert.NotNil(t, getValue(e, "host")) - assert.Equal(t, getValue(e, "docker"), getValue(e, "meta.docker")) + assert.Equal(t, getValue(e, "docker.container.id"), getValue(e, "meta.container.id")) + assert.Equal(t, getValue(e, "docker.container.name"), getValue(e, "meta.container.name")) + assert.Equal(t, getValue(e, "docker.container.image"), getValue(e, "meta.container.image.name")) return case <-time.After(10 * time.Second): diff --git a/libbeat/autodiscover/providers/docker/docker_test.go b/libbeat/autodiscover/providers/docker/docker_test.go index 7c01e6e2ec37..ba663d30314d 100644 --- a/libbeat/autodiscover/providers/docker/docker_test.go +++ b/libbeat/autodiscover/providers/docker/docker_test.go @@ -24,6 
+24,7 @@ import ( "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/bus" + "github.com/elastic/beats/libbeat/common/docker" ) func TestGenerateHints(t *testing.T) { @@ -105,3 +106,131 @@ func getNestedAnnotations(in common.MapStr) common.MapStr { } return out } + +func TestGenerateMetaDockerNoDedot(t *testing.T) { + event := bus.Event{ + "container": &docker.Container{ + ID: "abc", + Name: "foobar", + Labels: map[string]string{ + "do.not.include": "true", + "co.elastic.logs/disable": "true", + }, + }, + } + + cfg := defaultConfig() + cfg.Dedot = false + p := Provider{ + config: cfg, + } + _, meta := p.generateMetaDocker(event) + expectedMeta := &dockerMetadata{ + Docker: common.MapStr{ + "container": common.MapStr{ + "id": "abc", + "name": "foobar", + "image": "", + "labels": common.MapStr{ + "do": common.MapStr{"not": common.MapStr{"include": "true"}}, + "co": common.MapStr{"elastic": common.MapStr{"logs/disable": "true"}}, + }, + }, + }, + Container: common.MapStr{ + "id": "abc", + "name": "foobar", + "image": common.MapStr{ + "name": "", + }, + "labels": common.MapStr{ + "do": common.MapStr{"not": common.MapStr{"include": "true"}}, + "co": common.MapStr{"elastic": common.MapStr{"logs/disable": "true"}}, + }, + }, + Metadata: common.MapStr{ + "container": common.MapStr{ + "id": "abc", + "name": "foobar", + "image": common.MapStr{ + "name": "", + }, + }, + "docker": common.MapStr{ + "container": common.MapStr{ + "labels": common.MapStr{ + "do": common.MapStr{"not": common.MapStr{"include": "true"}}, + "co": common.MapStr{"elastic": common.MapStr{"logs/disable": "true"}}, + }, + }, + }, + }, + } + assert.Equal(t, expectedMeta.Docker, meta.Docker) + assert.Equal(t, expectedMeta.Container, meta.Container) + assert.Equal(t, expectedMeta.Metadata, meta.Metadata) +} + +func TestGenerateMetaDockerWithDedot(t *testing.T) { + event := bus.Event{ + "container": &docker.Container{ + ID: "abc", + Name: "foobar", + 
Labels: map[string]string{ + "do.not.include": "true", + "co.elastic.logs/disable": "true", + }, + }, + } + + cfg := defaultConfig() + cfg.Dedot = true + p := Provider{ + config: cfg, + } + _, meta := p.generateMetaDocker(event) + expectedMeta := &dockerMetadata{ + Docker: common.MapStr{ + "container": common.MapStr{ + "id": "abc", + "name": "foobar", + "image": "", + "labels": common.MapStr{ + "do": common.MapStr{"not": common.MapStr{"include": "true"}}, + "co": common.MapStr{"elastic": common.MapStr{"logs/disable": "true"}}, + }, + }, + }, + Container: common.MapStr{ + "id": "abc", + "name": "foobar", + "image": common.MapStr{ + "name": "", + }, + "labels": common.MapStr{ + "do": common.MapStr{"not": common.MapStr{"include": "true"}}, + "co": common.MapStr{"elastic": common.MapStr{"logs/disable": "true"}}, + }, + }, + Metadata: common.MapStr{ + "container": common.MapStr{ + "id": "abc", + "name": "foobar", + "image": common.MapStr{ + "name": "", + }, + }, + "docker": common.MapStr{ + "container": common.MapStr{ + "labels": common.MapStr{ + "do_not_include": "true", + "co_elastic_logs/disable": "true", + }, + }, + }, + }, + } + assert.Equal(t, expectedMeta.Docker, meta.Docker) + assert.Equal(t, expectedMeta.Container, meta.Container) + assert.Equal(t, expectedMeta.Metadata, meta.Metadata) +} diff --git a/libbeat/autodiscover/providers/kubernetes/config.go b/libbeat/autodiscover/providers/kubernetes/config.go index 5ba5f789f68b..269cb60cff36 100644 --- a/libbeat/autodiscover/providers/kubernetes/config.go +++ b/libbeat/autodiscover/providers/kubernetes/config.go @@ -33,11 +33,12 @@ type Config struct { SyncPeriod time.Duration `config:"sync_period"` CleanupTimeout time.Duration `config:"cleanup_timeout"` - Prefix string `config:"prefix"` - HintsEnabled bool `config:"hints.enabled"` - Builders []*common.Config `config:"builders"` - Appenders []*common.Config `config:"appenders"` - Templates template.MapperSettings `config:"templates"` + Prefix string 
`config:"prefix"` + HintsEnabled bool `config:"hints.enabled"` + DefaultDisable bool `config:"default.disable"` + Builders []*common.Config `config:"builders"` + Appenders []*common.Config `config:"appenders"` + Templates template.MapperSettings `config:"templates"` } func defaultConfig() *Config { diff --git a/libbeat/autodiscover/providers/kubernetes/kubernetes.go b/libbeat/autodiscover/providers/kubernetes/kubernetes.go index c3d9a03dda48..41037238edd7 100644 --- a/libbeat/autodiscover/providers/kubernetes/kubernetes.go +++ b/libbeat/autodiscover/providers/kubernetes/kubernetes.go @@ -275,12 +275,13 @@ func (p *Provider) generateHints(event bus.Event) bus.Event { } cname := builder.GetContainerName(container) - hints := builder.GenerateHints(annotations, cname, p.config.Prefix) + hints := builder.GenerateHints(annotations, cname, p.config.Prefix, p.config.DefaultDisable) + logp.Debug("kubernetes", "Generated hints %+v", hints) if len(hints) != 0 { e["hints"] = hints } - logp.Debug("kubernetes", "Generated builder event %v", event) + logp.Debug("kubernetes", "Generated builder event %+v", e) return e } diff --git a/libbeat/cmd/instance/imports.go b/libbeat/cmd/instance/imports.go index 8c9275694dfb..70cb46a49e78 100644 --- a/libbeat/cmd/instance/imports.go +++ b/libbeat/cmd/instance/imports.go @@ -30,6 +30,7 @@ import ( _ "github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata" _ "github.com/elastic/beats/libbeat/processors/add_locale" _ "github.com/elastic/beats/libbeat/processors/add_process_metadata" + _ "github.com/elastic/beats/libbeat/processors/communityid" _ "github.com/elastic/beats/libbeat/processors/dissect" _ "github.com/elastic/beats/libbeat/processors/dns" _ "github.com/elastic/beats/libbeat/publisher/includes" // Register publisher pipeline modules diff --git a/libbeat/docs/processors-using.asciidoc b/libbeat/docs/processors-using.asciidoc index a2091e842925..e0a7e5099248 100644 
--- a/libbeat/docs/processors-using.asciidoc +++ b/libbeat/docs/processors-using.asciidoc @@ -196,6 +196,7 @@ The supported processors are: * <> * <> + * <> * <> * <> * <> @@ -699,6 +700,53 @@ is treated as if the field was not set at all. exist in the event are overwritten by keys from the decoded JSON object. The default value is false. +[[community-id]] +=== Community ID Network Flow Hash + +The `community_id` processor computes a network flow hash according to the +https://github.com/corelight/community-id-spec[Community ID Flow Hash +specification]. + +The flow hash is useful for correlating all network events related to a +single flow. For example you can filter on a community ID value and you might +get back the Netflow records from multiple collectors and layer 7 protocol +records from Packetbeat. + +By default the processor is configured to read the flow parameters from the +appropriate Elastic Common Schema (ECS) fields. If you are processing ECS data +then no parameters are required. + +[source,yaml] +---- +processors: + - community_id: +---- + +If the data does not conform to ECS then you can customize the field names +that the processor reads from. You can also change the `target` field which +is where the computed hash is written to. + +[source,yaml] +---- +processors: + - community_id: + fields: + source_ip: my_source_ip + source_port: my_source_port + destination_ip: my_dest_ip + destination_port: my_dest_port + transport: proto + icmp_type: my_icmp_type + icmp_code: my_icmp_code + target: network.community_id +---- + +If the necessary fields are not present in the event then the processor will +silently continue without adding the target field. + +The processor also accepts an optional `seed` parameter that must be a 16-bit +unsigned integer. This value gets incorporated into all generated hashes. 
+ [[drop-event]] === Drop events diff --git a/libbeat/docs/security/user-access.asciidoc b/libbeat/docs/security/user-access.asciidoc index 4828f7f2012a..fe77d4596faa 100644 --- a/libbeat/docs/security/user-access.asciidoc +++ b/libbeat/docs/security/user-access.asciidoc @@ -6,17 +6,23 @@ To enable users to access the indices {beatname_uc} creates, grant them `read` and `view_index_metadata` privileges on the {beatname_uc} indices. If they're using {kib}, they also need the `kibana_user` role. -. Create a reader role that has the `read` and `view_index_metadata` privileges +ifdef::apm-server[] +X-Pack security provides a built-in role called `apm_user` that you can explicitly assign to users. +This role grants them the necessary `read` and `view_index_metadata` privileges on the {beatname_uc} indices. +endif::apm-server[] + +ifndef::apm-server[] +. Create a role that has the `read` and `view_index_metadata` privileges on the {beatname_uc} indices. + You can create roles from the **Management > Roles** UI in {kib} or through the `role` API. For example, the following request creates a role named -++{beat_default_index_prefix}_reader++: +++{access_role}++: + -- ["source","sh",subs="attributes,callouts"] --------------------------------------------------------------- -POST _security/role/{beat_default_index_prefix}_reader +POST _security/role/{access_role} { "indices": [ { @@ -30,40 +36,43 @@ POST _security/role/{beat_default_index_prefix}_reader <1> If you use a custom {beatname_uc} index pattern, specify that pattern instead of the default ++{beat_default_index_prefix}-*++ pattern. -- +endif::apm-server[] -. Assign your users the reader role so they can access the {beatname_uc} -indices. For {kib} users who need to visualize the data, also assign the -`kibana_user` role: +. Assign your users the ++{access_role}++ +role so they can access the {beatname_uc} indices. +For {kib} users who need to visualize the data, +also assign the `kibana_user` role: .. 
If you're using the `native` realm, you can assign roles with the **Management > Users** UI in {kib} or through the `user` API. For example, the -following request grants ++{beat_default_index_prefix}_user++ the -++{beat_default_index_prefix}_reader++ and `kibana_user` roles: +following request grants ++{beat_default_index_prefix}_account++ the +++{access_role}++ and `kibana_user` roles: + -- ["source", "sh", subs="attributes,callouts"] --------------------------------------------------------------- -POST /_security/user/{beat_default_index_prefix}_user +POST /_security/user/{beat_default_index_prefix}_account { "password" : "{pwd}", - "roles" : [ "{beat_default_index_prefix}_reader","kibana_user"], - "full_name" : "{beatname_uc} User" + "roles" : [ "{access_role}","kibana_user"], + "full_name" : "{beatname_uc} account" } --------------------------------------------------------------- // CONSOLE -- -.. If you're using the LDAP, Active Directory, or PKI realms, you assign the -roles in the `role_mapping.yml` configuration file. For example, the following -snippet grants ++{beatname_uc} User++ the ++{beat_default_index_prefix}_reader++ -and `kibana_user` roles: +.. If you're using the LDAP, Active Directory, or PKI realms, +you assign the roles in the `role_mapping.yml` configuration file. 
+For example, the following snippet grants +++{beat_default_index_prefix}_account++ the +++{access_role}++ and `kibana_user` roles: + -- ["source", "yaml", subs="attributes,callouts"] --------------------------------------------------------------- -{beat_default_index_prefix}_reader: - - "cn={beatname_uc} User,dc=example,dc=com" +{access_role}: + - "cn={beat_default_index_prefix}_account,dc=example,dc=com" kibana_user: - - "cn={beatname_uc} User,dc=example,dc=com" + - "cn={beat_default_index_prefix}_account,dc=example,dc=com" --------------------------------------------------------------- For more information, see diff --git a/libbeat/docs/shared-beats-attributes.asciidoc b/libbeat/docs/shared-beats-attributes.asciidoc index 88f1d2d6abad..d11a73c604e9 100644 --- a/libbeat/docs/shared-beats-attributes.asciidoc +++ b/libbeat/docs/shared-beats-attributes.asciidoc @@ -14,7 +14,6 @@ :monitoringdoc: https://www.elastic.co/guide/en/elastic-stack-overview/{doc-branch} :dashboards: https://artifacts.elastic.co/downloads/beats/beats-dashboards/beats-dashboards-{stack-version}.zip :dockerimage: docker.elastic.co/beats/{beatname_lc}:{version} -:dockergithub: https://github.com/elastic/beats-docker/tree/{doc-branch} :dockerconfig: https://raw.githubusercontent.com/elastic/beats/{doc-branch}/deploy/docker/{beatname_lc}.docker.yml :downloads: https://artifacts.elastic.co/downloads/beats :ES-version: {stack-version} @@ -26,3 +25,4 @@ :beat_monitoring_user_version: 6.3.0 :beat_monitoring_version: 6.2 :beat_version_key: agent.version +:access_role: {beat_default_index_prefix}_reader diff --git a/libbeat/docs/shared-docker.asciidoc b/libbeat/docs/shared-docker.asciidoc index 43c3dc1d54cb..eda2b026e276 100644 --- a/libbeat/docs/shared-docker.asciidoc +++ b/libbeat/docs/shared-docker.asciidoc @@ -5,8 +5,7 @@ Docker images for {beatname_uc} are available from the Elastic Docker registry. The base image is https://hub.docker.com/_/centos/[centos:7]. 
A list of all published Docker images and tags is available at -https://www.docker.elastic.co[www.docker.elastic.co]. The source code is in -{dockergithub}[GitHub]. +https://www.docker.elastic.co[www.docker.elastic.co]. These images are free to use under the Elastic license. They contain open source and free commercial features and access to paid commercial features. diff --git a/libbeat/docs/shared-logstash-config.asciidoc b/libbeat/docs/shared-logstash-config.asciidoc index e849f619504a..68d05155c563 100644 --- a/libbeat/docs/shared-logstash-config.asciidoc +++ b/libbeat/docs/shared-logstash-config.asciidoc @@ -22,7 +22,7 @@ the {stack} getting started tutorial. Also see the documentation for the If you want to use {ls} to perform additional processing on the data collected by {beatname_uc}, you need to configure {beatname_uc} to use {ls}. -To do this, you edit the {beatname_uc} configuration file to disable the Elasticsearch +To do this, you edit the {beatname_uc} configuration file to disable the {es} output by commenting it out and enable the {ls} output by uncommenting the logstash section: @@ -36,8 +36,14 @@ output.logstash: The `hosts` option specifies the {ls} server and the port (`5044`) where {ls} is configured to listen for incoming Beats connections. -For this configuration, you must <> -because the options for auto loading the template are only available for the Elasticsearch output. +For this configuration, you must <> +because the options for auto loading the template are only available for the {es} output. + +ifeval::["{beatname_lc}"=="filebeat"] +Want to use <> with {ls}? You need to do +some extra setup. For more information, see +{logstash-ref}/filebeat-modules.html[Working with {beatname_uc} modules]. 
+endif::[] ifndef::win-only[] ifndef::apm-server[] diff --git a/libbeat/outputs/kafka/config.go b/libbeat/outputs/kafka/config.go index 9cf81d342611..48a46491f918 100644 --- a/libbeat/outputs/kafka/config.go +++ b/libbeat/outputs/kafka/config.go @@ -106,6 +106,14 @@ func defaultConfig() kafkaConfig { } } +func readConfig(cfg *common.Config) (*kafkaConfig, error) { + c := defaultConfig() + if err := cfg.Unpack(&c); err != nil { + return nil, err + } + return &c, nil +} + func (c *kafkaConfig) Validate() error { if len(c.Hosts) == 0 { return errors.New("no hosts configured") diff --git a/libbeat/outputs/kafka/config_test.go b/libbeat/outputs/kafka/config_test.go index 262da4f9c056..dd74b50ed3d7 100644 --- a/libbeat/outputs/kafka/config_test.go +++ b/libbeat/outputs/kafka/config_test.go @@ -39,18 +39,13 @@ func TestConfigAcceptValid(t *testing.T) { for name, test := range tests { test := test t.Run(name, func(t *testing.T) { - c, err := common.NewConfigFrom(test) + c := common.MustNewConfigFrom(test) + c.SetString("hosts", 0, "localhost") + cfg, err := readConfig(c) if err != nil { t.Fatalf("Can not create test configuration: %v", err) } - c.SetString("hosts", 0, "localhost") - - cfg := defaultConfig() - if err := c.Unpack(&cfg); err != nil { - t.Fatalf("Unpacking configuration failed: %v", err) - } - - if _, err := newSaramaConfig(&cfg); err != nil { + if _, err := newSaramaConfig(cfg); err != nil { t.Fatalf("Failure creating sarama config: %v", err) } }) diff --git a/libbeat/outputs/kafka/kafka.go b/libbeat/outputs/kafka/kafka.go index dc34e77c5581..f96b372698f8 100644 --- a/libbeat/outputs/kafka/kafka.go +++ b/libbeat/outputs/kafka/kafka.go @@ -77,8 +77,8 @@ func makeKafka( ) (outputs.Group, error) { debugf("initialize kafka output") - config := defaultConfig() - if err := cfg.Unpack(&config); err != nil { + config, err := readConfig(cfg) + if err != nil { return outputs.Fail(err) } @@ -92,7 +92,7 @@ func makeKafka( return outputs.Fail(err) } - libCfg, err := 
newSaramaConfig(&config) + libCfg, err := newSaramaConfig(config) if err != nil { return outputs.Fail(err) } @@ -107,7 +107,7 @@ func makeKafka( return outputs.Fail(err) } - client, err := newKafkaClient(observer, hosts, beat.Beat, config.Key, topic, codec, libCfg) + client, err := newKafkaClient(observer, hosts, beat.IndexPrefix, config.Key, topic, codec, libCfg) if err != nil { return outputs.Fail(err) } diff --git a/libbeat/outputs/kafka/kafka_integration_test.go b/libbeat/outputs/kafka/kafka_integration_test.go index 4d85003a719a..7230335612de 100644 --- a/libbeat/outputs/kafka/kafka_integration_test.go +++ b/libbeat/outputs/kafka/kafka_integration_test.go @@ -199,7 +199,7 @@ func TestKafkaPublish(t *testing.T) { } t.Run(name, func(t *testing.T) { - grp, err := makeKafka(nil, beat.Info{Beat: "libbeat"}, outputs.NewNilObserver(), cfg) + grp, err := makeKafka(nil, beat.Info{Beat: "libbeat", IndexPrefix: "testbeat"}, outputs.NewNilObserver(), cfg) if err != nil { t.Fatal(err) } @@ -208,6 +208,7 @@ func TestKafkaPublish(t *testing.T) { if err := output.Connect(); err != nil { t.Fatal(err) } + assert.Equal(t, output.index, "testbeat") defer output.Close() // publish test events diff --git a/libbeat/outputs/logstash/async_test.go b/libbeat/outputs/logstash/async_test.go index d02c3d348e88..b99fb9a57491 100644 --- a/libbeat/outputs/logstash/async_test.go +++ b/libbeat/outputs/logstash/async_test.go @@ -50,7 +50,7 @@ func TestAsyncStructuredEvent(t *testing.T) { } func makeAsyncTestClient(conn *transport.Client) testClientDriver { - config := defaultConfig + config := defaultConfig() config.Timeout = 1 * time.Second config.Pipelining = 3 client, err := newAsyncClient(beat.Info{}, conn, outputs.NewNilObserver(), &config) diff --git a/libbeat/outputs/logstash/config.go b/libbeat/outputs/logstash/config.go index 2b8d7687e555..598413f5814f 100644 --- a/libbeat/outputs/logstash/config.go +++ b/libbeat/outputs/logstash/config.go @@ -20,6 +20,10 @@ package logstash import ( 
"time" + "github.com/elastic/beats/libbeat/beat" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/cfgwarn" "github.com/elastic/beats/libbeat/common/transport/tlscommon" "github.com/elastic/beats/libbeat/outputs/transport" ) @@ -45,23 +49,39 @@ type Backoff struct { Max time.Duration } -var defaultConfig = Config{ - LoadBalance: false, - Pipelining: 2, - BulkMaxSize: 2048, - SlowStart: false, - CompressionLevel: 3, - Timeout: 30 * time.Second, - MaxRetries: 3, - TTL: 0 * time.Second, - Backoff: Backoff{ - Init: 1 * time.Second, - Max: 60 * time.Second, - }, - EscapeHTML: false, +func defaultConfig() Config { + return Config{ + LoadBalance: false, + Pipelining: 2, + BulkMaxSize: 2048, + SlowStart: false, + CompressionLevel: 3, + Timeout: 30 * time.Second, + MaxRetries: 3, + TTL: 0 * time.Second, + Backoff: Backoff{ + Init: 1 * time.Second, + Max: 60 * time.Second, + }, + EscapeHTML: false, + } } -func newConfig() *Config { - c := defaultConfig - return &c +func readConfig(cfg *common.Config, info beat.Info) (*Config, error) { + c := defaultConfig() + + err := cfgwarn.CheckRemoved6xSettings(cfg, "port") + if err != nil { + return nil, err + } + + if err := cfg.Unpack(&c); err != nil { + return nil, err + } + + if c.Index == "" { + c.Index = info.IndexPrefix + } + + return &c, nil } diff --git a/libbeat/outputs/logstash/config_test.go b/libbeat/outputs/logstash/config_test.go new file mode 100644 index 000000000000..ee3ffe179786 --- /dev/null +++ b/libbeat/outputs/logstash/config_test.go @@ -0,0 +1,100 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package logstash + +import ( + "testing" + "time" + + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" + + "github.com/stretchr/testify/assert" +) + +func TestConfig(t *testing.T) { + + info := beat.Info{Beat: "testbeat", Name: "foo", IndexPrefix: "bar"} + for name, test := range map[string]struct { + config *common.Config + expectedConfig *Config + err bool + }{ + "default config": { + config: common.MustNewConfigFrom([]byte(`{ }`)), + expectedConfig: &Config{ + LoadBalance: false, + Pipelining: 2, + BulkMaxSize: 2048, + SlowStart: false, + CompressionLevel: 3, + Timeout: 30 * time.Second, + MaxRetries: 3, + TTL: 0 * time.Second, + Backoff: Backoff{ + Init: 1 * time.Second, + Max: 60 * time.Second, + }, + EscapeHTML: false, + Index: "bar", + }, + }, + "config given": { + config: common.MustNewConfigFrom(common.MapStr{ + "index": "beat-index", + "loadbalance": true, + "bulk_max_size": 1024, + "slow_start": false, + }), + expectedConfig: &Config{ + LoadBalance: true, + BulkMaxSize: 1024, + Pipelining: 2, + SlowStart: false, + CompressionLevel: 3, + Timeout: 30 * time.Second, + MaxRetries: 3, + TTL: 0 * time.Second, + Backoff: Backoff{ + Init: 1 * time.Second, + Max: 60 * time.Second, + }, + EscapeHTML: false, + Index: "beat-index", + }, + }, + "removed config setting": { + config: common.MustNewConfigFrom(common.MapStr{ + "port": "8080", + }), + expectedConfig: nil, + err: true, + }, + } { + t.Run(name, func(t *testing.T) { + cfg, err := readConfig(test.config, info) + if test.err { + 
assert.Error(t, err) + assert.Nil(t, cfg) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expectedConfig, cfg) + } + }) + } +} diff --git a/libbeat/outputs/logstash/logstash.go b/libbeat/outputs/logstash/logstash.go index 0c14bf5882bb..d1a64a47b269 100644 --- a/libbeat/outputs/logstash/logstash.go +++ b/libbeat/outputs/logstash/logstash.go @@ -20,7 +20,6 @@ package logstash import ( "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" - "github.com/elastic/beats/libbeat/common/cfgwarn" "github.com/elastic/beats/libbeat/common/transport/tlscommon" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/outputs" @@ -45,20 +44,11 @@ func makeLogstash( observer outputs.Observer, cfg *common.Config, ) (outputs.Group, error) { - if !cfg.HasField("index") { - cfg.SetString("index", -1, beat.Beat) - } - - err := cfgwarn.CheckRemoved6xSettings(cfg, "port") + config, err := readConfig(cfg, beat) if err != nil { return outputs.Fail(err) } - config := newConfig() - if err := cfg.Unpack(config); err != nil { - return outputs.Fail(err) - } - hosts, err := outputs.ReadHostList(cfg) if err != nil { return outputs.Fail(err) diff --git a/libbeat/outputs/logstash/sync_test.go b/libbeat/outputs/logstash/sync_test.go index dc48309211dd..f9d74cb69cba 100644 --- a/libbeat/outputs/logstash/sync_test.go +++ b/libbeat/outputs/logstash/sync_test.go @@ -63,7 +63,7 @@ func newClientServerTCP(t *testing.T, to time.Duration) *clientServer { } func makeTestClient(conn *transport.Client) testClientDriver { - config := defaultConfig + config := defaultConfig() config.Timeout = 1 * time.Second config.TTL = 5 * time.Second client, err := newSyncClient(beat.Info{}, conn, outputs.NewNilObserver(), &config) diff --git a/libbeat/outputs/logstash/window_test.go b/libbeat/outputs/logstash/window_test.go index 773f9c374df9..ab5e64f85087 100644 --- a/libbeat/outputs/logstash/window_test.go +++ 
b/libbeat/outputs/logstash/window_test.go @@ -30,7 +30,7 @@ func TestShrinkWindowSizeNeverZero(t *testing.T) { windowSize := 124 var w window - w.init(windowSize, defaultConfig.BulkMaxSize) + w.init(windowSize, defaultConfig().BulkMaxSize) w.windowSize = int32(windowSize) for i := 0; i < 100; i++ { diff --git a/libbeat/processors/communityid/communityid.go b/libbeat/processors/communityid/communityid.go new file mode 100644 index 000000000000..c51c2b400f51 --- /dev/null +++ b/libbeat/processors/communityid/communityid.go @@ -0,0 +1,281 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package communityid + +import ( + "crypto" + "fmt" + "net" + "strconv" + "strings" + + "github.com/pkg/errors" + + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/flowhash" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/libbeat/processors" +) + +const logName = "processor.community_id" + +func init() { + processors.RegisterPlugin("community_id", New) +} + +type processor struct { + config + log *logp.Logger + hasher flowhash.Hasher +} + +// New constructs a new processor that computes community ID flowhash. The +// values that are incorporated into the hash vary by protocol. +// +// TCP / UDP / SCTP: +// IP src / IP dst / IP proto / source port / dest port +// +// ICMPv4 / ICMPv6: +// IP src / IP dst / IP proto / ICMP type + "counter-type" or code +// +// Other IP-borne protocols: +// IP src / IP dst / IP proto +func New(cfg *common.Config) (processors.Processor, error) { + c := defaultConfig() + if err := cfg.Unpack(&c); err != nil { + return nil, errors.Wrap(err, "fail to unpack the community_id configuration") + } + + return newFromConfig(c) +} + +func newFromConfig(c config) (*processor, error) { + hasher := flowhash.CommunityID + if c.Seed != 0 { + hasher = flowhash.NewCommunityID(c.Seed, flowhash.Base64Encoding, crypto.SHA1) + } + + return &processor{ + config: c, + log: logp.NewLogger(logName), + hasher: hasher, + }, nil +} + +func (p *processor) String() string { + return fmt.Sprintf("community_id=[target=%s, fields=["+ + "source_ip=%v, source_port=%v, "+ + "destination_ip=%v, destination_port=%v, "+ + "transport_protocol=%v, "+ + "icmp_type=%v, icmp_code=%v], seed=%d]", + p.Target, p.Fields.SourceIP, p.Fields.SourcePort, + p.Fields.DestinationIP, p.Fields.DestinationPort, + p.Fields.TransportProtocol, p.Fields.ICMPType, p.Fields.ICMPCode, + p.Seed) +} + +func (p *processor) Run(event *beat.Event) 
(*beat.Event, error) { + // If already set then bail out. + _, err := event.GetValue(p.Target) + if err == nil { + return event, nil + } + + flow := p.buildFlow(event) + if flow == nil { + return event, nil + } + + id := p.hasher.Hash(*flow) + _, err = event.PutValue(p.Target, id) + return event, err +} + +func (p *processor) buildFlow(event *beat.Event) *flowhash.Flow { + var flow flowhash.Flow + + // source ip + v, err := event.GetValue(p.Fields.SourceIP) + if err != nil { + return nil + } + var ok bool + flow.SourceIP, ok = tryToIP(v) + if !ok { + return nil + } + + // destination ip + v, err = event.GetValue(p.Fields.DestinationIP) + if err != nil { + return nil + } + flow.DestinationIP, ok = tryToIP(v) + if !ok { + return nil + } + + // protocol + v, err = event.GetValue(p.Fields.TransportProtocol) + if err != nil { + return nil + } + flow.Protocol, ok = tryToIANATransportProtocol(v) + if !ok { + return nil + } + + switch flow.Protocol { + case tcpProtocol, udpProtocol, sctpProtocol: + // source port + v, err = event.GetValue(p.Fields.SourcePort) + if err != nil { + return nil + } + flow.SourcePort, ok = tryToUint16(v) + if !ok || flow.SourcePort == 0 { + return nil + } + + // destination port + v, err = event.GetValue(p.Fields.DestinationPort) + if err != nil { + return nil + } + flow.DestinationPort, ok = tryToUint16(v) + if !ok || flow.DestinationPort == 0 { + return nil + } + case icmpProtocol, icmpIPv6Protocol: + // Return a flow even if the ICMP type/code is unavailable. 
+ if t, c, ok := getICMPTypeCode(event, p.Fields.ICMPType, p.Fields.ICMPCode); ok { + flow.ICMP.Type, flow.ICMP.Code = t, c + } + } + + return &flow +} + +func getICMPTypeCode(event *beat.Event, typeField, codeField string) (t, c uint8, ok bool) { + v, err := event.GetValue(typeField) + if err != nil { + return 0, 0, false + } + t, ok = tryToUint8(v) + if !ok { + return 0, 0, false + } + + v, err = event.GetValue(codeField) + if err != nil { + return 0, 0, false + } + c, ok = tryToUint8(v) + if !ok { + return 0, 0, false + } + return t, c, true +} + +func tryToIP(from interface{}) (net.IP, bool) { + switch v := from.(type) { + case net.IP: + return v, true + case string: + ip := net.ParseIP(v) + return ip, ip != nil + default: + return nil, false + } +} + +// tryToUint16 tries to coerce the given interface to an uint16. On success it +// returns the int value and true. +func tryToUint16(from interface{}) (uint16, bool) { + switch v := from.(type) { + case int: + return uint16(v), true + case int8: + return uint16(v), true + case int16: + return uint16(v), true + case int32: + return uint16(v), true + case int64: + return uint16(v), true + case uint: + return uint16(v), true + case uint8: + return uint16(v), true + case uint16: + return v, true + case uint32: + return uint16(v), true + case uint64: + return uint16(v), true + case string: + num, err := strconv.ParseUint(v, 0, 16) + if err != nil { + return 0, false + } + return uint16(num), true + default: + return 0, false + } +} + +func tryToUint8(from interface{}) (uint8, bool) { + to, ok := tryToUint16(from) + return uint8(to), ok +} + +const ( + icmpProtocol uint8 = 1 + igmpProtocol uint8 = 2 + tcpProtocol uint8 = 6 + udpProtocol uint8 = 17 + icmpIPv6Protocol uint8 = 58 + sctpProtocol uint8 = 132 +) + +var transports = map[string]uint8{ + "icmp": icmpProtocol, + "igmp": igmpProtocol, + "tcp": tcpProtocol, + "udp": udpProtocol, + "ipv6-icmp": icmpIPv6Protocol, + "icmpv6": icmpIPv6Protocol, + "sctp": sctpProtocol, 
+} + +func tryToIANATransportProtocol(from interface{}) (uint8, bool) { + switch v := from.(type) { + case string: + transport, found := transports[v] + if !found { + transport, found = transports[strings.ToLower(v)] + } + if found { + return transport, found + } + } + + // Allow raw protocol numbers. + return tryToUint8(from) +} diff --git a/libbeat/processors/communityid/communityid_test.go b/libbeat/processors/communityid/communityid_test.go new file mode 100644 index 000000000000..40cccc28b41c --- /dev/null +++ b/libbeat/processors/communityid/communityid_test.go @@ -0,0 +1,148 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package communityid + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" +) + +func TestNewDefaults(t *testing.T) { + _, err := New(common.NewConfig()) + if err != nil { + t.Fatal(err) + } +} + +func TestRun(t *testing.T) { + // From flowhash package testdata. 
+ // 1:LQU9qZlK+B5F3KDmev6m5PMibrg= | 128.232.110.120 66.35.250.204 6 34855 80 + evt := func() common.MapStr { + return common.MapStr{ + "source": common.MapStr{ + "ip": "128.232.110.120", + "port": 34855, + }, + "destination": common.MapStr{ + "ip": "66.35.250.204", + "port": 80, + }, + "network": common.MapStr{"transport": "TCP"}, + } + } + + t.Run("valid", func(t *testing.T) { + testProcessor(t, 0, evt(), "1:LQU9qZlK+B5F3KDmev6m5PMibrg=") + }) + + t.Run("seed", func(t *testing.T) { + testProcessor(t, 123, evt(), "1:hTSGlFQnR58UCk+NfKRZzA32dPg=") + }) + + t.Run("invalid source IP", func(t *testing.T) { + e := evt() + e.Put("source.ip", 2162716280) + testProcessor(t, 0, e, nil) + }) + + t.Run("invalid source port", func(t *testing.T) { + e := evt() + e.Put("source.port", 0) + testProcessor(t, 0, e, nil) + }) + + t.Run("invalid destination IP", func(t *testing.T) { + e := evt() + e.Put("destination.ip", "308.111.1.2.3") + testProcessor(t, 0, e, nil) + }) + + t.Run("invalid destination port", func(t *testing.T) { + e := evt() + e.Put("source.port", nil) + testProcessor(t, 0, e, nil) + }) + + t.Run("unknown protocol", func(t *testing.T) { + e := evt() + e.Put("network.transport", "xyz") + testProcessor(t, 0, e, nil) + }) + + t.Run("icmp", func(t *testing.T) { + e := evt() + e.Put("network.transport", "icmp") + e.Put("icmp.type", 3) + e.Put("icmp.code", 3) + testProcessor(t, 0, e, "1:KF3iG9XD24nhlSy4r1TcYIr5mfE=") + }) + + t.Run("icmp without typecode", func(t *testing.T) { + // Hashes src_ip + dst_ip + protocol with zero value typecode. 
+ e := evt() + e.Put("network.transport", "icmp") + testProcessor(t, 0, e, "1:PAE85ZfR4SbNXl5URZwWYyDehwU=") + }) + + t.Run("igmp", func(t *testing.T) { + e := evt() + e.Delete("source.port") + e.Delete("destination.port") + e.Put("network.transport", "igmp") + testProcessor(t, 0, e, "1:D3t8Q1aFA6Ev0A/AO4i9PnU3AeI=") + }) + + t.Run("protocol number as string", func(t *testing.T) { + e := evt() + e.Delete("source.port") + e.Delete("destination.port") + e.Put("network.transport", "2") + testProcessor(t, 0, e, "1:D3t8Q1aFA6Ev0A/AO4i9PnU3AeI=") + }) + + t.Run("protocol number", func(t *testing.T) { + e := evt() + e.Delete("source.port") + e.Delete("destination.port") + e.Put("network.transport", 2) + testProcessor(t, 0, e, "1:D3t8Q1aFA6Ev0A/AO4i9PnU3AeI=") + }) +} + +func testProcessor(t testing.TB, seed uint16, fields common.MapStr, expectedHash interface{}) { + t.Helper() + + c := defaultConfig() + c.Seed = seed + p, err := newFromConfig(c) + if err != nil { + t.Fatal(err) + } + + out, err := p.Run(&beat.Event{Fields: fields}) + if err != nil { + t.Fatal(err) + } + + id, _ := out.GetValue(c.Target) + assert.EqualValues(t, expectedHash, id) +} diff --git a/libbeat/processors/communityid/config.go b/libbeat/processors/communityid/config.go new file mode 100644 index 000000000000..b618055e93fb --- /dev/null +++ b/libbeat/processors/communityid/config.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package communityid + +type config struct { + Fields fieldsConfig `config:"fields"` + Target string `config:"target"` + Seed uint16 `config:"seed"` +} + +type fieldsConfig struct { + SourceIP string `config:"source_ip"` + SourcePort string `config:"source_port"` + DestinationIP string `config:"destination_ip"` + DestinationPort string `config:"destination_port"` + TransportProtocol string `config:"transport"` + ICMPType string `config:"icmp_type"` + ICMPCode string `config:"icmp_code"` +} + +func defaultConfig() config { + return config{ + Fields: fieldsConfig{ + SourceIP: "source.ip", + SourcePort: "source.port", + DestinationIP: "destination.ip", + DestinationPort: "destination.port", + TransportProtocol: "network.transport", + ICMPType: "icmp.type", + ICMPCode: "icmp.code", + }, + Target: "network.community_id", + } +} diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index 53da6d070ff5..00080797d30b 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -1083,6 +1083,103 @@ type: integer The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). +-- + +[float] +== sqs fields + +`sqs` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS SQS. + + + +*`aws.sqs.oldest_message_age.sec`*:: ++ +-- +type: long + +The approximate age of the oldest non-deleted message in the queue. 
+
+
+--
+
+*`aws.sqs.message.delayed`*::
++
+--
+type: long
+
+The number of messages in the queue that are delayed and not available for reading immediately.
+
+
+--
+
+*`aws.sqs.message.not_visible`*::
++
+--
+type: long
+
+The number of messages that are in flight.
+
+
+--
+
+*`aws.sqs.message.visible`*::
++
+--
+type: long
+
+The number of messages available for retrieval from the queue.
+
+
+--
+
+*`aws.sqs.message.deleted`*::
++
+--
+type: long
+
+The number of messages deleted from the queue.
+
+
+--
+
+*`aws.sqs.message.received`*::
++
+--
+type: long
+
+The number of messages returned by calls to the ReceiveMessage action.
+
+
+--
+
+*`aws.sqs.message.sent`*::
++
+--
+type: long
+
+The number of messages added to a queue.
+
+
+--
+
+*`aws.sqs.empty_receives`*::
++
+--
+type: long
+
+The number of ReceiveMessage API calls that did not return a message.
+
+
+--
+
+*`aws.sqs.sent_message_size.bytes`*::
++
+--
+type: scaled_float
+
+The size of messages added to a queue.
+
+
 --
 
 [[exported-fields-beat]]
diff --git a/metricbeat/docs/modules/aws.asciidoc b/metricbeat/docs/modules/aws.asciidoc
index f6c5f0a40d26..8eae60fd9641 100644
--- a/metricbeat/docs/modules/aws.asciidoc
+++ b/metricbeat/docs/modules/aws.asciidoc
@@ -9,7 +9,7 @@ This module periodically fetches monitoring metrics from AWS Cloudwatch using
 https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html[GetMetricData API] for running EC2 instances.
 Note: extra AWS charges on GetMetricData API requests will be generated by this module.
 
-The default metricset is `ec2`.
+The default metricsets are `ec2` and `sqs`.
 
 [float]
 === Module-specific configuration notes
@@ -35,20 +35,6 @@ aws> sts get-session-token --serial-number arn:aws:iam::1234:mfa/your-email@exam
 
 Specific permissions needs to be added into the IAM user's policy to authorize Metricbeat to collect AWS monitoring metrics. Please see documentation under each metricset for required permissions.
 
-By default, Amazon EC2 sends metric data to CloudWatch every 5 minutes. With this basic monitoring, `period` in aws module -configuration should be larger or equal than `300s`. If `period` is set to be less than `300s`, the same cloudwatch metrics -will be collected more than once which will cause extra fees without getting more granular metrics. For example, in `US East (N. Virginia)` region, it costs -$0.01/1000 metrics requested using GetMetricData. Please see https://aws.amazon.com/cloudwatch/pricing/[AWS Cloudwatch Pricing] -for more details. To avoid unnecessary charges, `period` is preferred to be set to `300s` or multiples of `300s`, such as -`600s` and `900s`. - -For more granular monitoring data you can enable detailed monitoring on the instance to get metrics every 1 minute. Please see -https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html[Enabling Detailed Monitoring] for instructions -on how to enable detailed monitoring. With detailed monitoring enabled, `period` in aws module configuration can be any number -larger than `60s`. Since AWS sends metric data to CloudWatch in 1-minute periods, setting metricbeat module `period` less -than `60s` will cause extra API requests which means extra charges on AWS. To avoid unnecessary charges, `period` is -preferred to be set to `60s` or multiples of `60s`, such as `120s` and `180s`. - Here is an example of aws metricbeat module configuration: [source,yaml] @@ -56,15 +42,29 @@ Here is an example of aws metricbeat module configuration: metricbeat.modules: - module: aws period: 300s - metricsets: ["ec2"] + metricsets: + - "ec2" + - "sqs" access_key_id: '${AWS_ACCESS_KEY_ID}' secret_access_key: '${AWS_SECRET_ACCESS_KEY}' session_token: '${AWS_SESSION_TOKEN}' default_region: '${AWS_REGION:us-west-1}' ---- -This module only collects metrics for EC2 instances that are in `running` state and exist more than 10 minutes to make sure -there are monitoring metrics exist in Cloudwatch already. 
+By default, Amazon EC2 sends metric data to CloudWatch every 5 minutes. With this basic monitoring, `period` in aws module
+configuration should be larger than or equal to `300s`. If `period` is set to be less than `300s`, the same CloudWatch metrics
+will be collected more than once which will cause extra fees without getting more granular metrics. For example, in `US East (N. Virginia)` region, it costs
+$0.01/1000 metrics requested using GetMetricData. Please see https://aws.amazon.com/cloudwatch/pricing/[AWS Cloudwatch Pricing]
+for more details. To avoid unnecessary charges, `period` is preferred to be set to `300s` or multiples of `300s`, such as
+`600s` and `900s`. For more granular monitoring data you can enable detailed monitoring on the instance to get metrics every 1 minute. Please see
+https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html[Enabling Detailed Monitoring] for instructions
+on how to enable detailed monitoring. With detailed monitoring enabled, `period` in aws module configuration can be any number
+larger than `60s`. Since AWS sends metric data to CloudWatch in 1-minute periods, setting metricbeat module `period` less
+than `60s` will cause extra API requests which means extra charges on AWS. To avoid unnecessary charges, `period` is
+preferred to be set to `60s` or multiples of `60s`, such as `120s` and `180s`.
+
+Since CloudWatch metrics for Amazon SQS queues are automatically collected and pushed to CloudWatch every 5 minutes,
+`period` for `sqs` is recommended to be set to `300s` or multiples of `300s`.
 
 The AWS module comes with a predefined dashboard.
For example: @@ -84,6 +84,7 @@ metricbeat.modules: period: 300s metricsets: - "ec2" + - "sqs" access_key_id: '${AWS_ACCESS_KEY_ID:""}' secret_access_key: '${AWS_SECRET_ACCESS_KEY:""}' session_token: '${AWS_SESSION_TOKEN:""}' @@ -97,5 +98,9 @@ The following metricsets are available: * <> +* <> + include::aws/ec2.asciidoc[] +include::aws/sqs.asciidoc[] + diff --git a/metricbeat/docs/modules/aws/sqs.asciidoc b/metricbeat/docs/modules/aws/sqs.asciidoc new file mode 100644 index 000000000000..35f0373ab441 --- /dev/null +++ b/metricbeat/docs/modules/aws/sqs.asciidoc @@ -0,0 +1,23 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-metricset-aws-sqs]] +=== aws sqs metricset + +beta[] + +include::../../../../x-pack/metricbeat/module/aws/sqs/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../../x-pack/metricbeat/module/aws/sqs/_meta/data.json[] +---- diff --git a/metricbeat/docs/modules_list.asciidoc b/metricbeat/docs/modules_list.asciidoc index d32853540383..7aef20c7339f 100644 --- a/metricbeat/docs/modules_list.asciidoc +++ b/metricbeat/docs/modules_list.asciidoc @@ -10,7 +10,8 @@ This file is generated! 
See scripts/docs_collector.py |<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | .1+| .1+| |<> |<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | -.1+| .1+| |<> +.2+| .2+| |<> +|<> beta[] |<> |image:./images/icon-no.png[No prebuilt dashboards] | .7+| .7+| |<> |<> diff --git a/metricbeat/module/docker/container/_meta/data.json b/metricbeat/module/docker/container/_meta/data.json index fb7c93711ccc..881232869394 100644 --- a/metricbeat/module/docker/container/_meta/data.json +++ b/metricbeat/module/docker/container/_meta/data.json @@ -1,37 +1,59 @@ { "@timestamp": "2017-10-12T08:05:34.853Z", - "beat": { + "agent": { "hostname": "host.example.com", "name": "host.example.com" }, + "container": { + "id": "cc78e58acfda4501105dc4de8e3ae218f2da616213e6e3af168c40103829302a", + "image": { + "name": "metricbeat_elasticsearch" + }, + "name": "metricbeat_elasticsearch_1_df866b3a7b3d", + "runtime": "docker" + }, "docker": { "container": { - "command": "go test -tags=integration github.com/elastic/beats/metricbeat/module/... 
-data", - "created": "2017-12-07T07:20:57.000Z", - "id": "d88e67bb6961a5bb70c1c1c48094c6030e43768eed91e827f437111888f9967e", - "image": "metricbeatsnapshotnoxpack700alpha1d71419298b58ed8b0a5b60a6d1e4a476ffaf80a8_beat", + "command": "/usr/local/bin/docker-entrypoint.sh eswrapper", + "created": "2019-02-25T10:18:10.000Z", "ip_addresses": [ - "172.18.0.27" + "172.23.0.2" ], "labels": { + "com_docker_compose_config-hash": "e3e0a2c6e5d1afb741bc8b1ecb09cda0395886b7a3e5084a9fd110be46d70f78", "com_docker_compose_container-number": "1", - "com_docker_compose_oneoff": "True", - "com_docker_compose_project": "metricbeatsnapshotnoxpack700alpha1d71419298b58ed8b0a5b60a6d1e4a476ffaf80a8", - "com_docker_compose_service": "beat", - "com_docker_compose_version": "1.16.1" + "com_docker_compose_oneoff": "False", + "com_docker_compose_project": "metricbeat", + "com_docker_compose_service": "elasticsearch", + "com_docker_compose_slug": "df866b3a7b3d50c0802350cbe58ee5b34fa32b7f6ba7fe9e48cde2c12dd0201d", + "com_docker_compose_version": "1.23.1", + "license": "Elastic License", + "org_label-schema_build-date": "20181006", + "org_label-schema_license": "GPLv2", + "org_label-schema_name": "elasticsearch", + "org_label-schema_schema-version": "1.0", + "org_label-schema_url": "https://www.elastic.co/products/elasticsearch", + "org_label-schema_vcs-url": "https://github.com/elastic/elasticsearch-docker", + "org_label-schema_vendor": "Elastic", + "org_label-schema_version": "6.5.1" }, - "name": "metricbeatsnapshotnoxpack700alpha1d71419298b58ed8b0a5b60a6d1e4a476ffaf80a8_beat_run_1", "size": { "root_fs": 0, "rw": 0 }, - "status": "Up 6 minutes (healthy)" + "status": "Up 7 minutes (healthy)" } }, + "event": { + "dataset": "docker.container", + "duration": 115000, + "module": "docker" + }, "metricset": { - "host": "/var/run/docker.sock", - "module": "docker", - "name": "container", - "rtt": 115 + "name": "container" + }, + "service": { + "address": "/var/run/docker.sock", + "type": "docker" 
} } \ No newline at end of file diff --git a/metricbeat/module/docker/container/container.go b/metricbeat/module/docker/container/container.go index 0247c179ea75..6032493e755a 100644 --- a/metricbeat/module/docker/container/container.go +++ b/metricbeat/module/docker/container/container.go @@ -22,12 +22,15 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/client" + "github.com/pkg/errors" - "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/docker" ) +var logger = logp.NewLogger("docker.container") + func init() { mb.Registry.MustAddMetricSet("docker", "container", New, mb.WithHostParser(docker.HostParser), @@ -62,11 +65,14 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch returns a list of all containers as events. // This is based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/list-containers. -func (m *MetricSet) Fetch() ([]common.MapStr, error) { +func (m *MetricSet) Fetch(r mb.ReporterV2) { // Fetch a list of all containers. 
containers, err := m.dockerClient.ContainerList(context.Background(), types.ContainerListOptions{}) if err != nil { - return nil, err + err = errors.Wrap(err, "failed to get docker containers list") + logger.Error(err) + r.Error(err) + return } - return eventsMapping(containers, m.dedot), nil + eventsMapping(r, containers, m.dedot) } diff --git a/metricbeat/module/docker/container/container_integration_test.go b/metricbeat/module/docker/container/container_integration_test.go index 48cf0e2a6648..321a727fa433 100644 --- a/metricbeat/module/docker/container/container_integration_test.go +++ b/metricbeat/module/docker/container/container_integration_test.go @@ -22,13 +22,20 @@ package container import ( "testing" + "github.com/stretchr/testify/assert" + mbtest "github.com/elastic/beats/metricbeat/mb/testing" ) func TestData(t *testing.T) { - f := mbtest.NewEventsFetcher(t, getConfig()) - err := mbtest.WriteEvents(f, t) - if err != nil { + f := mbtest.NewReportingMetricSetV2(t, getConfig()) + events, errs := mbtest.ReportingFetchV2(f) + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. 
%v\n", len(errs), errs) + } + assert.NotEmpty(t, events) + + if err := mbtest.WriteEventsReporterV2(f, t, ""); err != nil { t.Fatal("write", err) } } diff --git a/metricbeat/module/docker/container/data.go b/metricbeat/module/docker/container/data.go index 80e4179fa150..c0bb07518faf 100644 --- a/metricbeat/module/docker/container/data.go +++ b/metricbeat/module/docker/container/data.go @@ -23,39 +23,49 @@ import ( "github.com/docker/docker/api/types" "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/docker" ) -func eventsMapping(containersList []types.Container, dedot bool) []common.MapStr { - myEvents := []common.MapStr{} +func eventsMapping(r mb.ReporterV2, containersList []types.Container, dedot bool) { for _, container := range containersList { - myEvents = append(myEvents, eventMapping(&container, dedot)) + eventMapping(r, &container, dedot) } - return myEvents } -func eventMapping(cont *types.Container, dedot bool) common.MapStr { +func eventMapping(r mb.ReporterV2, cont *types.Container, dedot bool) { event := common.MapStr{ - "created": common.Time(time.Unix(cont.Created, 0)), - "id": cont.ID, - "name": docker.ExtractContainerName(cont.Names), - "command": cont.Command, - "image": cont.Image, - "ip_addresses": extractIPAddresses(cont.NetworkSettings), - "size": common.MapStr{ - "root_fs": cont.SizeRootFs, - "rw": cont.SizeRw, + "container": common.MapStr{ + "id": cont.ID, + "image": common.MapStr{ + "name": cont.Image, + }, + "name": docker.ExtractContainerName(cont.Names), + "runtime": "docker", + }, + "docker": common.MapStr{ + "container": common.MapStr{ + "created": common.Time(time.Unix(cont.Created, 0)), + "command": cont.Command, + "ip_addresses": extractIPAddresses(cont.NetworkSettings), + "size": common.MapStr{ + "root_fs": cont.SizeRootFs, + "rw": cont.SizeRw, + }, + "status": cont.Status, + }, }, - "status": cont.Status, } labels := 
docker.DeDotLabels(cont.Labels, dedot) if len(labels) > 0 { - event["labels"] = labels + event.Put("docker.container.labels", labels) } - return event + r.Event(mb.Event{ + RootFields: event, + }) } func extractIPAddresses(networks *types.SummaryNetworkSettings) []string { diff --git a/metricbeat/module/docker/cpu/_meta/data.json b/metricbeat/module/docker/cpu/_meta/data.json index 5589d9858013..370558eed754 100644 --- a/metricbeat/module/docker/cpu/_meta/data.json +++ b/metricbeat/module/docker/cpu/_meta/data.json @@ -1,67 +1,84 @@ { "@timestamp": "2017-10-12T08:05:34.853Z", - "beat": { + "agent": { "hostname": "host.example.com", "name": "host.example.com" }, + "container": { + "id": "cc78e58acfda4501105dc4de8e3ae218f2da616213e6e3af168c40103829302a", + "image": { + "name": "metricbeat_elasticsearch" + }, + "name": "metricbeat_elasticsearch_1_df866b3a7b3d", + "runtime": "docker" + }, "docker": { "container": { - "id": "bbdcbc751e7eda7a50e773b4a5d8c2800af664f835ef9c0ad6bbb9c160c50d83", "labels": { - "build-date": "20170911", - "com_docker_compose_config-hash": "371e477ae73fd44b19bcbcf0d4feaf4de9adfb69137a8f16d09cff749724ca99", + "com_docker_compose_config-hash": "e3e0a2c6e5d1afb741bc8b1ecb09cda0395886b7a3e5084a9fd110be46d70f78", "com_docker_compose_container-number": "1", "com_docker_compose_oneoff": "False", "com_docker_compose_project": "metricbeat", "com_docker_compose_service": "elasticsearch", - "com_docker_compose_version": "1.21.0", - "license": "GPLv2", - "maintainer": "Elastic Docker Team \u003cdocker@elastic.co\u003e", - "name": "CentOS Base Image", - "vendor": "CentOS" - }, - "name": "metricbeat_elasticsearch_1" + "com_docker_compose_slug": "df866b3a7b3d50c0802350cbe58ee5b34fa32b7f6ba7fe9e48cde2c12dd0201d", + "com_docker_compose_version": "1.23.1", + "license": "Elastic License", + "org_label-schema_build-date": "20181006", + "org_label-schema_license": "GPLv2", + "org_label-schema_name": "elasticsearch", + "org_label-schema_schema-version": "1.0", + 
"org_label-schema_url": "https://www.elastic.co/products/elasticsearch", + "org_label-schema_vcs-url": "https://github.com/elastic/elasticsearch-docker", + "org_label-schema_vendor": "Elastic", + "org_label-schema_version": "6.5.1" + } }, "cpu": { "core": { "0": { - "pct": 0.01012583677581864, - "ticks": 9528454911 + "pct": 0.03263313721518987, + "ticks": 38346196894 }, "1": { - "pct": 0.0069975889168765746, - "ticks": 11916812270 + "pct": 0.014317838987341772, + "ticks": 37143007802 }, "2": { - "pct": 0.001329603022670025, - "ticks": 10894346015 + "pct": 0.0028625296202531647, + "ticks": 37194678570 }, "3": { - "pct": 0.0018390015113350126, - "ticks": 10847487614 + "pct": 0.005687502784810126, + "ticks": 39335551141 } }, "kernel": { - "pct": 0.010075566750629723, - "ticks": 1960000000 + "pct": 0.010126582278481013, + "ticks": 10560000000 }, "system": { "pct": 4, - "ticks": 1092479570000000 + "ticks": 5566563680000000 }, "total": { - "pct": 0.02029203022670025 + "pct": 0.05550100860759494 }, "user": { - "pct": 0.010075566750629723, - "ticks": 40960000000 + "pct": 0.05063291139240506, + "ticks": 139520000000 } } }, + "event": { + "dataset": "docker.cpu", + "duration": 115000, + "module": "docker" + }, "metricset": { - "host": "/var/run/docker.sock", - "module": "docker", - "name": "cpu", - "rtt": 115 + "name": "cpu" + }, + "service": { + "address": "/var/run/docker.sock", + "type": "docker" } } \ No newline at end of file diff --git a/metricbeat/module/docker/cpu/cpu.go b/metricbeat/module/docker/cpu/cpu.go index 954eb37d6d21..a7dd10cf8926 100644 --- a/metricbeat/module/docker/cpu/cpu.go +++ b/metricbeat/module/docker/cpu/cpu.go @@ -19,12 +19,15 @@ package cpu import ( "github.com/docker/docker/client" + "github.com/pkg/errors" - "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/docker" ) +var 
logger = logp.NewLogger("docker.cpu") + func init() { mb.Registry.MustAddMetricSet("docker", "cpu", New, mb.WithHostParser(docker.HostParser), @@ -69,12 +72,15 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { } // Fetch returns a list of docker CPU stats. -func (m *MetricSet) Fetch() ([]common.MapStr, error) { +func (m *MetricSet) Fetch(r mb.ReporterV2) { stats, err := docker.FetchStats(m.dockerClient, m.Module().Config().Timeout) if err != nil { - return nil, err + err = errors.Wrap(err, "failed to get docker stats") + logger.Error(err) + r.Error(err) + return } formattedStats := m.cpuService.getCPUStatsList(stats, m.dedot) - return eventsMapping(formattedStats), nil + eventsMapping(r, formattedStats) } diff --git a/metricbeat/module/docker/cpu/cpu_integration_test.go b/metricbeat/module/docker/cpu/cpu_integration_test.go index 209f66335586..1dad5cb1dd8f 100644 --- a/metricbeat/module/docker/cpu/cpu_integration_test.go +++ b/metricbeat/module/docker/cpu/cpu_integration_test.go @@ -22,13 +22,20 @@ package cpu import ( "testing" + "github.com/stretchr/testify/assert" + mbtest "github.com/elastic/beats/metricbeat/mb/testing" ) func TestData(t *testing.T) { - f := mbtest.NewEventsFetcher(t, getConfig()) - err := mbtest.WriteEvents(f, t) - if err != nil { + f := mbtest.NewReportingMetricSetV2(t, getConfig()) + events, errs := mbtest.ReportingFetchV2(f) + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. 
%v\n", len(errs), errs) + } + assert.NotEmpty(t, events) + + if err := mbtest.WriteEventsReporterV2(f, t, ""); err != nil { t.Fatal("write", err) } } diff --git a/metricbeat/module/docker/cpu/data.go b/metricbeat/module/docker/cpu/data.go index 3cf93fe93d68..a8c3042ed4d4 100644 --- a/metricbeat/module/docker/cpu/data.go +++ b/metricbeat/module/docker/cpu/data.go @@ -22,19 +22,14 @@ import ( "github.com/elastic/beats/metricbeat/mb" ) -func eventsMapping(cpuStatsList []CPUStats) []common.MapStr { - events := []common.MapStr{} +func eventsMapping(r mb.ReporterV2, cpuStatsList []CPUStats) { for _, cpuStats := range cpuStatsList { - events = append(events, eventMapping(&cpuStats)) + eventMapping(r, &cpuStats) } - return events } -func eventMapping(stats *CPUStats) common.MapStr { - event := common.MapStr{ - mb.ModuleDataKey: common.MapStr{ - "container": stats.Container.ToMapStr(), - }, +func eventMapping(r mb.ReporterV2, stats *CPUStats) { + fields := common.MapStr{ "core": stats.PerCpuUsage, "total": common.MapStr{ "pct": stats.TotalUsage, @@ -53,5 +48,8 @@ func eventMapping(stats *CPUStats) common.MapStr { }, } - return event + r.Event(mb.Event{ + RootFields: stats.Container.ToMapStr(), + MetricSetFields: fields, + }) } diff --git a/metricbeat/module/docker/diskio/_meta/data.json b/metricbeat/module/docker/diskio/_meta/data.json index 3adb0cc16750..2fba47412c8b 100644 --- a/metricbeat/module/docker/diskio/_meta/data.json +++ b/metricbeat/module/docker/diskio/_meta/data.json @@ -1,52 +1,69 @@ { "@timestamp": "2017-10-12T08:05:34.853Z", - "beat": { + "agent": { "hostname": "host.example.com", "name": "host.example.com" }, + "container": { + "id": "cc78e58acfda4501105dc4de8e3ae218f2da616213e6e3af168c40103829302a", + "image": { + "name": "metricbeat_elasticsearch" + }, + "name": "metricbeat_elasticsearch_1_df866b3a7b3d", + "runtime": "docker" + }, "docker": { "container": { - "id": "59c5d4838454f38c7d67fdacec7a32ca4476a062ef00edf69ba6be9117cf2e7b", "labels": { - 
"build-date": "20170911", - "com_docker_compose_config-hash": "a2bcfc1f8c99a4be6920deda8da8d4d06fe0d10d51623b8e1dbcc8228e96926c", + "com_docker_compose_config-hash": "e3e0a2c6e5d1afb741bc8b1ecb09cda0395886b7a3e5084a9fd110be46d70f78", "com_docker_compose_container-number": "1", "com_docker_compose_oneoff": "False", "com_docker_compose_project": "metricbeat", "com_docker_compose_service": "elasticsearch", - "com_docker_compose_version": "1.20.1", - "license": "GPLv2", - "maintainer": "Elastic Docker Team \u003cdocker@elastic.co\u003e", - "name": "CentOS Base Image", - "vendor": "CentOS" - }, - "name": "metricbeat_elasticsearch_1" + "com_docker_compose_slug": "df866b3a7b3d50c0802350cbe58ee5b34fa32b7f6ba7fe9e48cde2c12dd0201d", + "com_docker_compose_version": "1.23.1", + "license": "Elastic License", + "org_label-schema_build-date": "20181006", + "org_label-schema_license": "GPLv2", + "org_label-schema_name": "elasticsearch", + "org_label-schema_schema-version": "1.0", + "org_label-schema_url": "https://www.elastic.co/products/elasticsearch", + "org_label-schema_vcs-url": "https://github.com/elastic/elasticsearch-docker", + "org_label-schema_vendor": "Elastic", + "org_label-schema_version": "6.5.1" + } }, "diskio": { "read": { - "bytes": 61964288, - "ops": 3284, + "bytes": 998932480, + "ops": 8473, "rate": 0 }, "reads": 0, "summary": { - "bytes": 63479808, - "ops": 3500, + "bytes": 1090650112, + "ops": 9585, "rate": 0 }, "total": 0, "write": { - "bytes": 1515520, - "ops": 216, + "bytes": 91717632, + "ops": 1112, "rate": 0 }, "writes": 0 } }, + "event": { + "dataset": "docker.diskio", + "duration": 115000, + "module": "docker" + }, "metricset": { - "host": "/var/run/docker.sock", - "module": "docker", - "name": "diskio", - "rtt": 115 + "name": "diskio" + }, + "service": { + "address": "/var/run/docker.sock", + "type": "docker" } } \ No newline at end of file diff --git a/metricbeat/module/docker/diskio/data.go b/metricbeat/module/docker/diskio/data.go index 
5613c4f713b5..2a7d4cb9569a 100644 --- a/metricbeat/module/docker/diskio/data.go +++ b/metricbeat/module/docker/diskio/data.go @@ -22,19 +22,14 @@ import ( "github.com/elastic/beats/metricbeat/mb" ) -func eventsMapping(blkioStatsList []BlkioStats) []common.MapStr { - myEvents := []common.MapStr{} +func eventsMapping(r mb.ReporterV2, blkioStatsList []BlkioStats) { for _, blkioStats := range blkioStatsList { - myEvents = append(myEvents, eventMapping(&blkioStats)) + eventMapping(r, &blkioStats) } - return myEvents } -func eventMapping(stats *BlkioStats) common.MapStr { - event := common.MapStr{ - mb.ModuleDataKey: common.MapStr{ - "container": stats.Container.ToMapStr(), - }, +func eventMapping(r mb.ReporterV2, stats *BlkioStats) { + fields := common.MapStr{ "reads": stats.reads, "writes": stats.writes, "total": stats.totals, @@ -55,5 +50,8 @@ func eventMapping(stats *BlkioStats) common.MapStr { }, } - return event + r.Event(mb.Event{ + RootFields: stats.Container.ToMapStr(), + MetricSetFields: fields, + }) } diff --git a/metricbeat/module/docker/diskio/diskio.go b/metricbeat/module/docker/diskio/diskio.go index 994823bba57d..d300e92d3e46 100644 --- a/metricbeat/module/docker/diskio/diskio.go +++ b/metricbeat/module/docker/diskio/diskio.go @@ -19,12 +19,15 @@ package diskio import ( "github.com/docker/docker/client" + "github.com/pkg/errors" - "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/docker" ) +var logger = logp.NewLogger("docker.diskio") + func init() { mb.Registry.MustAddMetricSet("docker", "diskio", New, mb.WithHostParser(docker.HostParser), @@ -60,12 +63,15 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { } // Fetch creates list of events with diskio stats for all containers. 
-func (m *MetricSet) Fetch() ([]common.MapStr, error) { +func (m *MetricSet) Fetch(r mb.ReporterV2) { stats, err := docker.FetchStats(m.dockerClient, m.Module().Config().Timeout) if err != nil { - return nil, err + err = errors.Wrap(err, "failed to get docker stats") + logger.Error(err) + r.Error(err) + return } formattedStats := m.blkioService.getBlkioStatsList(stats, m.dedot) - return eventsMapping(formattedStats), nil + eventsMapping(r, formattedStats) } diff --git a/metricbeat/module/docker/diskio/diskio_integration_test.go b/metricbeat/module/docker/diskio/diskio_integration_test.go index 1f3a7d1ceed8..21e3b20c20d6 100644 --- a/metricbeat/module/docker/diskio/diskio_integration_test.go +++ b/metricbeat/module/docker/diskio/diskio_integration_test.go @@ -22,13 +22,20 @@ package diskio import ( "testing" + "github.com/stretchr/testify/assert" + mbtest "github.com/elastic/beats/metricbeat/mb/testing" ) func TestData(t *testing.T) { - f := mbtest.NewEventsFetcher(t, getConfig()) - err := mbtest.WriteEvents(f, t) - if err != nil { + f := mbtest.NewReportingMetricSetV2(t, getConfig()) + events, errs := mbtest.ReportingFetchV2(f) + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) + } + assert.NotEmpty(t, events) + + if err := mbtest.WriteEventsReporterV2(f, t, ""); err != nil { t.Fatal("write", err) } } diff --git a/metricbeat/module/docker/fields.go b/metricbeat/module/docker/fields.go index f2e8dfeab15b..ec367b3a5a70 100644 --- a/metricbeat/module/docker/fields.go +++ b/metricbeat/module/docker/fields.go @@ -32,5 +32,5 @@ func init() { // AssetDocker returns asset data. // This is the base64 encoded gzipped contents of ../metricbeat/module/docker. 
func AssetDocker() string { - return "eJzsm82O2zgSgO/9FIXsYYFF2sYugj34sMBOMoPxIUkjSc/VoKmSzbFEavhjt/P0A5KSJUuUZMuyuzuIjpZd9bFYPySLvocN7mcQCbpBeQegmU5wBm8+uA/e3AFEqKhkmWaCz+B/dwAA/iUoTbQCKpIEqcYIYinS/N3kDkBigkThDFbkDkCthdQLKnjMVjOISaLwDiBmmERq5qTeAycpVljso/eZlSCFyfJPAjz2mfNYyJTYj4HwyMExpRlVQJbC6FzsPxVIwznjK6CCa8I4SjXJpVRpqkSHbx7ehMA64CpGO8iCFLVk9KDcPscmK5461jFamhIeHb0r4Da43wlZf9eBaJ/3XiDoNdGwIwrwCamx08s46DU2xjEJc0kkGsNcEdF4HtQHohF2a/QEpQktX64pjGG9wKgxrVOo9pLDWlm2IFEkUSkM62bZULXzBziIbhky+163bthXzxsu+44hj4UW/6wSSSH0Iq5bogRLBF8FXvawueeb0CTxcCIGkiTOQWKWoCr8tcVRjwB312D7mlOVRC6m1mSLsETkheeCkEDXhK8wAsU4Rf+CCR6eYE1WI3r0PCUrdDInzbyXmUsy3hfDNUsR3j88jpPsNig5JpOM6uD4FSUJRos4EaT+BV8bZpChpMg1WZ2ZgB4Ov3MzakfFeM4DKiMUw3OVE2tGN+E5C/hXX0w+PIKTdxqB2iuN6QuwmYtUD++tZ+Mip+siH9t2Xqw3YVixUShfgMFyM1margl2tFdzsD7tblqfw1jfDv5kFFm10FEhcfKvVjyx/BMbr/yHixvP9nFgMOXRuwbVPuX9wzrfKT6ZdImyJM3dI4B6WMYztWHiohUzUxuYTz+PUzwkkvBydMC66P+UmtQkrnpbuQoiI+2ewua0hMWHuh/aObSBVmFFdo3lUjmJg6BLvOVeN1a2vYBFwLT9+IQB/GJ/6uCHs8vm7qMX/SzbUiMlcp3bOLP5E6mo7dGqXhmO4o7UE2EmkVrvm8F/J++GRvJZoDvJGnYbJX6c4FcXQMOoX0oEWXqN/BUEUW7nU5zzucPoNFRl0pTI/fUqEeGvNaZsqRcZSrf3fbWx5apTMQmvI8gqRu/xXrfcf6Y4a7h3gLXgxC1yfdHa059y1uWcv+wc+wTyV0sUkno4fBzxMNgrY5E/eyVbwhKyTDCoN5YiHX2YwkgaVmdlj6fu2xrdz62f+Z0SYMq0LgK37gclB6FW5nVIOrWKeuK4oIY0hfXVgoaX9Q37BI5i+PMPRY48bSqODKO1ZEvTVQCC22Oob5EvGsUfRDJhlBUy3ZLEYIXreGxvbXZEHtnRCQ5Mq2PPLsa1RpLoNV0j3YyQ1kZuQsWEJYyvlJZINkGvZFzjqlHqeixJBafGFSirACOoD/96Wfb30ty5bKAiCmeikEMOjkUnLDf2uX0W5NEi0FuDrsbbCUh1eyCPwpIqkyH1LUicom4WYXRmQsE+QpRXUVr0HKbmielFw4OqJOEIGWaUhrsesnZ6fEw5NH84OePkjpblyoDQsaXDgyVkj1IBi5BrFrNm668vkvKF8XXcBh45+8sUrCUkrNgWOZgsrwPhLmAVMyNXpJyXYHnFcsBvgcXAtPVopdVbf+ljt2Z07Xc1+ZbCDy5iEqlO9k4h8npKu+JlAXeFgaWVWwOeqP/GwIjtc99bdd1p75Ln+uGWSW0aGy4YuzvtTNPRHseVSUgoNY3cv7csJEmAErrGyGMpIEoJytwJhxZNJyu5g/OZkCUmQ/skF3TUvd4euNu18hmPL+rFzHksioQPS6IwssvVtdaZmk2nkaBq4i9YTahIp8hXjONUYowSOcUpydjUv19ITIXGBcnYYvvvyX/eTf8xjZjKErK/963Z+x2L8J6V97kuvSFVXPMaK6w/b1E6Nz26DHR2cGfEqEbSgxGiygcVPxyceEWB+25Npvxu3A2g2m/hNamUFll2E1Plmk6iCp2FXY
PJVdouU13j5Cdfo+TrvbVQOricCnO4vB1kOb/x3GoNr6WZ6VJMxdH5+tm57qOTMHx527pHnlBhWraKncennQb6jTCbigzXbXcxE5aysNbAdHSdg/eQ5HZz6sIkUo2WhL98/ZpP9bDsOyh6R2gSeH/OySUqF1WgULtVUMdaP3hk1es8cMpNlRPRP7ZAVwS3XVFq6hw88Y/K7z2HT31Knp5h4j+Sp4I6cGMJXuRUO9DW6YWXFk41uxZsHPVOyIvOTT95ESPXBGZzdkzoiI2MAvQg2qlqWS+E2xbhmLyghTfnVKS2ZOcTkf9xpGzfnRvAz9Xgra9FWDEwJ7M9OCLZvX7tCeoBZLnGkjAjdIPNVFk5oZRSNLZIMNpy1ot3LY6TkfIv3GCJ3c1UOUu+TcB8NnolfsSAEcXAXmzAHAhfTsCcjnS7gOlmKgvMUpiWP6fdqsr4f9Qc/23M9Y3qB8CvJ4rGKjvjucPPcnOdcnPD8GmpOT9g+IxVhMYPn5/FZ2Dx+TsAAP//9lmlhw==" + return "eJzsm82O2zgSgO/9FIXsYYFF2sYugj34sMBOMoPxIUkjSc/VoKmSzbFEavhjt/P0A5KSJUuUZMuyuzuIjpZd9bFYPySLvocN7mcQCbpBeQegmU5wBm8+uA/e3AFEqKhkmWaCz+B/dwAA/iUoTbQCKpIEqcYIYinS/N3kDkBigkThDFbkDkCthdQLKnjMVjOISaLwDiBmmERq5qTeAycpVljso/eZlSCFyfJPAjz2mfNYyJTYj4HwyMExpRlVQJbC6FzsPxVIwznjK6CCa8I4SjXJpVRpqkSHbx7ehMA64CpGO8iCFLVk9KDcPscmK5461jFamhIeHb0r4Da43wlZf9eBaJ/3XiDoNdGwIwrwCamx08s46DU2xjEJc0kkGsNcEdF4HtQHohF2a/QEpQktX64pjGG9wKgxrVOo9pLDWlm2IFEkUSkM62bZULXzBziIbhky+163bthXzxsu+44hj4UW/6wSSSH0Iq5bogRLBF8FXvawueeb0CTxcCIGkiTOQWKWoCr8tcVRjwB312D7mlOVRC6m1mSLsETkheeCkEDXhK8wAsU4Rf+CCR6eYE1WI3r0PCUrdDInzbyXmUsy3hfDNUsR3j88jpPsNig5JpOM6uD4FSUJRos4EaT+BV8bZpChpMg1WZ2ZgB4Ov3MzakfFeM4DKiMUw3OVE2tGN+E5C/hXX0w+PIKTdxqB2iuN6QuwmYtUD++tZ+Mip+siH9t2Xqw3YVixUShfgMFyM1margl2tFdzsD7tblqfw1jfDv5kFFm10FEhcfKvVjyx/BMbr/yHixvP9nFgMOXRuwbVPuX9wzrfKT6ZdImyJM3dI4B6WMYztWHiohUzUxuYTz+PUzwkkvBydMC66P+UmtQkrnpbuQoiI+2ewua0hMWHuh/aObSBVmFFdo3lUjmJg6BLvOVeN1a2vYBFwLT9+IQB/GJ/6uCHs8vm7qMX/SzbUiMlcp3bOLP5E6mo7dGqXhmO4o7UE2EmkVrvm8F/J++GRvJZoDvJGnYbJX6c4FcXQMOoX0oEWXqN/BUEUW7nU5zzucPoNFRl0pTI/fUqEeGvNaZsqRcZSrf3fbWx5apTMQmvI8gqRu/xXrfcf6Y4a7h3gLXgxC1yfdHa059y1uWcv+wc+wTyV0sUkno4fBzxMNgrY5E/eyVbwhKyTDCoN5YiHX2YwkgaVmdlj6fu2xrdz62f+Z0SYMq0LgK37gclB6FW5nVIOrWKeuK4oIY0hfXVgoaX9Q37BI5i+PMPRY48bSqODKO1ZEvTVQCC22Oob5EvGsUfRDJhlBUy3ZLEYIXreGxvbXZEHtnRCQ5Mq2PPLsa1RpLoNV0j3YyQ1kZuQsWEJYyvlJZINkGvZFzjqlHqeixJBafGFSirACOoD/96Wfb30ty5bKAiCmeikEMOjkUnLDf2uX0W5NEi0FuDrsbbCUh1eyCPwpIqkyH1LUicom4WYXRmQs
E+QpRXUVr0HKbmielFw4OqJOEIGWaUhrsesnZ6fEw5NH84OePkjpblyoDQsaXDgyVkj1IBi5BrFrNm668vkvKF8XXcBh45+8sUrCUkrNgWOZgsrwPhLmAVMyNXpJyXYHnFcsBvgcXAtPVopdVbf+ljt2Z07Xc1+ZbCDy5iEqlO9k4h8npKu+JlAXeFgaWVWwOeqP/GwIjtc99bdd1p75Ln+uGWSW0aGy4YuzvtTNPRHseVSUgoNY3cv7csJEmAErrGyGMpIEoJytwJhxZNJyu5g/OZkCUmQ/skF3TUvd4euNu18hmPL+rFzHksioQPS6IwssvVtdaZmk2nkaBq4i9YTahIp8hXjONUYowSOcUpydjUv19ITIXGBcnYYvvvyX/eTf8xjZjKErK/963Z+x2L8J6V97kuvSFVXPMaK6w/b1E6Nz26DHR2cGfEqEbSgxGiygcVPxyceEWB+25Npvxu3A2g2m/hNamUFll2E1Plmk6iCp2FXYPJVdouU13j5Cdfo+TrvbVQOricCnO4vB1kOb/x3GoNr6WZ6VJMxdH5+tm57qOTMHx527pHnlBhWraKncennQb6jTCbigzXbXcxE5aysNbAdHSdg/eQ5HZz6sIkUo2WhL98/ZpP9bDsOyh6R2gSeH/OySUqF1WgULtVUMdaP3hk1es8cMpNlRPRP7ZAVwS3XVFq6hw88Y/K7z2HT31Knp5h4j+Sp4I6cGMJXuRUO9DW6YWXFk41uxZsHPVOyIvOTT95ESPXBGZzdkzoiI2MAvQg2qlqWS+E2xbhmLyghTfnVKS2ZOcTkf9xpGzfnRvAz9Xgra9FWDEwJ7M9OCLZvX7tCeoBZLnGkjAjdIPNVFk5oZRSNLZIMNpy1ot3LY6TkfIv3GCJ3c1UOUu+TcB8NnolfsSAEcXAXmzAHAhfTsCcjnS7gOlmKgvMUpiWP6cNOU4N1xH/n5njP4a5zlD9iPf1xMlYhWW8Cf9ZUK5TUEYNkJa68QMGyFiFZPwA+VlABhaQvwMAAP//ay+X7Q==" } diff --git a/metricbeat/module/docker/healthcheck/_meta/data.json b/metricbeat/module/docker/healthcheck/_meta/data.json index ce47fb6706e6..ace6af48c147 100644 --- a/metricbeat/module/docker/healthcheck/_meta/data.json +++ b/metricbeat/module/docker/healthcheck/_meta/data.json @@ -1,37 +1,59 @@ { "@timestamp": "2017-10-12T08:05:34.853Z", - "beat": { + "agent": { "hostname": "host.example.com", "name": "host.example.com" }, + "container": { + "id": "cc78e58acfda4501105dc4de8e3ae218f2da616213e6e3af168c40103829302a", + "image": { + "name": "metricbeat_elasticsearch" + }, + "name": "metricbeat_elasticsearch_1_df866b3a7b3d", + "runtime": "docker" + }, "docker": { "container": { - "id": "228848ff8ea238430c4e9cc21deb3175ea5c77483a7f9f4b7c42f7cd6d18c0fb", "labels": { - "com_docker_compose_config-hash": "13347b7d96bf1b3f1ddc089bdef099e42dde235e", + "com_docker_compose_config-hash": "e3e0a2c6e5d1afb741bc8b1ecb09cda0395886b7a3e5084a9fd110be46d70f78", 
"com_docker_compose_container-number": "1", "com_docker_compose_oneoff": "False", "com_docker_compose_project": "metricbeat", - "com_docker_compose_service": "aerospike", - "com_docker_compose_version": "1.5.0" - }, - "name": "metricbeat_aerospike_1" + "com_docker_compose_service": "elasticsearch", + "com_docker_compose_slug": "df866b3a7b3d50c0802350cbe58ee5b34fa32b7f6ba7fe9e48cde2c12dd0201d", + "com_docker_compose_version": "1.23.1", + "license": "Elastic License", + "org_label-schema_build-date": "20181006", + "org_label-schema_license": "GPLv2", + "org_label-schema_name": "elasticsearch", + "org_label-schema_schema-version": "1.0", + "org_label-schema_url": "https://www.elastic.co/products/elasticsearch", + "org_label-schema_vcs-url": "https://github.com/elastic/elasticsearch-docker", + "org_label-schema_vendor": "Elastic", + "org_label-schema_version": "6.5.1" + } }, "healthcheck": { "event": { - "end_date": "2017-12-07T07:27:18.738Z", - "exit_code": 1, - "output": "", - "start_date": "2017-12-07T07:27:17.416Z" + "end_date": "2019-02-25T10:59:07.472Z", + "exit_code": 0, + "output": " % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n\r 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0\r100 338 100 338 0 0 13188 0 --:--:-- --:--:-- --:--:-- 13520\n{\n \"license\" : {\n \"status\" : \"active\",\n \"uid\" : \"ea5a516e-d9ee-4131-8eec-b39741e80869\",\n \"type\" : \"basic\",\n \"issue_date\" : \"2019-02-25T10:18:24.885Z\",\n \"issue_date_in_millis\" : 1551089904885,\n \"max_nodes\" : 1000,\n \"issued_to\" : \"docker-cluster\",\n \"issuer\" : \"elasticsearch\",\n \"start_date_in_millis\" : -1\n }\n}\n", + "start_date": "2019-02-25T10:59:07.342Z" }, - "failingstreak": 1, - "status": "starting" + "failingstreak": 0, + "status": "healthy" } }, + "event": { + "dataset": "docker.healthcheck", + "duration": 115000, + "module": "docker" + }, "metricset": { - "host": "/var/run/docker.sock", - "module": "docker", - 
"name": "healthcheck", - "rtt": 115 + "name": "healthcheck" + }, + "service": { + "address": "/var/run/docker.sock", + "type": "docker" } } \ No newline at end of file diff --git a/metricbeat/module/docker/healthcheck/data.go b/metricbeat/module/docker/healthcheck/data.go index dc91093c9186..673e15a73230 100644 --- a/metricbeat/module/docker/healthcheck/data.go +++ b/metricbeat/module/docker/healthcheck/data.go @@ -24,49 +24,40 @@ import ( "github.com/docker/docker/api/types" "github.com/elastic/beats/libbeat/common" - "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/docker" ) -func eventsMapping(containers []types.Container, m *MetricSet) []common.MapStr { - var events []common.MapStr +func eventsMapping(r mb.ReporterV2, containers []types.Container, m *MetricSet) { for _, container := range containers { - event := eventMapping(&container, m) - if event != nil { - events = append(events, event) - } + eventMapping(r, &container, m) } - return events } -func eventMapping(cont *types.Container, m *MetricSet) common.MapStr { +func eventMapping(r mb.ReporterV2, cont *types.Container, m *MetricSet) { if !hasHealthCheck(cont.Status) { - return nil + return } container, err := m.dockerClient.ContainerInspect(context.TODO(), cont.ID) if err != nil { - logp.Err("Error inspecting container %v: %v", cont.ID, err) - return nil + logger.Error("Error inspecting container %v: %v", cont.ID, err) + return } // Check if the container has any health check if container.State.Health == nil { - return nil + return } lastEvent := len(container.State.Health.Log) - 1 // Checks if a healthcheck already happened if lastEvent < 0 { - return nil + return } - return common.MapStr{ - mb.ModuleDataKey: common.MapStr{ - "container": docker.NewContainer(cont, m.dedot).ToMapStr(), - }, + fields := common.MapStr{ "status": container.State.Health.Status, "failingstreak": 
container.State.Health.FailingStreak, "event": common.MapStr{ @@ -76,6 +67,11 @@ func eventMapping(cont *types.Container, m *MetricSet) common.MapStr { "output": container.State.Health.Log[lastEvent].Output, }, } + + r.Event(mb.Event{ + RootFields: docker.NewContainer(cont, m.dedot).ToMapStr(), + MetricSetFields: fields, + }) } // hasHealthCheck detects if healthcheck is available for container diff --git a/metricbeat/module/docker/healthcheck/healthcheck.go b/metricbeat/module/docker/healthcheck/healthcheck.go index 781871587cc1..64c710140e8e 100644 --- a/metricbeat/module/docker/healthcheck/healthcheck.go +++ b/metricbeat/module/docker/healthcheck/healthcheck.go @@ -22,12 +22,15 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/client" + "github.com/pkg/errors" - "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/docker" ) +var logger = logp.NewLogger("docker.healthcheck") + func init() { mb.Registry.MustAddMetricSet("docker", "healthcheck", New, mb.WithHostParser(docker.HostParser), @@ -62,11 +65,14 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch returns a list of all containers as events. // This is based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/list-containers. -func (m *MetricSet) Fetch() ([]common.MapStr, error) { +func (m *MetricSet) Fetch(r mb.ReporterV2) { // Fetch a list of all containers. 
containers, err := m.dockerClient.ContainerList(context.TODO(), types.ContainerListOptions{}) if err != nil { - return nil, err + err = errors.Wrap(err, "failed to get docker containers list") + logger.Error(err) + r.Error(err) + return } - return eventsMapping(containers, m), nil + eventsMapping(r, containers, m) } diff --git a/metricbeat/module/docker/healthcheck/healthcheck_integration_test.go b/metricbeat/module/docker/healthcheck/healthcheck_integration_test.go index 0448a1e3313e..79dfeb00ab77 100644 --- a/metricbeat/module/docker/healthcheck/healthcheck_integration_test.go +++ b/metricbeat/module/docker/healthcheck/healthcheck_integration_test.go @@ -22,13 +22,20 @@ package healthcheck import ( "testing" + "github.com/stretchr/testify/assert" + mbtest "github.com/elastic/beats/metricbeat/mb/testing" ) func TestData(t *testing.T) { - f := mbtest.NewEventsFetcher(t, getConfig()) - err := mbtest.WriteEvents(f, t) - if err != nil { + f := mbtest.NewReportingMetricSetV2(t, getConfig()) + events, errs := mbtest.ReportingFetchV2(f) + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. 
%v\n", len(errs), errs) + } + assert.NotEmpty(t, events) + + if err := mbtest.WriteEventsReporterV2(f, t, ""); err != nil { t.Fatal("write", err) } } diff --git a/metricbeat/module/docker/helper.go b/metricbeat/module/docker/helper.go index 1306b89e8c7f..a4284db25d32 100644 --- a/metricbeat/module/docker/helper.go +++ b/metricbeat/module/docker/helper.go @@ -35,13 +35,18 @@ type Container struct { func (c *Container) ToMapStr() common.MapStr { m := common.MapStr{ - "id": c.ID, - "name": c.Name, - "image": c.Image, + "container": common.MapStr{ + "id": c.ID, + "name": c.Name, + "image": common.MapStr{ + "name": c.Image, + }, + "runtime": "docker", + }, } if len(c.Labels) > 0 { - m["labels"] = c.Labels + m.Put("docker.container.labels", c.Labels) } return m } diff --git a/metricbeat/module/docker/memory/_meta/data.json b/metricbeat/module/docker/memory/_meta/data.json index 10c6587f43c9..8537971aacac 100644 --- a/metricbeat/module/docker/memory/_meta/data.json +++ b/metricbeat/module/docker/memory/_meta/data.json @@ -1,47 +1,64 @@ { "@timestamp": "2017-10-12T08:05:34.853Z", - "beat": { + "agent": { "hostname": "host.example.com", "name": "host.example.com" }, + "container": { + "id": "cc78e58acfda4501105dc4de8e3ae218f2da616213e6e3af168c40103829302a", + "image": { + "name": "metricbeat_elasticsearch" + }, + "name": "metricbeat_elasticsearch_1_df866b3a7b3d", + "runtime": "docker" + }, "docker": { "container": { - "id": "da57ef738524e5a4f6ae17b477f134d30719603db7b96d2c01b6f34010412e66", "labels": { - "build-date": "20170911", - "com_docker_compose_config-hash": "13a74b89a90a6fdcb6bbbc7eb37b7cb0615bdaf8", + "com_docker_compose_config-hash": "e3e0a2c6e5d1afb741bc8b1ecb09cda0395886b7a3e5084a9fd110be46d70f78", "com_docker_compose_container-number": "1", "com_docker_compose_oneoff": "False", "com_docker_compose_project": "metricbeat", "com_docker_compose_service": "elasticsearch", - "com_docker_compose_version": "1.5.0", - "license": "GPLv2", - "maintainer": "Elastic Docker 
Team \u003cdocker@elastic.co\u003e", - "name": "CentOS Base Image", - "vendor": "CentOS" - }, - "name": "metricbeat_elasticsearch_1" + "com_docker_compose_slug": "df866b3a7b3d50c0802350cbe58ee5b34fa32b7f6ba7fe9e48cde2c12dd0201d", + "com_docker_compose_version": "1.23.1", + "license": "Elastic License", + "org_label-schema_build-date": "20181006", + "org_label-schema_license": "GPLv2", + "org_label-schema_name": "elasticsearch", + "org_label-schema_schema-version": "1.0", + "org_label-schema_url": "https://www.elastic.co/products/elasticsearch", + "org_label-schema_vcs-url": "https://github.com/elastic/elasticsearch-docker", + "org_label-schema_vendor": "Elastic", + "org_label-schema_version": "6.5.1" + } }, "memory": { "fail": { "count": 0 }, - "limit": 4139122688, + "limit": 16571719680, "rss": { - "pct": 0.08521683713860477, - "total": 352722944 + "pct": 0.02170185104168984, + "total": 359636992 }, "usage": { - "max": 370184192, - "pct": 0.08716433389277685, - "total": 360783872 + "max": 521326592, + "pct": 0.029796111057558028, + "total": 493772800 } } }, + "event": { + "dataset": "docker.memory", + "duration": 115000, + "module": "docker" + }, "metricset": { - "host": "/var/run/docker.sock", - "module": "docker", - "name": "memory", - "rtt": 115 + "name": "memory" + }, + "service": { + "address": "/var/run/docker.sock", + "type": "docker" } } \ No newline at end of file diff --git a/metricbeat/module/docker/memory/data.go b/metricbeat/module/docker/memory/data.go index 6504a64b36b6..95346ac83d37 100644 --- a/metricbeat/module/docker/memory/data.go +++ b/metricbeat/module/docker/memory/data.go @@ -22,19 +22,14 @@ import ( "github.com/elastic/beats/metricbeat/mb" ) -func eventsMapping(memoryDataList []MemoryData) []common.MapStr { - events := []common.MapStr{} +func eventsMapping(r mb.ReporterV2, memoryDataList []MemoryData) { for _, memoryData := range memoryDataList { - events = append(events, eventMapping(&memoryData)) + eventMapping(r, 
&memoryData) } - return events } -func eventMapping(memoryData *MemoryData) common.MapStr { - event := common.MapStr{ - mb.ModuleDataKey: common.MapStr{ - "container": memoryData.Container.ToMapStr(), - }, +func eventMapping(r mb.ReporterV2, memoryData *MemoryData) { + fields := common.MapStr{ "fail": common.MapStr{ "count": memoryData.Failcnt, }, @@ -49,5 +44,9 @@ func eventMapping(memoryData *MemoryData) common.MapStr { "max": memoryData.MaxUsage, }, } - return event + + r.Event(mb.Event{ + RootFields: memoryData.Container.ToMapStr(), + MetricSetFields: fields, + }) } diff --git a/metricbeat/module/docker/memory/memory.go b/metricbeat/module/docker/memory/memory.go index 466b6ca38d06..240ac448f0b2 100644 --- a/metricbeat/module/docker/memory/memory.go +++ b/metricbeat/module/docker/memory/memory.go @@ -19,12 +19,15 @@ package memory import ( "github.com/docker/docker/client" + "github.com/pkg/errors" - "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/docker" ) +var logger = logp.NewLogger("docker.memory") + func init() { mb.Registry.MustAddMetricSet("docker", "memory", New, mb.WithHostParser(docker.HostParser), @@ -60,12 +63,15 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { } // Fetch creates a list of memory events for each container. 
-func (m *MetricSet) Fetch() ([]common.MapStr, error) { +func (m *MetricSet) Fetch(r mb.ReporterV2) { stats, err := docker.FetchStats(m.dockerClient, m.Module().Config().Timeout) if err != nil { - return nil, err + err = errors.Wrap(err, "failed to get docker stats") + logger.Error(err) + r.Error(err) + return } memoryStats := m.memoryService.getMemoryStatsList(stats, m.dedot) - return eventsMapping(memoryStats), nil + eventsMapping(r, memoryStats) } diff --git a/metricbeat/module/docker/memory/memory_integration_test.go b/metricbeat/module/docker/memory/memory_integration_test.go index cf29fc4faa16..e77284614f04 100644 --- a/metricbeat/module/docker/memory/memory_integration_test.go +++ b/metricbeat/module/docker/memory/memory_integration_test.go @@ -22,24 +22,20 @@ package memory import ( "testing" + "github.com/stretchr/testify/assert" + mbtest "github.com/elastic/beats/metricbeat/mb/testing" ) -/* -// TODO: Enable -func TestFetch(t *testing.T) { - f := mbtest.NewEventsFetcher(t, getConfig()) - event, err := f.Fetch() - if err != nil { - t.Fatal(err) +func TestData(t *testing.T) { + f := mbtest.NewReportingMetricSetV2(t, getConfig()) + events, errs := mbtest.ReportingFetchV2(f) + if len(errs) > 0 { + t.Fatalf("Expected 0 error, had %d. 
%v\n", len(errs), errs) } - t.Logf(" module : %s metricset : %s event: %+v", f.Module().Name(), f.Name(), event) -}*/ + assert.NotEmpty(t, events) -func TestData(t *testing.T) { - f := mbtest.NewEventsFetcher(t, getConfig()) - err := mbtest.WriteEvents(f, t) - if err != nil { + if err := mbtest.WriteEventsReporterV2(f, t, ""); err != nil { t.Fatal("write", err) } } diff --git a/metricbeat/module/docker/memory/memory_test.go b/metricbeat/module/docker/memory/memory_test.go index 594665e87a8f..77918dbe2c2e 100644 --- a/metricbeat/module/docker/memory/memory_test.go +++ b/metricbeat/module/docker/memory/memory_test.go @@ -23,11 +23,11 @@ import ( "time" "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" "github.com/elastic/beats/libbeat/common" + mbtest "github.com/elastic/beats/metricbeat/mb/testing" "github.com/elastic/beats/metricbeat/module/docker" - - "github.com/stretchr/testify/assert" ) func TestMemoryService_GetMemoryStats(t *testing.T) { @@ -57,12 +57,17 @@ func TestMemoryService_GetMemoryStats(t *testing.T) { memoryRawStats.Stats = memorystats totalRSS := memorystats.MemoryStats.Stats["total_rss"] - expectedEvent := common.MapStr{ - "_module": common.MapStr{ + expectedRootFields := common.MapStr{ + "container": common.MapStr{ + "id": containerID, + "name": "name1", + "image": common.MapStr{ + "name": "image", + }, + "runtime": "docker", + }, + "docker": common.MapStr{ "container": common.MapStr{ - "id": containerID, - "name": "name1", - "image": "image", "labels": common.MapStr{ "label1": "val1", "label2": common.MapStr{ @@ -72,6 +77,8 @@ func TestMemoryService_GetMemoryStats(t *testing.T) { }, }, }, + } + expectedFields := common.MapStr{ "fail": common.MapStr{ "count": memorystats.MemoryStats.Failcnt, }, @@ -88,11 +95,15 @@ func TestMemoryService_GetMemoryStats(t *testing.T) { } //WHEN rawStats := memoryService.getMemoryStats(memoryRawStats, false) - event := eventMapping(&rawStats) + 
r := &mbtest.CapturingReporterV2{} + eventMapping(r, &rawStats) + events := r.GetEvents() //THEN - assert.True(t, equalEvent(expectedEvent, event)) - t.Logf(" expected : %v", expectedEvent) - t.Logf(" returned : %v", event) + assert.Empty(t, r.GetErrors()) + assert.NotEmpty(t, events) + event := events[0] + assert.Equal(t, expectedRootFields, event.RootFields) + assert.Equal(t, expectedFields, event.MetricSetFields) } func getMemoryStats(read time.Time, number uint64) types.StatsJSON { diff --git a/metricbeat/module/docker/network/_meta/data.json b/metricbeat/module/docker/network/_meta/data.json index ddb3c827a8aa..fba2fae388c3 100644 --- a/metricbeat/module/docker/network/_meta/data.json +++ b/metricbeat/module/docker/network/_meta/data.json @@ -1,23 +1,37 @@ { "@timestamp": "2017-10-12T08:05:34.853Z", - "beat": { + "agent": { "hostname": "host.example.com", "name": "host.example.com" }, + "container": { + "id": "cc78e58acfda4501105dc4de8e3ae218f2da616213e6e3af168c40103829302a", + "image": { + "name": "metricbeat_elasticsearch" + }, + "name": "metricbeat_elasticsearch_1_df866b3a7b3d", + "runtime": "docker" + }, "docker": { "container": { - "id": "452523bf833fd9fd1a8425135b720de4cb9b5a32096deac5b52a97e97bb6d16d", "labels": { - "com_docker_compose_config-hash": "68a840a9e1c606ca1026492e50620e139ca342c585f330025a90f39a5fd32538", + "com_docker_compose_config-hash": "e3e0a2c6e5d1afb741bc8b1ecb09cda0395886b7a3e5084a9fd110be46d70f78", "com_docker_compose_container-number": "1", "com_docker_compose_oneoff": "False", "com_docker_compose_project": "metricbeat", "com_docker_compose_service": "elasticsearch", - "com_docker_compose_version": "1.21.0", - "maintainer": "Elastic Docker Team \u003cdocker@elastic.co\u003e", - "org_label-schema_schema-version": "= 1.0 org.label-schema.name=CentOS Base Image org.label-schema.vendor=CentOS org.label-schema.license=GPLv2 org.label-schema.build-date=20180402" - }, - "name": "metricbeat_elasticsearch_1" + "com_docker_compose_slug": 
"df866b3a7b3d50c0802350cbe58ee5b34fa32b7f6ba7fe9e48cde2c12dd0201d", + "com_docker_compose_version": "1.23.1", + "license": "Elastic License", + "org_label-schema_build-date": "20181006", + "org_label-schema_license": "GPLv2", + "org_label-schema_name": "elasticsearch", + "org_label-schema_schema-version": "1.0", + "org_label-schema_url": "https://www.elastic.co/products/elasticsearch", + "org_label-schema_vcs-url": "https://github.com/elastic/elasticsearch-docker", + "org_label-schema_vendor": "Elastic", + "org_label-schema_version": "6.5.1" + } }, "network": { "in": { @@ -27,10 +41,10 @@ "packets": 0 }, "inbound": { - "bytes": 61694097, + "bytes": 23047, "dropped": 0, "errors": 0, - "packets": 714036 + "packets": 241 }, "interface": "eth0", "out": { @@ -40,17 +54,23 @@ "packets": 0 }, "outbound": { - "bytes": 69114459, + "bytes": 0, "dropped": 0, "errors": 0, - "packets": 713985 + "packets": 0 } } }, + "event": { + "dataset": "docker.network", + "duration": 115000, + "module": "docker" + }, "metricset": { - "host": "/var/run/docker.sock", - "module": "docker", - "name": "network", - "rtt": 115 + "name": "network" + }, + "service": { + "address": "/var/run/docker.sock", + "type": "docker" } -} +} \ No newline at end of file diff --git a/metricbeat/module/docker/network/_meta/fields.yml b/metricbeat/module/docker/network/_meta/fields.yml index 63d06ce7fb8e..035047eb0918 100644 --- a/metricbeat/module/docker/network/_meta/fields.yml +++ b/metricbeat/module/docker/network/_meta/fields.yml @@ -57,7 +57,6 @@ Total number of outgoing packets. - name: inbound type: group - deprecated: 6.4 description: > Incoming network stats since the container started. fields: @@ -80,7 +79,6 @@ Total number of incoming packets. - name: outbound type: group - deprecated: 6.4 description: > Outgoing network stats since the container started. 
fields: diff --git a/metricbeat/module/docker/network/data.go b/metricbeat/module/docker/network/data.go index 80273317613e..30f6bd34b937 100644 --- a/metricbeat/module/docker/network/data.go +++ b/metricbeat/module/docker/network/data.go @@ -29,19 +29,18 @@ func eventsMapping(r mb.ReporterV2, netsStatsList []NetStats) { } func eventMapping(r mb.ReporterV2, stats *NetStats) { - // Deprecated fields r.Event(mb.Event{ - ModuleFields: common.MapStr{ - "container": stats.Container.ToMapStr(), - }, + RootFields: stats.Container.ToMapStr(), MetricSetFields: common.MapStr{ "interface": stats.NameInterface, + // Deprecated "in": common.MapStr{ "bytes": stats.RxBytes, "dropped": stats.RxDropped, "errors": stats.RxErrors, "packets": stats.RxPackets, }, + // Deprecated "out": common.MapStr{ "bytes": stats.TxBytes, "dropped": stats.TxDropped, diff --git a/metricbeat/module/docker/network/network.go b/metricbeat/module/docker/network/network.go index 67dd2423286f..78b908dd47b4 100644 --- a/metricbeat/module/docker/network/network.go +++ b/metricbeat/module/docker/network/network.go @@ -19,11 +19,15 @@ package network import ( "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/docker" ) +var logger = logp.NewLogger("docker.network") + func init() { mb.Registry.MustAddMetricSet("docker", "network", New, mb.WithHostParser(docker.HostParser), @@ -64,6 +68,8 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) { stats, err := docker.FetchStats(m.dockerClient, m.Module().Config().Timeout) if err != nil { + err = errors.Wrap(err, "failed to get docker stats") + logger.Error(err) r.Error(err) return } diff --git a/metricbeat/tests/system/test_autodiscover.py b/metricbeat/tests/system/test_autodiscover.py index 2c95c52f3bdf..1b41e80a1dfd 100644 --- 
a/metricbeat/tests/system/test_autodiscover.py +++ b/metricbeat/tests/system/test_autodiscover.py @@ -51,9 +51,10 @@ def test_docker(self): proc.check_kill_and_wait() # Check metadata is added - assert output[0]['docker']['container']['image'] == 'memcached:latest' + assert output[0]['container']['image']['name'] == 'memcached:latest' assert output[0]['docker']['container']['labels'] == {} - assert 'name' in output[0]['docker']['container'] + assert 'name' in output[0]['container'] + self.assert_fields_are_documented(output[0]) @unittest.skipIf(not INTEGRATION_TESTS or os.getenv("TESTING_ENVIRONMENT") == "2x", @@ -93,8 +94,9 @@ def test_docker_labels(self): proc.check_kill_and_wait() # Check metadata is added - assert output[0]['docker']['container']['image'] == 'memcached:latest' - assert 'name' in output[0]['docker']['container'] + assert output[0]['container']['image']['name'] == 'memcached:latest' + assert 'name' in output[0]['container'] + self.assert_fields_are_documented(output[0]) @unittest.skipIf(not INTEGRATION_TESTS or os.getenv("TESTING_ENVIRONMENT") == "2x", @@ -143,3 +145,4 @@ def test_config_appender(self): # Check field is added assert output[0]['fields']['foo'] == 'bar' + self.assert_fields_are_documented(output[0]) diff --git a/metricbeat/tests/system/test_docker.py b/metricbeat/tests/system/test_docker.py index 696e4cb3089b..2d266d59dd8a 100644 --- a/metricbeat/tests/system/test_docker.py +++ b/metricbeat/tests/system/test_docker.py @@ -138,7 +138,7 @@ def test_memory_fields(self): @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") def test_network_fields(self): """ - test info fields + test network fields """ self.render_config_template(modules=[{ "name": "docker", diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 0d0fd4fbc2a2..a119227ca519 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - 
image: docker.elastic.co/elasticsearch/elasticsearch:7.0.0-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:7.1.0-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9200"] retries: 300 @@ -16,7 +16,7 @@ services: - "xpack.security.enabled=false" logstash: - image: docker.elastic.co/logstash/logstash:7.0.0-SNAPSHOT + image: docker.elastic.co/logstash/logstash:7.1.0-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -26,7 +26,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:7.0.0-SNAPSHOT + image: docker.elastic.co/kibana/kibana:7.1.0-SNAPSHOT healthcheck: test: ["CMD-SHELL", 'python -c ''import urllib, json; response = urllib.urlopen("http://localhost:5601/api/status"); data = json.loads(response.read()); exit(1) if data["status"]["overall"]["state"] != "green" else exit(0);'''] retries: 600 diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 1dc451b88d13..1ac2e2cb14df 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -312,6 +312,14 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: +#------------------------------- NetFlow Module ------------------------------- +- module: netflow + log: + enabled: true + var: + netflow_host: localhost + netflow_port: 2055 + #-------------------------------- Nginx Module -------------------------------- #- module: nginx # Access logs diff --git a/x-pack/filebeat/include/list.go b/x-pack/filebeat/include/list.go index f4550a6e8989..c6332bde38f4 100644 --- a/x-pack/filebeat/include/list.go +++ b/x-pack/filebeat/include/list.go @@ -10,6 +10,7 @@ import ( // Import packages that need to register themselves. 
_ "github.com/elastic/beats/x-pack/filebeat/input/netflow" _ "github.com/elastic/beats/x-pack/filebeat/module/iptables" + _ "github.com/elastic/beats/x-pack/filebeat/module/netflow" _ "github.com/elastic/beats/x-pack/filebeat/module/suricata" _ "github.com/elastic/beats/x-pack/filebeat/module/zeek" ) diff --git a/x-pack/filebeat/module/netflow/_meta/config.yml b/x-pack/filebeat/module/netflow/_meta/config.yml new file mode 100644 index 000000000000..20d1905b6f4a --- /dev/null +++ b/x-pack/filebeat/module/netflow/_meta/config.yml @@ -0,0 +1,6 @@ +- module: netflow + log: + enabled: true + var: + netflow_host: localhost + netflow_port: 2055 diff --git a/x-pack/filebeat/module/netflow/_meta/docs.asciidoc b/x-pack/filebeat/module/netflow/_meta/docs.asciidoc new file mode 100644 index 000000000000..334a219ba844 --- /dev/null +++ b/x-pack/filebeat/module/netflow/_meta/docs.asciidoc @@ -0,0 +1,67 @@ +[role="xpack"] + +:modulename: netflow +:has-dashboards: false + +== NetFlow module + +This is a module for receiving NetFlow and IPFIX flow records over UDP. This +input supports NetFlow versions 1, 5, 6, 7, 8 and 9, as well as IPFIX. For +NetFlow versions older than 9, fields are mapped automatically to NetFlow v9. + +This module wraps the <<filebeat-input-netflow,netflow input>> to enrich the +flow records with geolocation information about the IP endpoints by using +Elasticsearch Ingest Node. + +[float] +=== Compatibility + +This module requires the {elasticsearch-plugins}/ingest-geoip.html[ingest-geoip] +Elasticsearch plugin. + +include::../include/running-modules.asciidoc[] + +include::../include/configuring-intro.asciidoc[] + +:fileset_ex: log + +include::../include/config-option-intro.asciidoc[] + +[float] +==== `log` fileset settings + +The fileset is by default configured to listen for UDP traffic on +`localhost:2055`. 
For most use cases you will want to set the `netflow_host` +variable to allow the input to bind to all interfaces so that it can receive +traffic from network devices. + +["source","yaml",subs="attributes"] +----- +- module: netflow + log: + enabled: true + var: + netflow_host: 0.0.0.0 + netflow_port: 2055 +----- + +`var.netflow_host`:: Address to bind to. Defaults to `localhost`. + +`var.netflow_port`:: Port to listen on. Defaults to `2055`. + +`var.max_message_size`:: The maximum size of the message received over UDP. +The default is `10KiB`. + +`var.expiration_timeout`:: The time before an idle session or unused template is +expired. Only applicable to v9 and IPFIX protocols. A value of zero disables +expiration. + +`var.queue_size`:: The maximum number of packets that can be queued for +processing. Use this setting to avoid packet-loss when dealing with occasional +bursts of traffic. + +:has-dashboards!: + +:fileset_ex!: + +:modulename!: diff --git a/x-pack/filebeat/module/netflow/_meta/fields.yml b/x-pack/filebeat/module/netflow/_meta/fields.yml new file mode 100644 index 000000000000..fc4bf3bb8874 --- /dev/null +++ b/x-pack/filebeat/module/netflow/_meta/fields.yml @@ -0,0 +1,6 @@ +- key: netflow-module + title: NetFlow + description: > + Module for receiving NetFlow and IPFIX flow records over UDP. The module + does not add fields beyond what the netflow input provides. + fields: diff --git a/x-pack/filebeat/module/netflow/fields.go b/x-pack/filebeat/module/netflow/fields.go new file mode 100644 index 000000000000..d71d8ceb53f9 --- /dev/null +++ b/x-pack/filebeat/module/netflow/fields.go @@ -0,0 +1,23 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by beats/dev-tools/cmd/asset/asset.go - DO NOT EDIT. 
+ +package netflow + +import ( + "github.com/elastic/beats/libbeat/asset" +) + +func init() { + if err := asset.SetFields("filebeat", "netflow", asset.ModuleFieldsPri, AssetNetflow); err != nil { + panic(err) + } +} + +// AssetNetflow returns asset data. +// This is the base64 encoded gzipped contents of module/netflow. +func AssetNetflow() string { + return "eJw8jjFOw0AQRfs9xbtAcoAtqFCkFKAUINGazBiPWHas3Ymt3B4Z4fTv/f8OfOs9UzXG4uvhx+VWNEFYFM28apyKrwlE+7XZHOY185QAXv5gRm80vaotVr92g6EK58vp/ME2vAHepOOLNt6fL0feJuVxB+LaqR4MIoymRTqfevcqrNMQxKR7JVbnWzA3X0y0HxP/Qk6/AQAA//9CcUYh" +} diff --git a/x-pack/filebeat/module/netflow/log/config/netflow.yml b/x-pack/filebeat/module/netflow/log/config/netflow.yml new file mode 100644 index 000000000000..b6d045353db2 --- /dev/null +++ b/x-pack/filebeat/module/netflow/log/config/netflow.yml @@ -0,0 +1,6 @@ +type: netflow +protocols: [v1, v5, v6, v7, v8, v9, ipfix] +host: '{{.netflow_host}}:{{.netflow_port}}' +max_message_size: '{{.max_message_size}}' +expiration_timeout: '{{.expiration_timeout}}' +queue_size: {{.queue_size}} diff --git a/x-pack/filebeat/module/netflow/log/ingest/pipeline.json b/x-pack/filebeat/module/netflow/log/ingest/pipeline.json new file mode 100644 index 000000000000..6eeddb6f4302 --- /dev/null +++ b/x-pack/filebeat/module/netflow/log/ingest/pipeline.json @@ -0,0 +1,29 @@ +{ + "description": "Pipeline for Filebeat NetFlow", + "processors": [ + { + "geoip": { + "if": "ctx.source?.geo == null", + "field": "source.ip", + "target_field": "source.geo", + "ignore_missing": true + } + }, + { + "geoip": { + "if": "ctx.destination?.geo == null", + "field": "destination.ip", + "target_field": "destination.geo", + "ignore_missing": true + } + } + ], + "on_failure": [ + { + "set": { + "field": "error.message", + "value": "{{ _ingest.on_failure_message }}" + } + } + ] +} diff --git a/x-pack/filebeat/module/netflow/log/manifest.yml b/x-pack/filebeat/module/netflow/log/manifest.yml new file mode 100644 index 
000000000000..4e8ee1317360 --- /dev/null +++ b/x-pack/filebeat/module/netflow/log/manifest.yml @@ -0,0 +1,20 @@ +module_version: "1.0" + +var: + - name: netflow_host + default: localhost + - name: netflow_port + default: 2055 + - name: max_message_size + default: 10KiB + - name: expiration_timeout + default: 30m + - name: queue_size + default: 8192 + +ingest_pipeline: ingest/pipeline.json +input: config/netflow.yml + +requires.processors: +- name: geoip + plugin: ingest-geoip diff --git a/x-pack/filebeat/module/suricata/eve/ingest/pipeline.json b/x-pack/filebeat/module/suricata/eve/ingest/pipeline.json index 67ec81b85a53..3276c1968db7 100644 --- a/x-pack/filebeat/module/suricata/eve/ingest/pipeline.json +++ b/x-pack/filebeat/module/suricata/eve/ingest/pipeline.json @@ -50,10 +50,23 @@ } }, { - "rename": { + "append": { + "if": "ctx.suricata?.eve?.http?.hostname != null", + "value": "{{suricata.eve.http.hostname}}", + "field": "destination.domain" + } + }, + { + "remove": { "field": "suricata.eve.http.hostname", - "target_field": "destination.domain", - "ignore_missing": true + "ignore_failure": true + } + }, + { + "script": { + "type": "painless", + "source": "def domain = ctx.destination?.domain; if (domain instanceof Collection) { domain = domain.stream().distinct().collect(Collectors.toList()); if (domain.length == 1) { domain = domain[0]; }ctx.destination.domain = domain; }", + "ignore_failure": true } }, { @@ -227,6 +240,7 @@ }, { "geoip": { + "if": "ctx.source?.geo == null", "field": "source.ip", "target_field": "source.geo", "ignore_missing": true @@ -234,6 +248,7 @@ }, { "geoip": { + "if": "ctx.destination?.geo == null", "field": "destination.ip", "target_field": "destination.geo", "ignore_missing": true @@ -254,9 +269,9 @@ } }, { - "set": { - "field": "event.kind", - "value": "event" + "script": { + "lang": "painless", + "source": "def t = ctx.suricata?.eve?.event_type; if (t == \"stats\") {\n ctx['event']['kind'] = \"metric\";\n} else if (t == \"alert\") 
{\n ctx['event']['kind'] = \"alert\";\n ctx['event']['category'] = \"network_traffic\";\n} else {\n ctx['event']['kind'] = \"event\";\n ctx['event']['category'] = \"network_traffic\";\n}" } } ], diff --git a/x-pack/filebeat/module/suricata/eve/test/eve-alerts.log-expected.json b/x-pack/filebeat/module/suricata/eve/test/eve-alerts.log-expected.json index 346691a4a2c8..64f62fcb7ec6 100644 --- a/x-pack/filebeat/module/suricata/eve/test/eve-alerts.log-expected.json +++ b/x-pack/filebeat/module/suricata/eve/test/eve-alerts.log-expected.json @@ -14,10 +14,11 @@ "destination.packets": 3, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 223000000, "event.end": "2018-10-03T14:42:44.836Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 2, @@ -75,10 +76,11 @@ "destination.packets": 3, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 244000000, "event.end": "2018-10-03T16:16:26.711Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 2, @@ -136,10 +138,11 @@ "destination.packets": 3, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 233000000, "event.end": "2018-10-03T16:44:50.813Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 2, @@ -197,10 +200,11 @@ "destination.packets": 3, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 231000000, "event.end": "2018-10-03T16:45:09.267Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": 
"allowed", "event.severity": 2, @@ -258,10 +262,11 @@ "destination.packets": 3, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 229000000, "event.end": "2018-10-03T16:45:34.481Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 2, @@ -319,10 +324,11 @@ "destination.packets": 3, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 301000000, "event.end": "2018-10-03T17:02:38.900Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 2, @@ -380,10 +386,11 @@ "destination.packets": 3, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 85000000, "event.end": "2018-10-04T09:34:59.009Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 3, @@ -441,10 +448,11 @@ "destination.packets": 3, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 242000000, "event.end": "2018-10-04T09:34:59.168Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 3, @@ -502,10 +510,11 @@ "destination.packets": 5, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 362000000, "event.end": "2018-10-04T09:34:59.288Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 3, @@ -563,10 +572,11 @@ "destination.packets": 62, "destination.port": 80, "ecs.version": "1.0.0-beta2", + 
"event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 365000000, "event.end": "2018-10-04T09:34:59.289Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 3, @@ -624,10 +634,11 @@ "destination.packets": 98, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 432000000, "event.end": "2018-10-04T09:34:59.356Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 3, @@ -685,10 +696,11 @@ "destination.packets": 221, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 532000000, "event.end": "2018-10-04T09:34:59.456Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 3, @@ -746,10 +758,11 @@ "destination.packets": 67, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 821000000, "event.end": "2018-10-04T09:34:59.747Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 3, @@ -807,10 +820,11 @@ "destination.packets": 119, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 1027000000, "event.end": "2018-10-04T09:34:59.953Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 3, @@ -868,10 +882,11 @@ "destination.packets": 253, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 1324000000, "event.end": 
"2018-10-04T09:35:00.250Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 3, @@ -929,10 +944,11 @@ "destination.packets": 314, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 1475000000, "event.end": "2018-10-04T09:35:00.401Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 3, @@ -990,10 +1006,11 @@ "destination.packets": 588, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 1850000000, "event.end": "2018-10-04T09:35:00.776Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 3, @@ -1051,10 +1068,11 @@ "destination.packets": 591, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 1971000000, "event.end": "2018-10-04T09:35:00.897Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 3, @@ -1112,10 +1130,11 @@ "destination.packets": 979, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 2436000000, "event.end": "2018-10-04T09:35:01.362Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 3, @@ -1172,10 +1191,11 @@ "destination.packets": 1079, "destination.port": 80, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 2649000000, "event.end": "2018-10-04T09:35:01.575Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", 
"event.outcome": "allowed", "event.severity": 3, diff --git a/x-pack/filebeat/module/suricata/eve/test/eve-small.log-expected.json b/x-pack/filebeat/module/suricata/eve/test/eve-small.log-expected.json index f7aa462b3347..82c1fd667255 100644 --- a/x-pack/filebeat/module/suricata/eve/test/eve-small.log-expected.json +++ b/x-pack/filebeat/module/suricata/eve/test/eve-small.log-expected.json @@ -4,6 +4,7 @@ "destination.ip": "192.168.253.112", "destination.port": 22, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.end": "2018-07-05T19:01:09.820Z", "event.kind": "event", @@ -33,10 +34,11 @@ "destination.packets": 3, "destination.port": 443, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 1251000000, "event.end": "2018-07-05T19:07:20.910Z", - "event.kind": "event", + "event.kind": "alert", "event.module": "suricata", "event.outcome": "allowed", "event.severity": 1, @@ -76,6 +78,7 @@ "destination.ip": "192.168.86.28", "destination.port": 63963, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.end": "2018-07-05T19:43:47.690Z", "event.kind": "event", @@ -116,6 +119,7 @@ "destination.ip": "192.168.86.85", "destination.port": 56118, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.end": "2018-07-05T19:44:33.222Z", "event.kind": "event", @@ -164,6 +168,7 @@ "destination.ip": "192.168.86.85", "destination.port": 39464, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.end": "2018-07-05T19:51:20.213Z", "event.kind": "event", @@ -194,7 +199,7 @@ "ecs.version": "1.0.0-beta2", "event.dataset": "suricata.eve", "event.end": "2018-07-05T19:51:23.009Z", - "event.kind": "event", + "event.kind": "metric", "event.module": "suricata", "fileset.name": "eve", "input.type": "log", @@ 
-330,6 +335,7 @@ "destination.ip": "17.142.164.13", "destination.port": 443, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.end": "2018-07-05T19:51:50.666Z", "event.kind": "event", @@ -363,6 +369,7 @@ "destination.packets": 0, "destination.port": 547, "ecs.version": "1.0.0-beta2", + "event.category": "network_traffic", "event.dataset": "suricata.eve", "event.duration": 30548000000, "event.end": "2018-07-05T19:51:54.001Z", diff --git a/x-pack/filebeat/modules.d/netflow.yml.disabled b/x-pack/filebeat/modules.d/netflow.yml.disabled new file mode 100644 index 000000000000..522307d7e71b --- /dev/null +++ b/x-pack/filebeat/modules.d/netflow.yml.disabled @@ -0,0 +1,9 @@ +# Module: netflow +# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-netflow.html + +- module: netflow + log: + enabled: true + var: + netflow_host: localhost + netflow_port: 2055 diff --git a/x-pack/metricbeat/include/list.go b/x-pack/metricbeat/include/list.go index 4f662b04b824..4e75b505f43a 100644 --- a/x-pack/metricbeat/include/list.go +++ b/x-pack/metricbeat/include/list.go @@ -10,6 +10,7 @@ import ( // Import packages that need to register themselves. 
_ "github.com/elastic/beats/x-pack/metricbeat/module/aws" _ "github.com/elastic/beats/x-pack/metricbeat/module/aws/ec2" + _ "github.com/elastic/beats/x-pack/metricbeat/module/aws/sqs" _ "github.com/elastic/beats/x-pack/metricbeat/module/mssql" _ "github.com/elastic/beats/x-pack/metricbeat/module/mssql/performance" _ "github.com/elastic/beats/x-pack/metricbeat/module/mssql/transaction_log" diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index cd90ff182a01..6f5f1327b1e4 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -152,6 +152,7 @@ metricbeat.modules: period: 300s metricsets: - "ec2" + - "sqs" access_key_id: '${AWS_ACCESS_KEY_ID:""}' secret_access_key: '${AWS_SECRET_ACCESS_KEY:""}' session_token: '${AWS_SESSION_TOKEN:""}' diff --git a/x-pack/metricbeat/module/aws/_meta/config.yml b/x-pack/metricbeat/module/aws/_meta/config.yml index ccb5317ce096..d831e5e094d7 100644 --- a/x-pack/metricbeat/module/aws/_meta/config.yml +++ b/x-pack/metricbeat/module/aws/_meta/config.yml @@ -2,6 +2,7 @@ period: 300s metricsets: - "ec2" + - "sqs" access_key_id: '${AWS_ACCESS_KEY_ID:""}' secret_access_key: '${AWS_SECRET_ACCESS_KEY:""}' session_token: '${AWS_SESSION_TOKEN:""}' diff --git a/x-pack/metricbeat/module/aws/_meta/docs.asciidoc b/x-pack/metricbeat/module/aws/_meta/docs.asciidoc index a510dfd570ca..6706525d8d07 100644 --- a/x-pack/metricbeat/module/aws/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/aws/_meta/docs.asciidoc @@ -2,7 +2,7 @@ This module periodically fetches monitoring metrics from AWS Cloudwatch using https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html[GetMetricData API] for running EC2 instances. Note: extra AWS charges on GetMetricData API requests will be generated by this module. -The default metricset is `ec2`. +The default metricsets are `ec2` and `sqs`. 
[float] === Module-specific configuration notes @@ -28,20 +28,6 @@ aws> sts get-session-token --serial-number arn:aws:iam::1234:mfa/your-email@exam Specific permissions needs to be added into the IAM user's policy to authorize Metricbeat to collect AWS monitoring metrics. Please see documentation under each metricset for required permissions. -By default, Amazon EC2 sends metric data to CloudWatch every 5 minutes. With this basic monitoring, `period` in aws module -configuration should be larger or equal than `300s`. If `period` is set to be less than `300s`, the same cloudwatch metrics -will be collected more than once which will cause extra fees without getting more granular metrics. For example, in `US East (N. Virginia)` region, it costs -$0.01/1000 metrics requested using GetMetricData. Please see https://aws.amazon.com/cloudwatch/pricing/[AWS Cloudwatch Pricing] -for more details. To avoid unnecessary charges, `period` is preferred to be set to `300s` or multiples of `300s`, such as -`600s` and `900s`. - -For more granular monitoring data you can enable detailed monitoring on the instance to get metrics every 1 minute. Please see -https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html[Enabling Detailed Monitoring] for instructions -on how to enable detailed monitoring. With detailed monitoring enabled, `period` in aws module configuration can be any number -larger than `60s`. Since AWS sends metric data to CloudWatch in 1-minute periods, setting metricbeat module `period` less -than `60s` will cause extra API requests which means extra charges on AWS. To avoid unnecessary charges, `period` is -preferred to be set to `60s` or multiples of `60s`, such as `120s` and `180s`. 
- Here is an example of aws metricbeat module configuration: [source,yaml] @@ -49,15 +35,29 @@ Here is an example of aws metricbeat module configuration: metricbeat.modules: - module: aws period: 300s - metricsets: ["ec2"] + metricsets: + - "ec2" + - "sqs" access_key_id: '${AWS_ACCESS_KEY_ID}' secret_access_key: '${AWS_SECRET_ACCESS_KEY}' session_token: '${AWS_SESSION_TOKEN}' default_region: '${AWS_REGION:us-west-1}' ---- -This module only collects metrics for EC2 instances that are in `running` state and exist more than 10 minutes to make sure -there are monitoring metrics exist in Cloudwatch already. +By default, Amazon EC2 sends metric data to CloudWatch every 5 minutes. With this basic monitoring, `period` in aws module +configuration should be larger or equal than `300s`. If `period` is set to be less than `300s`, the same cloudwatch metrics +will be collected more than once which will cause extra fees without getting more granular metrics. For example, in `US East (N. Virginia)` region, it costs +$0.01/1000 metrics requested using GetMetricData. Please see https://aws.amazon.com/cloudwatch/pricing/[AWS Cloudwatch Pricing] +for more details. To avoid unnecessary charges, `period` is preferred to be set to `300s` or multiples of `300s`, such as +`600s` and `900s`. For more granular monitoring data you can enable detailed monitoring on the instance to get metrics every 1 minute. Please see +https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html[Enabling Detailed Monitoring] for instructions +on how to enable detailed monitoring. With detailed monitoring enabled, `period` in aws module configuration can be any number +larger than `60s`. Since AWS sends metric data to CloudWatch in 1-minute periods, setting metricbeat module `period` less +than `60s` will cause extra API requests which means extra charges on AWS. To avoid unnecessary charges, `period` is +preferred to be set to `60s` or multiples of `60s`, such as `120s` and `180s`. 
+ +Since cloudWatch metrics for Amazon SQS queues are automatically collected and pushed to CloudWatch every 5 minutes, +`period` for `sqs` is recommended to set to `300s` or multiples of `300s`. The AWS module comes with a predefined dashboard. For example: diff --git a/x-pack/metricbeat/module/aws/fields.go b/x-pack/metricbeat/module/aws/fields.go index 36cc75e590f1..15e67526b2ae 100644 --- a/x-pack/metricbeat/module/aws/fields.go +++ b/x-pack/metricbeat/module/aws/fields.go @@ -19,5 +19,5 @@ func init() { // AssetAws returns asset data. // This is the base64 encoded gzipped contents of module/aws. func AssetAws() string { - return "eJzEl0FvGzkPhu/5FURPLdAM8BUf9pDDAtu0h1wWwXaLHh1aoj1ENNJApDxw0R+/oDx2bNdpnKydnYNhjDTU876SKPES7ml5BTjIBYCyBrqCNzjImwsAT+Iy98opXsHvFwAAdzjIHXTJl0DgUgjkVOCPb1+gS5E1ZY5z6EgzO4FZTl1tuw6p+AHVtc0FQKZAKHQFc7wAmDEFL1c1+iVE7GhNY48ue+uYU+nHNwegdoNsByL3YfPuULBHA66eO3If7sClqMhRQFvaaNMWFQbKBOIy9uT31H4ztTC07NqHAAc8EooK02X98PP1h2Zr/F2f1s++1G25ri+NJsXQ9E53eqzFi8NAfjILCfc7/MIHe/5uCXrKjqLinCDNAENIDpW8gYNLXV+UoETW0R7MBK7kTFHDEjhCEYIUq48cRTE6ah4V4jJ51kkRnNMZtMTSTSmbjuvbr7AaTED6cT62GWGWcu1VlAN/Rwv7JPcUg317VnLCHMnvCFgZHx/YWxRA53IhD8L2hhUGFAhYomvJQ8ogilnJPy5KSu5DkckrihuH3FXW4oJgShQfZgojlBi4Y1uJG9lDSxHss+vbr9c1wscVMywwFAIW+E45HatYJq7FPCd/XslV00HhtpdiUuiRPfg0RJP+8/y/B4x+TDvaFgGOrmTzCL1no8AAKymHpUfSIeX7hmPTo7snlbMqHseATI54YYsxWl5ZYwBHpTxDR7K/KX+Nn4q+Kn9N46noqfg5NtOl0nnh6whnsf612E9lu2e559RkQn8W9o+j0zheE4x1k6pEUyZYpFA6EsAFcsBpINB0PPmQWemM6BZfKRrTydmr66k/Nfh16vpAdihU31NPuZ7c8vIpsDsMWpZ2PGPydh/i5G05KnfHTNA5VdYRtmW+cK5eIlIUtUjjWnL3kxlyeOSgDCnOn6fvL+pTVrHzXFvKu6R2t+lRhDxMk7a7jSsmqEz1VLRWWYpSt9vGqxtpQFHoOBY9XuRkFe+VtZ5DyHqc/0DK4Rk7VswmxbiU7afEw5WPHQlzyv+6UEiZpBYET+e3TSt3OKeGD++Je1oOKe+3HQF286luSsOw+FZdedvMq4v9c/ge6tLG5uDwIngR5030bDXiw0rwpHXFbRfDLEDRctEjFcgGtM+8QKXGR5lY02kNHaPDpz+/1IHX9v50qziSkvvDK3H/9TPQbm4X/7erfCYRQJHkuFbgA4/p79msZRrYncvQGvwnP49clSPaCV1cGzdyfLbkwg5ubjctb83gdzBNJfr1wfhcS+sWalzyh918cSKqcfc9fA9W38P/frucskKJwvNY6+A6yBOk2trVSCY95Ykltlfghbc9RW+b/gfkEuPqn7RFl
eP8sla2P0Apdxzryv5h95a+H/vZX/Lvmot/AgAA//+h+RBa" + return "eJzEWM9v27gSvuevGPTUAo2AVzy8Qw4PaNMecthFtmnRozMmx9YgFKlyhvY66B+/ICU7sSMnTmpldQgCUZ75vo+cXzyFG1qdAS7lBEBZHZ3BG1zKmxMAS2Iit8rBn8H/TwAArnEp19AEmxyBCc6RUYGPP66gCZ41RPZzaEgjG4FZDE1ZO3ch2SWqqasTgEiOUOgM5ngCMGNyVs6K9VPw2NAaTX501eYPY0ht/2YA1LaR+4bIfNi8GzK212D3XJP5cA0meEX2AlrThpvWqLCkSCAmYkt2h+2PzBaWNZv6zsCARkJeYboqP/xy/qG6539bp/WzS/U+XdOmSoOiq1qjW1+syYtBR3YycwF3P3hEh/x8qwlaioa84pwgzACdCwaVbAYOJjRtUoLkWXt5MBKYFCN5dStgD0kIgi86shdFb6jaS8REsqyTJDinEbj41EwpZh7nl9+hcyYgbb8f9zHCLMTyVVJ2fIvZ7JO4p+jyb0dFThg92S0CnfD+DnuNAmhMTGRBOL9hhSUKOEze1GQhRBDFqGT3k5IUW5dk8orkepfbzGpcEEyJ/N1OoYfkHTecT+KG9rImD/ln55ffz4uFTx1mWKBLBCxwSzEcylgmpsY4Jzsu5cJpkHiOJR8UWmQLNix9pv5w/98DetunHa2TAHuTYtYIreWMAh10VIape9JliDcV+6pFc0MqozLufUAkQ7zIh9HnvLKGAeyV4gwNyW5QPg4/JH1V/CWNh6THws++mq6UxgVfPIwi/WthP5bsluWGQxUJ7SjYP/VKY98mZKybVCUaIsEiuNSQAC6QHU4dgYbDkS8jK40IPdtX8hnT0bEX1UN7bODnoWkd5aJQdA8txVK55eVbkHsYzFna8IzJ5n6Ig83HUbk5ZIPGZFk83Kf5wr16CUlR1CSVqcncTGbIbk+hdMHPn8fvK7UhquR6rjXFbaS5t2lRhCxMg9bbix0mKJhKVcyrshKlZnuNu47UoSg07JMeTnLS2XtlrmMQWfv5F6gM79ihZDYpxoSY/yQ/PPnkkjCn+NuDQogkZSB4Or9tVrnBOVU8HBM3tFqGuLt2ALCLzyUoM4xsP09XNgdz19g/B9/dXFrlPRg+BC/CeeEt5xnx7iRY0nLi7g/DLEA+56I9E8gGaBt5gUqV9TLJS8cVtLcOn/+8Ko7X8j7oKg5Eye3wSdx9/QxoF5eL/+ZWPpIIoEgwXCbwJffp79lY09SxGUvQYvyBngeeyh7aEVVcC9fj+JKTCxu4uNysvM0Cv4NpSN6uC+NzJS0hVJlgh9V8cSIqdnc1fA95vof//O90ygrJC899mYOLkyeQap1bI5m0FCc5sb0CXnjbkrc56H9BTN53/0mdVNnPT8tk+wuUYsO+nOxfuW9p2/67/C/Zd9WDmz75Kb910yc/5RVv+q7+uhq86ZuSHnrXF5wl0UlDIjinSS4wQuZI5bsES9vG8Dc3eRP7G78sS+cXfPCnlrrOs8ewLtk/E6U9MdJ/WVlyuDpaj/htu0z3TmQLz90tSu+7tIQ+6L1GOFf1HBClJDUN2Zxb3epxKj7oZMHCU3es3mkPmw0B9jBzPK/1cWCvAmpXPI1MC3RdiBx+GPIxGhfo+qw+B9j6ZmRcZJE0lWu86QoMOifruvO1c/9HH11o9t87rxHnHDPyhlvbdZn4mILUtLqa9PoNj7y/i2hHnY+XF2v1cpxY7oK7Exdwo9DwQET+LpMK345zk1JqIt/SE3r+EwAA//8lh/10" } diff --git a/x-pack/metricbeat/module/aws/sqs/_meta/data.json b/x-pack/metricbeat/module/aws/sqs/_meta/data.json new file mode 100644 index 000000000000..afec23f860d0 --- /dev/null +++ 
b/x-pack/metricbeat/module/aws/sqs/_meta/data.json @@ -0,0 +1,39 @@ +{ + "@timestamp": "2017-10-12T08:05:34.853Z", + "agent": { + "hostname": "host.example.com", + "name": "host.example.com" + }, + "aws": { + "sqs": { + "empty_receives": 0, + "messages": { + "delayed": 0, + "deleted": 0, + "not_visible": 0, + "received": 0, + "sent": 0, + "visible": 91 + }, + "oldest_message_age": { + "sec": 86404 + }, + "sent_message_size": {} + } + }, + "cloud": { + "region": "us-east-1" + }, + "event": { + "dataset": "aws.sqs", + "duration": 115000, + "module": "aws" + }, + "metricset": { + "name": "sqs" + }, + "service": { + "name": "sqs", + "type": "sqs" + } +} \ No newline at end of file diff --git a/x-pack/metricbeat/module/aws/sqs/_meta/docs.asciidoc b/x-pack/metricbeat/module/aws/sqs/_meta/docs.asciidoc new file mode 100644 index 000000000000..11ed9f87c506 --- /dev/null +++ b/x-pack/metricbeat/module/aws/sqs/_meta/docs.asciidoc @@ -0,0 +1,12 @@ +The sqs metricset of aws module allows you to monitor your AWS SQS queues. `sqs` metricset fetches a set of values from +https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-available-cloudwatch-metrics.html[Amazon SQS Metrics]. +CloudWatch metrics for Amazon SQS queues are automatically collected and pushed to CloudWatch every five minutes. + +=== AWS Permissions +Some specific AWS permissions are required for IAM user to collect AWS SQS metrics. +---- +cloudwatch:GetMetricData +ec2:DescribeRegions +---- + +=== Dashboard diff --git a/x-pack/metricbeat/module/aws/sqs/_meta/fields.yml b/x-pack/metricbeat/module/aws/sqs/_meta/fields.yml new file mode 100644 index 000000000000..9a1edd352e6e --- /dev/null +++ b/x-pack/metricbeat/module/aws/sqs/_meta/fields.yml @@ -0,0 +1,42 @@ +- name: sqs + type: group + description: > + `sqs` contains the metrics that were scraped from AWS CloudWatch which contains monitoring metrics sent by AWS SQS. 
+ release: beta + fields: + - name: oldest_message_age.sec + type: long + description: > + The approximate age of the oldest non-deleted message in the queue. + - name: messages.delayed + type: long + description: > + The number of messages in the queue that are delayed and not available for reading immediately. + - name: messages.not_visible + type: long + description: > + The number of messages that are in flight. + - name: messages.visible + type: long + description: > + The number of messages available for retrieval from the queue. + - name: messages.deleted + type: long + description: > + The number of messages deleted from the queue. + - name: messages.received + type: long + description: > + The number of messages returned by calls to the ReceiveMessage action. + - name: messages.sent + type: long + description: > + The number of messages added to a queue. + - name: empty_receives + type: long + description: > + The number of ReceiveMessage API calls that did not return a message. + - name: sent_message_size.bytes + type: scaled_float + description: > + The size of messages added to a queue. diff --git a/x-pack/metricbeat/module/aws/sqs/data.go b/x-pack/metricbeat/module/aws/sqs/data.go new file mode 100644 index 000000000000..9c45715b0812 --- /dev/null +++ b/x-pack/metricbeat/module/aws/sqs/data.go @@ -0,0 +1,30 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package sqs + +import ( + s "github.com/elastic/beats/libbeat/common/schema" + c "github.com/elastic/beats/libbeat/common/schema/mapstrstr" +) + +var ( + schemaRequestFields = s.Schema{ + "oldest_message_age": s.Object{ + "sec": c.Float("ApproximateAgeOfOldestMessage"), + }, + "messages": s.Object{ + "delayed": c.Float("ApproximateNumberOfMessagesDelayed"), + "not_visible": c.Float("ApproximateNumberOfMessagesNotVisible"), + "visible": c.Float("ApproximateNumberOfMessagesVisible"), + "deleted": c.Float("NumberOfMessagesDeleted"), + "received": c.Float("NumberOfMessagesReceived"), + "sent": c.Float("NumberOfMessagesSent"), + }, + "empty_receives": c.Float("NumberOfEmptyReceives"), + "sent_message_size": s.Object{ + "bytes": c.Float("SentMessageSize"), + }, + } +) diff --git a/x-pack/metricbeat/module/aws/sqs/sqs.go b/x-pack/metricbeat/module/aws/sqs/sqs.go new file mode 100644 index 000000000000..764d3898b7c8 --- /dev/null +++ b/x-pack/metricbeat/module/aws/sqs/sqs.go @@ -0,0 +1,179 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package sqs + +import ( + "fmt" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + "github.com/pkg/errors" + + "github.com/elastic/beats/libbeat/common" + s "github.com/elastic/beats/libbeat/common/schema" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/x-pack/metricbeat/module/aws" +) + +var metricsetName = "sqs" + +// init registers the MetricSet with the central registry as soon as the program +// starts. The New function will be called later to instantiate an instance of +// the MetricSet for each host defined in the module's configuration. 
After the +// MetricSet has been created then Fetch will begin to be called periodically. +func init() { + mb.Registry.MustAddMetricSet(aws.ModuleName, metricsetName, New, + mb.DefaultMetricSet(), + ) +} + +// MetricSet holds any configuration or state information. It must implement +// the mb.MetricSet interface. And this is best achieved by embedding +// mb.BaseMetricSet because it implements all of the required mb.MetricSet +// interface methods except for Fetch. +type MetricSet struct { + *aws.MetricSet + logger *logp.Logger +} + +// New creates a new instance of the MetricSet. New is responsible for unpacking +// any MetricSet specific configuration options if there are any. +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + logger := logp.NewLogger(aws.ModuleName) + metricSet, err := aws.NewMetricSet(base) + if err != nil { + return nil, errors.Wrap(err, "error creating aws metricset") + } + + // Check if period is set to be multiple of 300s + remainder := metricSet.PeriodInSec % 300 + if remainder != 0 { + err := errors.New("period needs to be set to 300s (or a multiple of 300s). " + + "To avoid data missing or extra costs, please make sure period is set correctly in config.yml") + logger.Info(err) + } + + return &MetricSet{ + MetricSet: metricSet, + logger: logger, + }, nil +} + +// Fetch methods implements the data gathering and data conversion to the right +// format. It publishes the event which is then forwarded to the output. In case +// of an error set the Error field of mb.Event or simply call report.Error(). 
+func (m *MetricSet) Fetch(report mb.ReporterV2) { + namespace := "AWS/SQS" + // Get startTime and endTime + startTime, endTime, err := aws.GetStartTimeEndTime(m.DurationString) + if err != nil { + m.logger.Error(errors.Wrap(err, "Error ParseDuration")) + report.Error(err) + return + } + + for _, regionName := range m.MetricSet.RegionsList { + m.MetricSet.AwsConfig.Region = regionName + svcCloudwatch := cloudwatch.New(*m.MetricSet.AwsConfig) + + // Get listMetrics output + listMetricsOutput, err := aws.GetListMetricsOutput(namespace, regionName, svcCloudwatch) + if err != nil { + m.logger.Error(err.Error()) + report.Error(err) + continue + } + if listMetricsOutput == nil || len(listMetricsOutput) == 0 { + continue + } + + // Construct metricDataQueries + metricDataQueries := constructMetricQueries(listMetricsOutput, int64(m.PeriodInSec)) + if len(metricDataQueries) == 0 { + continue + } + + // Use metricDataQueries to make GetMetricData API calls + metricDataResults, err := aws.GetMetricDataResults(metricDataQueries, svcCloudwatch, startTime, endTime) + if err != nil { + err = errors.Wrap(err, "GetMetricDataResults failed, skipping region "+regionName) + m.logger.Error(err.Error()) + report.Error(err) + continue + } + + // Create Cloudwatch Events for SQS + event, err := createSQSEvents(metricDataResults, metricsetName, regionName, schemaRequestFields) + if err != nil { + m.logger.Error(err.Error()) + event.Error = err + report.Event(event) + continue + } + + report.Event(event) + } +} + +func constructMetricQueries(listMetricsOutput []cloudwatch.Metric, period int64) []cloudwatch.MetricDataQuery { + metricDataQueries := []cloudwatch.MetricDataQuery{} + for i, listMetric := range listMetricsOutput { + metricDataQuery := createMetricDataQuery(listMetric, i, period) + metricDataQueries = append(metricDataQueries, metricDataQuery) + } + return metricDataQueries +} + +func createMetricDataQuery(metric cloudwatch.Metric, index int, period int64) (metricDataQuery 
cloudwatch.MetricDataQuery) { + statistic := "Average" + id := "sqs" + strconv.Itoa(index) + metricDims := metric.Dimensions + metricName := *metric.MetricName + queueName := "" + for _, dim := range metricDims { + if *dim.Name == "QueueName" { + queueName = *dim.Value + } + } + label := queueName + " " + metricName + + metricDataQuery = cloudwatch.MetricDataQuery{ + Id: &id, + MetricStat: &cloudwatch.MetricStat{ + Period: &period, + Stat: &statistic, + Metric: &metric, + }, + Label: &label, + } + return +} + +func createSQSEvents(getMetricDataResults []cloudwatch.MetricDataResult, metricsetName string, regionName string, schemaMetricFields s.Schema) (event mb.Event, err error) { + event.Service = metricsetName + event.RootFields = common.MapStr{} + event.RootFields.Put("service.name", metricsetName) + event.RootFields.Put("cloud.region", regionName) + + mapOfMetricSetFieldResults := make(map[string]interface{}) + for _, output := range getMetricDataResults { + if len(output.Values) == 0 { + continue + } + labels := strings.Split(*output.Label, " ") + mapOfMetricSetFieldResults["queue.name"] = labels[0] + mapOfMetricSetFieldResults[labels[1]] = fmt.Sprint(output.Values[0]) + } + + resultMetricSetFields, err := aws.EventMapping(mapOfMetricSetFieldResults, schemaMetricFields) + if err != nil { + err = errors.Wrap(err, "Error trying to apply schemaMetricSetFields in AWS SQS metricbeat module.") + return + } + event.MetricSetFields = resultMetricSetFields + return +} diff --git a/x-pack/metricbeat/module/aws/sqs/sqs_integration_test.go b/x-pack/metricbeat/module/aws/sqs/sqs_integration_test.go new file mode 100644 index 000000000000..704242f52116 --- /dev/null +++ b/x-pack/metricbeat/module/aws/sqs/sqs_integration_test.go @@ -0,0 +1,65 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// +build integration + +package sqs + +import ( + "testing" + + "github.com/elastic/beats/x-pack/metricbeat/module/aws/mtest" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/metricbeat/mb/testing" +) + +func TestFetch(t *testing.T) { + config, info := mtest.GetConfigForTest("sqs") + if info != "" { + t.Skip("Skipping TestFetch: " + info) + } + + sqsMetricSet := mbtest.NewReportingMetricSetV2(t, config) + events, err := mbtest.ReportingFetchV2(sqsMetricSet) + if err != nil { + t.Skip("Skipping TestFetch: failed to make api calls. Please check $AWS_ACCESS_KEY_ID, " + + "$AWS_SECRET_ACCESS_KEY and $AWS_SESSION_TOKEN in config.yml") + } + + if !assert.NotEmpty(t, events) { + t.FailNow() + } + t.Logf("Module: %s Metricset: %s", sqsMetricSet.Module().Name(), sqsMetricSet.Name()) + + for _, event := range events { + // RootField + mtest.CheckEventField("service.name", "string", event, t) + mtest.CheckEventField("cloud.region", "string", event, t) + // MetricSetField + mtest.CheckEventField("empty_receives", "float", event, t) + mtest.CheckEventField("messages.delayed", "float", event, t) + mtest.CheckEventField("messages.deleted", "float", event, t) + mtest.CheckEventField("messages.not_visible", "float", event, t) + mtest.CheckEventField("messages.received", "float", event, t) + mtest.CheckEventField("messages.sent", "float", event, t) + mtest.CheckEventField("messages.visible", "float", event, t) + mtest.CheckEventField("oldest_message_age.sec", "float", event, t) + mtest.CheckEventField("sent_message_size.bytes", "float", event, t) + } +} + +func TestData(t *testing.T) { + config, info := mtest.GetConfigForTest("sqs") + if info != "" { + t.Skip("Skipping TestData: " + info) + } + + sqsMetricSet := mbtest.NewReportingMetricSetV2(t, config) + errs := mbtest.WriteEventsReporterV2(sqsMetricSet, t, "/") + if 
errs != nil { + t.Fatal("write", errs) + } +} diff --git a/x-pack/metricbeat/modules.d/aws.yml.disabled b/x-pack/metricbeat/modules.d/aws.yml.disabled index ccb5317ce096..d831e5e094d7 100644 --- a/x-pack/metricbeat/modules.d/aws.yml.disabled +++ b/x-pack/metricbeat/modules.d/aws.yml.disabled @@ -2,6 +2,7 @@ period: 300s metricsets: - "ec2" + - "sqs" access_key_id: '${AWS_ACCESS_KEY_ID:""}' secret_access_key: '${AWS_SECRET_ACCESS_KEY:""}' session_token: '${AWS_SESSION_TOKEN:""}'