diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc
index 8573206e620..05c7e883534 100644
--- a/CHANGELOG.asciidoc
+++ b/CHANGELOG.asciidoc
@@ -3,6 +3,30 @@
 :issue: https://github.com/elastic/beats/issues/
 :pull: https://github.com/elastic/beats/pull/
 
+[[release-notes-8.0.1]]
+=== Beats version 8.0.1
+https://github.com/elastic/beats/compare/v8.0.0...v8.0.1[View commits]
+
+==== Bugfixes
+
+*Filebeat*
+
+- tcp/unix input: Stop accepting connections after the socket is closed. {pull}29712[29712]
+- Fix using log_group_name_prefix in aws-cloudwatch input. {pull}29695[29695]
+- Fix multiple instances of the same module configured within `filebeat.modules` in filebeat.yml. {issue}29649[29649] {pull}29952[29952]
+- aws-s3: fix race condition in states used by s3-poller. {issue}30123[30123] {pull}30131[30131]
+- Fix broken Kafka input. {issue}29746[29746] {pull}30277[30277]
+- cisco module: Fix change that broke ASA and FTD configs using `var.input: syslog`. {pull}30072[30072]
+
+*Heartbeat*
+
+- Fix missing mapping for `service.name`. {pull}30324[30324]
+
+*Winlogbeat*
+
+- Fix run loop when reading from evtx file. {pull}30006[30006]
+
+
 [[release-notes-8.0.0]]
 === Beats version 8.0.0
 https://github.com/elastic/beats/compare/v7.17.0...v8.0.0[View commits]
diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index a07654b6e34..a0aaea48833 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -41,6 +41,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...main[Check the HEAD diff]
 - Fix field names with `add_network_direction` processor. {issue}29747[29747] {pull}29751[29751]
 - Fix a logging bug when `ssl.verification_mode` was set to `full` or `certificate`, the command `test output` incorrectly logged that TLS was disabled.
 - Fix the ability for subcommands to be ran properly from the beats containers. {pull}30452[30452]
+- Update the docker/distribution dependency to fix a security issue concerning OCI manifest type confusion. {pull}30462[30462]
 
 *Auditbeat*
 
@@ -48,22 +49,17 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...main[Check the HEAD diff]
 *Filebeat*
 
-- tcp/unix input: Stop accepting connections after socket is closed. {pull}29712[29712]
-- Fix using log_group_name_prefix in aws-cloudwatch input. {pull}29695[29695]
-- Fix multiple instances of the same module configured within `filebeat.modules` in filebeat.yml. {issue}29649[29649] {pull}29952[29952]
-- aws-s3: fix race condition in states used by s3-poller. {issue}30123[30123] {pull}30131[30131]
-- Fix broken Kafka input {issue}29746[29746] {pull}30277[30277]
 - Report the starting offset of the line in `log.offset` when using `filestream` instead of the end to be ECS compliant. {pull}30445[30445]
 - auditd: Prevent mapping explosion when truncated EXECVE records are ingested. {pull}30382[30382]
 - elasticsearch: fix duplicate ingest when using a common appender configuration {issue}30428[30428] {pull}30440[30440]
 
 *Heartbeat*
-- Fix missing mapping for `service.name`. {pull}30324[30324]
 
 *Metricbeat*
 
 - Enhance metricbeat on openshift documentation {pull}30054[30054]
 - Fixed missing ZooKeeper metrics due compatibility issues with versions >= 3.6.0 {pull}30068[30068]
+- Fix Docker module: rename fields on dashboards. {pull}30500[30500]
 
 *Packetbeat*
 
@@ -71,7 +67,6 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...main[Check the HEAD diff]
 *Winlogbeat*
 
 - Add provider names to Security pipeline conditional check in routing pipeline. {issue}27288[27288] {pull}29781[29781]
-- Fix run loop when reading from evtx file {pull}30006[30006]
 
 *Functionbeat*
 
@@ -136,10 +131,11 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...main[Check the HEAD diff]
 - Add gcp firestore metricset. {pull}29918[29918]
 - Remove strict parsing on RabbitMQ module {pull}30090[30090]
 - Add `kubernetes.container.status.last.reason` metric {pull}30306[30306]
+- Extend documentation about `orchestrator.cluster` fields. {pull}30518[30518]
 
 *Packetbeat*
 
-- Add automated OEM Npcap installation handling. {pull}29112[29112] {pull}30438[30438]
+- Add automated OEM Npcap installation handling. {pull}29112[29112] {pull}30438[30438] {pull}30493[30493]
 - Add support for capturing TLS random number and OCSP status request details. {issue}29962[29962] {pull}30102[30102]
 
 *Functionbeat*
 
@@ -172,4 +168,3 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...main[Check the HEAD diff]
 
 ==== Known Issue
 
-*Journalbeat*
diff --git a/NOTICE.txt b/NOTICE.txt
index e96e88a7b85..7dddd44d077 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -23825,11 +23825,11 @@ SOFTWARE.
 
 --------------------------------------------------------------------------------
 Dependency : github.com/docker/distribution
-Version: v2.7.1+incompatible
+Version: v2.8.0+incompatible
 Licence type (autodetected): Apache-2.0
 --------------------------------------------------------------------------------
 
-Contents of probable licence file $GOMODCACHE/github.com/docker/distribution@v2.7.1+incompatible/LICENSE:
+Contents of probable licence file $GOMODCACHE/github.com/docker/distribution@v2.8.0+incompatible/LICENSE:
 
 Apache License
                            Version 2.0, January 2004
diff --git a/auditbeat/tests/system/test_file_integrity.py b/auditbeat/tests/system/test_file_integrity.py
index 08fef13d8e5..280d2916a55 100644
--- a/auditbeat/tests/system/test_file_integrity.py
+++ b/auditbeat/tests/system/test_file_integrity.py
@@ -62,7 +62,7 @@ def wait_output(self, min_events):
             else:
                 break
 
-    @unittest.skipIf(os.getenv("BUILD_ID") is not None and platform.system() == 'Darwin',
+    @unittest.skipIf(os.getenv("CI") is not None and platform.system() == 'Darwin',
                      'Flaky test: https://github.com/elastic/beats/issues/24678')
     def test_non_recursive(self):
         """
diff --git a/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl b/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl
index 010e5e36e2f..c1e5fd55d72 100644
--- a/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl
+++ b/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl
@@ -293,6 +293,101 @@ filebeat.inputs:
   # original for harvesting but will report the symlink name as source.
   #prospector.scanner.symlinks: false
 
+  ### Parsers configuration
+
+  #### JSON configuration
+
+  # Decode JSON options. Enable this if your logs are structured in JSON.
+  #parsers:
+  #- ndjson:
+    # JSON key on which to apply the line filtering and multiline settings. This key
+    # must be top level and its value must be a string, otherwise it is ignored. If
+    # no text key is defined, the line filtering and multiline features cannot be used.
+    #message_key:
+
+    # By default, the decoded JSON is placed under a "json" key in the output document.
+    # If you enable this setting, the keys are copied to the top level of the output document.
+    #keys_under_root: false
+
+    # If keys_under_root and this setting are enabled, then the values from the decoded
+    # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.)
+    # in case of conflicts.
+    #overwrite_keys: false
+
+    # If this setting is enabled, then keys in the decoded JSON object will be recursively
+    # de-dotted and expanded into a hierarchical object structure.
+    # For example, `{"a.b.c": 123}` would be expanded into `{"a":{"b":{"c":123}}}`.
+    #expand_keys: false
+
+    # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON
+    # unmarshaling errors or when a text key is defined in the configuration but cannot
+    # be used.
+    #add_error_key: false
+
+  #### Multiline options
+
+  # Multiline can be used for log messages spanning multiple lines. This is common
+  # for Java stack traces or C line continuations.
+
+  #parsers:
+  #- multiline:
+    #type: pattern
+    # The regexp pattern that has to be matched. The example pattern matches all lines starting with [
+    #pattern: ^\[
+
+    # Defines if the pattern set under the pattern setting should be negated or not. Default is false.
+    #negate: false
+
+    # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+    # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+    # Note: "after" is the equivalent to "previous" and "before" is the equivalent to "next" in Logstash.
+    #match: after
+
+    # The maximum number of lines that are combined into one event.
+    # In case there are more than max_lines, the additional lines are discarded.
+    # Default is 500.
+    #max_lines: 500
+
+    # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+    # Default is 5s.
+    #timeout: 5s
+
+    # Do not add new line character when concatenating lines.
+    #skip_newline: false
+
+  # To aggregate a constant number of lines into a single event, use the count mode of multiline.
+
+  #parsers:
+  #- multiline:
+    #type: count
+
+    # The number of lines to aggregate into a single event.
+    #count_lines: 3
+
+    # The maximum number of lines that are combined into one event.
+    # In case there are more than max_lines, the additional lines are discarded.
+    # Default is 500.
+    #max_lines: 500
+
+    # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+    # Default is 5s.
+    #timeout: 5s
+
+    # Do not add new line character when concatenating lines.
+    #skip_newline: false
+
+  #### Parsing container events
+
+  # You can parse container events with different formats from all streams.
+
+  #parsers:
+  #- container:
+    # Source of container events. Available options: all, stdout, stderr.
+    #stream: all
+
+    # Format of the container events. Available options: auto, cri, docker, json-file.
+    #format: auto
+
   ### Log rotation
 
   # When an external tool rotates the input files with copytruncate strategy
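The commented reference above is easier to digest as an enabled configuration. A minimal sketch of a `filestream` input combining the `ndjson` and `multiline` parsers it documents — the input id, path, message key, and pattern are illustrative placeholders, not defaults:

[source,yaml]
----
filebeat.inputs:
- type: filestream
  id: app-json-logs                # illustrative input id
  paths:
    - /var/log/app/*.ndjson        # illustrative path
  parsers:
    # First decode each line as JSON and lift the keys to the top level.
    - ndjson:
        message_key: msg           # key the multiline parser operates on
        keys_under_root: true
        add_error_key: true        # surface unmarshaling errors in error.message
    # Then join stack-trace continuations: a line starting with [ opens a new event.
    - multiline:
        type: pattern
        pattern: '^\['
        negate: true
        match: after
----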
diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml
index 90d614545dc..67765b0f6d0 100644
--- a/filebeat/filebeat.reference.yml
+++ b/filebeat/filebeat.reference.yml
@@ -700,6 +700,101 @@ filebeat.inputs:
   # original for harvesting but will report the symlink name as source.
   #prospector.scanner.symlinks: false
 
+  ### Parsers configuration
+
+  #### JSON configuration
+
+  # Decode JSON options. Enable this if your logs are structured in JSON.
+  #parsers:
+  #- ndjson:
+    # JSON key on which to apply the line filtering and multiline settings. This key
+    # must be top level and its value must be a string, otherwise it is ignored. If
+    # no text key is defined, the line filtering and multiline features cannot be used.
+    #message_key:
+
+    # By default, the decoded JSON is placed under a "json" key in the output document.
+    # If you enable this setting, the keys are copied to the top level of the output document.
+    #keys_under_root: false
+
+    # If keys_under_root and this setting are enabled, then the values from the decoded
+    # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.)
+    # in case of conflicts.
+    #overwrite_keys: false
+
+    # If this setting is enabled, then keys in the decoded JSON object will be recursively
+    # de-dotted and expanded into a hierarchical object structure.
+    # For example, `{"a.b.c": 123}` would be expanded into `{"a":{"b":{"c":123}}}`.
+    #expand_keys: false
+
+    # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON
+    # unmarshaling errors or when a text key is defined in the configuration but cannot
+    # be used.
+    #add_error_key: false
+
+  #### Multiline options
+
+  # Multiline can be used for log messages spanning multiple lines. This is common
+  # for Java stack traces or C line continuations.
+
+  #parsers:
+  #- multiline:
+    #type: pattern
+    # The regexp pattern that has to be matched. The example pattern matches all lines starting with [
+    #pattern: ^\[
+
+    # Defines if the pattern set under the pattern setting should be negated or not. Default is false.
+    #negate: false
+
+    # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+    # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+    # Note: "after" is the equivalent to "previous" and "before" is the equivalent to "next" in Logstash.
+    #match: after
+
+    # The maximum number of lines that are combined into one event.
+    # In case there are more than max_lines, the additional lines are discarded.
+    # Default is 500.
+    #max_lines: 500
+
+    # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+    # Default is 5s.
+    #timeout: 5s
+
+    # Do not add new line character when concatenating lines.
+    #skip_newline: false
+
+  # To aggregate a constant number of lines into a single event, use the count mode of multiline.
+
+  #parsers:
+  #- multiline:
+    #type: count
+
+    # The number of lines to aggregate into a single event.
+    #count_lines: 3
+
+    # The maximum number of lines that are combined into one event.
+    # In case there are more than max_lines, the additional lines are discarded.
+    # Default is 500.
+    #max_lines: 500
+
+    # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+    # Default is 5s.
+    #timeout: 5s
+
+    # Do not add new line character when concatenating lines.
+    #skip_newline: false
+
+  #### Parsing container events
+
+  # You can parse container events with different formats from all streams.
+
+  #parsers:
+  #- container:
+    # Source of container events. Available options: all, stdout, stderr.
+    #stream: all
+
+    # Format of the container events. Available options: auto, cri, docker, json-file.
+    #format: auto
+
   ### Log rotation
 
   # When an external tool rotates the input files with copytruncate strategy
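The count and container parsers documented above follow the same shape. A sketch under the same caveats (ids, paths, and the line count are placeholders):

[source,yaml]
----
filebeat.inputs:
# Merge every three lines into one event, regardless of content.
- type: filestream
  id: fixed-length-records         # illustrative
  paths:
    - /var/log/records/*.log       # illustrative
  parsers:
    - multiline:
        type: count
        count_lines: 3

# Parse Docker json-file logs, keeping only the stdout stream.
- type: filestream
  id: container-logs               # illustrative
  paths:
    - /var/lib/docker/containers/*/*.log
  parsers:
    - container:
        stream: stdout
        format: docker
----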
diff --git a/go.mod b/go.mod
index 1bad66722d5..414ce1e38de 100644
--- a/go.mod
+++ b/go.mod
@@ -215,7 +215,7 @@ require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/dgraph-io/ristretto v0.1.0 // indirect
 	github.com/dimchansky/utfbom v1.1.0 // indirect
-	github.com/docker/distribution v2.7.1+incompatible // indirect
+	github.com/docker/distribution v2.8.0+incompatible // indirect
 	github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect
 	github.com/eapache/queue v1.1.0 // indirect
 	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
diff --git a/go.sum b/go.sum
index 5974ef527e0..73d67b979fc 100644
--- a/go.sum
+++ b/go.sum
@@ -473,8 +473,9 @@
 github.com/dlclark/regexp2 v1.1.7-0.20171009020623-7632a260cbaf/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
 github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
 github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.8.0+incompatible h1:l9EaZDICImO1ngI+uTifW+ZYvvz7fKISBAKpg+MbWbY=
+github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v1.4.2-0.20170802015333-8af4db6f002a/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ=
 github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc
index 0ce6f44b2df..aa997f284ae 100644
--- a/libbeat/docs/release.asciidoc
+++ b/libbeat/docs/release.asciidoc
@@ -8,6 +8,7 @@ This section summarizes the changes in each release. Also read
 <<breaking-changes>> for more detail about changes that affect
 upgrade.
 
+* <<release-notes-8.0.1>>
 * <<release-notes-8.0.0>>
 * <<release-notes-7.17.0>>
 * <<release-notes-7.16.3>>
diff --git a/metricbeat/docs/modules/kubernetes.asciidoc b/metricbeat/docs/modules/kubernetes.asciidoc
index dc6873daece..bbc11556ee2 100644
--- a/metricbeat/docs/modules/kubernetes.asciidoc
+++ b/metricbeat/docs/modules/kubernetes.asciidoc
@@ -166,7 +166,16 @@ If you are using HA for those components, be aware that when gathering data from
 
 Dashboards for `controllermanager` `scheduler` and `proxy` are not compatible with kibana versions below `7.2.0`
 
-Cluster selector in `cluster overview` dashboard helps in distinguishing and filtering metrics collected from multiple clusters. If you want to focus on a subset of the Kubernetes clusters for monitoring a specific scenario, this cluster selector could be a handy tool. Note that this selector gets populated from the `orchestrator.cluster.name` field that may not always be available. This field gets its value from sources like `kube_config`, `kubeadm-config` configMap, and Google Cloud's meta API for GKE. If the sources mentioned above don't provide this value, metricbeat will not report it. However, you can always use https://www.elastic.co/guide/en/beats/metricbeat/current/defining-processors.html[processors] to set this field and utilize it in the `cluster overview` dashboard.
+The cluster selector in the `cluster overview` dashboard helps distinguish and filter metrics collected from multiple clusters. If you want to focus on a subset of your Kubernetes clusters for monitoring a specific scenario, this selector is a handy tool. Note that it is populated from the `orchestrator.cluster.name` field, which may not always be available. That field gets its value from sources like `kube_config`, the `kubeadm-config` configMap, and Google Cloud's meta API for GKE. If those sources don't provide the value, Metricbeat will not report it. However, you can always use the https://www.elastic.co/guide/en/beats/filebeat/current/add-fields.html[add_fields processor] to set the `orchestrator.cluster` fields and utilize them in the `cluster overview` dashboard:
+[source,yaml]
+----
+processors:
+  - add_fields:
+      target: orchestrator.cluster
+      fields:
+        name: clusterName
+        url: clusterURL
+----
 
 Kubernetes cluster overview example:
diff --git a/metricbeat/module/docker/_meta/kibana/7/visualization/Docker-Network-IO-ecs.json b/metricbeat/module/docker/_meta/kibana/7/visualization/Docker-Network-IO-ecs.json
index c93a18e3e3a..af3e5261c4c 100644
--- a/metricbeat/module/docker/_meta/kibana/7/visualization/Docker-Network-IO-ecs.json
+++ b/metricbeat/module/docker/_meta/kibana/7/visualization/Docker-Network-IO-ecs.json
@@ -34,7 +34,7 @@
           "id": "1",
           "params": {
             "customLabel": "IN bytes",
-            "field": "docker.network.in.bytes"
+            "field": "docker.network.inbound.bytes"
           },
           "schema": "metric",
           "type": "max"
@@ -69,7 +69,7 @@
           "id": "4",
           "params": {
             "customLabel": "OUT bytes",
-            "field": "docker.network.out.bytes"
+            "field": "docker.network.outbound.bytes"
           },
           "schema": "metric",
           "type": "max"
@@ -171,4 +171,4 @@
   "type": "visualization",
   "updated_at": "2021-08-04T16:31:07.529Z",
   "version": "WzM3NjQsMV0="
-}
\ No newline at end of file
+}
diff --git a/metricbeat/module/docker/_meta/kibana/7/visualization/Docker-containers-ecs.json b/metricbeat/module/docker/_meta/kibana/7/visualization/Docker-containers-ecs.json
index 1c5bee9fd7f..0d6eadff796 100644
--- a/metricbeat/module/docker/_meta/kibana/7/visualization/Docker-containers-ecs.json
+++ b/metricbeat/module/docker/_meta/kibana/7/visualization/Docker-containers-ecs.json
@@ -49,7 +49,7 @@
           "id": "4",
           "params": {
             "customLabel": "DiskIO",
-            "field": "docker.diskio.total"
+            "field": "docker.diskio.summary.bytes"
           },
           "schema": "metric",
           "type": "max"
@@ -117,4 +117,4 @@
   "type": "visualization",
   "updated_at": "2021-08-04T16:31:07.529Z",
   "version": "WzM3NTgsMV0="
-}
\ No newline at end of file
+}
diff --git a/metricbeat/module/kubernetes/_meta/docs.asciidoc b/metricbeat/module/kubernetes/_meta/docs.asciidoc
index 8e8e8419523..e2c2ac82ecf 100644
--- a/metricbeat/module/kubernetes/_meta/docs.asciidoc
+++ b/metricbeat/module/kubernetes/_meta/docs.asciidoc
@@ -157,7 +157,16 @@ If you are using HA for those components, be aware that when gathering data from
 
 Dashboards for `controllermanager` `scheduler` and `proxy` are not compatible with kibana versions below `7.2.0`
 
-Cluster selector in `cluster overview` dashboard helps in distinguishing and filtering metrics collected from multiple clusters. If you want to focus on a subset of the Kubernetes clusters for monitoring a specific scenario, this cluster selector could be a handy tool. Note that this selector gets populated from the `orchestrator.cluster.name` field that may not always be available. This field gets its value from sources like `kube_config`, `kubeadm-config` configMap, and Google Cloud's meta API for GKE. If the sources mentioned above don't provide this value, metricbeat will not report it. However, you can always use https://www.elastic.co/guide/en/beats/metricbeat/current/defining-processors.html[processors] to set this field and utilize it in the `cluster overview` dashboard.
+The cluster selector in the `cluster overview` dashboard helps distinguish and filter metrics collected from multiple clusters. If you want to focus on a subset of your Kubernetes clusters for monitoring a specific scenario, this selector is a handy tool. Note that it is populated from the `orchestrator.cluster.name` field, which may not always be available. That field gets its value from sources like `kube_config`, the `kubeadm-config` configMap, and Google Cloud's meta API for GKE. If those sources don't provide the value, Metricbeat will not report it. However, you can always use the https://www.elastic.co/guide/en/beats/filebeat/current/add-fields.html[add_fields processor] to set the `orchestrator.cluster` fields and utilize them in the `cluster overview` dashboard:
+[source,yaml]
+----
+processors:
+  - add_fields:
+      target: orchestrator.cluster
+      fields:
+        name: clusterName
+        url: clusterURL
+----
 
 Kubernetes cluster overview example:
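With that processor in place, every event carries the identity fields the cluster selector reads. Conceptually, the resulting document contains (the values are the placeholders from the snippet above, not real cluster metadata):

[source,yaml]
----
orchestrator:
  cluster:
    name: clusterName   # populates the cluster selector
    url: clusterURL
----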
diff --git a/packetbeat/beater/install_npcap.go b/packetbeat/beater/install_npcap.go
index e947bca5b01..d15ac21479a 100644
--- a/packetbeat/beater/install_npcap.go
+++ b/packetbeat/beater/install_npcap.go
@@ -51,6 +51,9 @@ func installNpcap(b *beat.Beat) error {
 			log.Infof("npcap version: %s", npcapVersion)
 		}
 	}()
+	if !npcap.Upgradeable() {
+		return nil
+	}
 
 	ctx, cancel := context.WithTimeout(context.Background(), installTimeout)
 	defer cancel()
diff --git a/packetbeat/npcap/npcap.go b/packetbeat/npcap/npcap.go
index c81d1ce731d..d0cc42dce48 100644
--- a/packetbeat/npcap/npcap.go
+++ b/packetbeat/npcap/npcap.go
@@ -68,6 +68,18 @@ func Install(ctx context.Context, log *logp.Logger, path, dst string, compat bool) error {
 }
 
 func install(ctx context.Context, log *logp.Logger, path, dst string, compat bool) error {
+	if pcap.Version() != "" {
+		// If we are here, a runtime Npcap DLL is already loaded. It must be
+		// unloaded to prevent the application from being killed during the
+		// install.
+		//
+		// See https://npcap.com/guide/npcap-users-guide.html#npcap-installation-uninstall-options.
+		err := unloadWinPCAP()
+		if err != nil {
+			return fmt.Errorf("npcap: failed to unload Npcap DLL: %w", err)
+		}
+	}
+
 	args := []string{"/S", "/winpcap_mode=no"}
 	if compat {
 		args[1] = "/winpcap_mode=yes"
@@ -96,7 +108,7 @@ func install(ctx context.Context, log *logp.Logger, path, dst string, compat bool) error {
 		return fmt.Errorf("npcap: failed to install Npcap: %w", err)
 	}
 
-	return reloadWinPCAP()
+	return loadWinPCAP()
 }
 
 func Upgradeable() bool {
diff --git a/packetbeat/npcap/npcap_other.go b/packetbeat/npcap/npcap_other.go
index c813644d471..7f0d29c09e6 100644
--- a/packetbeat/npcap/npcap_other.go
+++ b/packetbeat/npcap/npcap_other.go
@@ -22,4 +22,4 @@ package npcap
 
 func loadWinPCAP() error { return nil }
 
-func reloadWinPCAP() error { return nil }
+func unloadWinPCAP() error { return nil }
diff --git a/packetbeat/npcap/npcap_windows.go b/packetbeat/npcap/npcap_windows.go
index 44d0053820f..3e08bf4a1ee 100644
--- a/packetbeat/npcap/npcap_windows.go
+++ b/packetbeat/npcap/npcap_windows.go
@@ -24,10 +24,4 @@ import "github.com/google/gopacket/pcap"
 
 func loadWinPCAP() error { return pcap.LoadWinPCAP() }
 
-func reloadWinPCAP() error {
-	err := pcap.UnloadWinPCAP()
-	if err != nil {
-		return err
-	}
-	return pcap.LoadWinPCAP()
-}
+func unloadWinPCAP() error { return pcap.UnloadWinPCAP() }
diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml
index becffd39a6b..5cfa51f4a6a 100644
--- a/x-pack/filebeat/filebeat.reference.yml
+++ b/x-pack/filebeat/filebeat.reference.yml
@@ -2759,6 +2759,101 @@ filebeat.inputs:
   # original for harvesting but will report the symlink name as source.
   #prospector.scanner.symlinks: false
 
+  ### Parsers configuration
+
+  #### JSON configuration
+
+  # Decode JSON options. Enable this if your logs are structured in JSON.
+  #parsers:
+  #- ndjson:
+    # JSON key on which to apply the line filtering and multiline settings. This key
+    # must be top level and its value must be a string, otherwise it is ignored. If
+    # no text key is defined, the line filtering and multiline features cannot be used.
+    #message_key:
+
+    # By default, the decoded JSON is placed under a "json" key in the output document.
+    # If you enable this setting, the keys are copied to the top level of the output document.
+    #keys_under_root: false
+
+    # If keys_under_root and this setting are enabled, then the values from the decoded
+    # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.)
+    # in case of conflicts.
+    #overwrite_keys: false
+
+    # If this setting is enabled, then keys in the decoded JSON object will be recursively
+    # de-dotted and expanded into a hierarchical object structure.
+    # For example, `{"a.b.c": 123}` would be expanded into `{"a":{"b":{"c":123}}}`.
+    #expand_keys: false
+
+    # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON
+    # unmarshaling errors or when a text key is defined in the configuration but cannot
+    # be used.
+    #add_error_key: false
+
+  #### Multiline options
+
+  # Multiline can be used for log messages spanning multiple lines. This is common
+  # for Java stack traces or C line continuations.
+
+  #parsers:
+  #- multiline:
+    #type: pattern
+    # The regexp pattern that has to be matched. The example pattern matches all lines starting with [
+    #pattern: ^\[
+
+    # Defines if the pattern set under the pattern setting should be negated or not. Default is false.
+    #negate: false
+
+    # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+    # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+    # Note: "after" is the equivalent to "previous" and "before" is the equivalent to "next" in Logstash.
+    #match: after
+
+    # The maximum number of lines that are combined into one event.
+    # In case there are more than max_lines, the additional lines are discarded.
+    # Default is 500.
+    #max_lines: 500
+
+    # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+    # Default is 5s.
+    #timeout: 5s
+
+    # Do not add new line character when concatenating lines.
+    #skip_newline: false
+
+  # To aggregate a constant number of lines into a single event, use the count mode of multiline.
+
+  #parsers:
+  #- multiline:
+    #type: count
+
+    # The number of lines to aggregate into a single event.
+    #count_lines: 3
+
+    # The maximum number of lines that are combined into one event.
+    # In case there are more than max_lines, the additional lines are discarded.
+    # Default is 500.
+    #max_lines: 500
+
+    # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+    # Default is 5s.
+    #timeout: 5s
+
+    # Do not add new line character when concatenating lines.
+    #skip_newline: false
+
+  #### Parsing container events
+
+  # You can parse container events with different formats from all streams.
+
+  #parsers:
+  #- container:
+    # Source of container events. Available options: all, stdout, stderr.
+    #stream: all
+
+    # Format of the container events. Available options: auto, cri, docker, json-file.
+    #format: auto
+
   ### Log rotation
 
   # When an external tool rotates the input files with copytruncate strategy